hexsha
stringlengths 40
40
| size
int64 6
14.9M
| ext
stringclasses 1
value | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 6
260
| max_stars_repo_name
stringlengths 6
119
| max_stars_repo_head_hexsha
stringlengths 40
41
| max_stars_repo_licenses
list | max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 6
260
| max_issues_repo_name
stringlengths 6
119
| max_issues_repo_head_hexsha
stringlengths 40
41
| max_issues_repo_licenses
list | max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 6
260
| max_forks_repo_name
stringlengths 6
119
| max_forks_repo_head_hexsha
stringlengths 40
41
| max_forks_repo_licenses
list | max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | avg_line_length
float64 2
1.04M
| max_line_length
int64 2
11.2M
| alphanum_fraction
float64 0
1
| cells
list | cell_types
list | cell_type_groups
list |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4abe405a99def4cdeef62a0c7a5baa8f6061f188
| 8,002 |
ipynb
|
Jupyter Notebook
|
Chapter 12/step_functions/Scikit-Learn on Boston Housing - v1.ipynb
|
amikewatson/Learn-Amazon-SageMaker-second-edition
|
64955fd96a5917d8d4d5e18a6dfc57a5432250be
|
[
"MIT"
] | 15 |
2021-10-01T02:36:24.000Z
|
2022-03-02T23:37:04.000Z
|
Chapter 12/step_functions/Scikit-Learn on Boston Housing - v1.ipynb
|
amikewatson/Learn-Amazon-SageMaker-second-edition
|
64955fd96a5917d8d4d5e18a6dfc57a5432250be
|
[
"MIT"
] | null | null | null |
Chapter 12/step_functions/Scikit-Learn on Boston Housing - v1.ipynb
|
amikewatson/Learn-Amazon-SageMaker-second-edition
|
64955fd96a5917d8d4d5e18a6dfc57a5432250be
|
[
"MIT"
] | 14 |
2021-10-30T14:21:43.000Z
|
2022-03-11T02:14:28.000Z
| 24.322188 | 120 | 0.53999 |
[
[
[
"%%sh\npip -q install sagemaker stepfunctions --upgrade",
"_____no_output_____"
],
[
"# Enter your role ARN\nworkflow_execution_role = ''",
"_____no_output_____"
],
[
"import boto3\nimport sagemaker\nimport stepfunctions\n\nfrom stepfunctions import steps\nfrom stepfunctions.steps import TrainingStep, ModelStep, EndpointConfigStep, EndpointStep, TransformStep, Chain\nfrom stepfunctions.inputs import ExecutionInput\nfrom stepfunctions.workflow import Workflow",
"_____no_output_____"
],
[
"sess = sagemaker.Session()\nbucket = sess.default_bucket() \nrole = sagemaker.get_execution_role()\n\nprefix = 'sklearn-boston-housing-stepfunc'\n\ntraining_data = sess.upload_data(path='housing.csv', key_prefix=prefix + \"/training\")\noutput = 's3://{}/{}/output/'.format(bucket,prefix)\nprint(training_data)\nprint(output)",
"_____no_output_____"
],
[
"import pandas as pd\n\ndata = pd.read_csv('housing.csv')\ndata.head()",
"_____no_output_____"
],
[
"data.drop(['medv'], axis=1, inplace=True)\ndata.to_csv('test.csv', index=False, header=False)\n\nbatch_data = sess.upload_data(path='test.csv', key_prefix=prefix + \"/batch\")",
"_____no_output_____"
],
[
"from sagemaker.sklearn import SKLearn\n\nsk = SKLearn(entry_point='sklearn-boston-housing.py',\n role=role,\n framework_version='0.23-1',\n train_instance_count=1, \n train_instance_type='ml.m5.large',\n output_path=output,\n hyperparameters={\n 'normalize': True,\n 'test-size': 0.1,\n }\n)",
"_____no_output_____"
],
[
"execution_input = ExecutionInput(schema={\n 'JobName': str, \n 'ModelName': str,\n 'EndpointName': str\n})",
"_____no_output_____"
],
[
"training_step = TrainingStep(\n 'Train a Scikit-Learn script on the Boston Housing dataset', \n estimator=sk,\n data={'training': sagemaker.inputs.TrainingInput(training_data, content_type='text/csv')},\n job_name=execution_input['JobName'] \n)",
"_____no_output_____"
],
[
"model_step = ModelStep(\n 'Create the model in SageMaker',\n model=training_step.get_expected_model(),\n model_name=execution_input['ModelName'] \n)",
"_____no_output_____"
],
[
"transform_step = TransformStep(\n 'Transform the dataset in batch mode',\n transformer=sk.transformer(instance_count=1, instance_type='ml.m5.large'),\n job_name=execution_input['JobName'], \n model_name=execution_input['ModelName'], \n data=batch_data,\n content_type='text/csv'\n)",
"_____no_output_____"
],
[
"endpoint_config_step = EndpointConfigStep(\n \"Create an endpoint configuration for the model\",\n endpoint_config_name=execution_input['ModelName'],\n model_name=execution_input['ModelName'],\n initial_instance_count=1,\n instance_type='ml.m5.large'\n)",
"_____no_output_____"
],
[
"endpoint_step = EndpointStep(\n \"Create an endpoint hosting the model\",\n endpoint_name=execution_input['EndpointName'],\n endpoint_config_name=execution_input['ModelName']\n)",
"_____no_output_____"
],
[
"workflow_definition = Chain([\n training_step,\n model_step,\n transform_step,\n endpoint_config_step,\n endpoint_step\n])",
"_____no_output_____"
],
[
"import time\n\ntimestamp = time.strftime(\"%Y-%m-%d-%H-%M-%S\", time.gmtime())\n\nworkflow = Workflow(\n name='sklearn-boston-housing-workflow1-{}'.format(timestamp),\n definition=workflow_definition,\n role=workflow_execution_role,\n execution_input=execution_input\n)",
"_____no_output_____"
],
[
"# Not available in JupyterLab\n# see https://github.com/aws/aws-step-functions-data-science-sdk-python/issues/127\n\n# workflow.render_graph(portrait=True)",
"_____no_output_____"
],
[
"workflow.create()",
"_____no_output_____"
],
[
"execution = workflow.execute(\n inputs={\n 'JobName': 'sklearn-boston-housing-{}'.format(timestamp), \n 'ModelName': 'sklearn-boston-housing-{}'.format(timestamp),\n 'EndpointName': 'sklearn-boston-housing-{}'.format(timestamp)\n }\n)",
"_____no_output_____"
],
[
"# Not available in JupyterLab\n# see https://github.com/aws/aws-step-functions-data-science-sdk-python/issues/127\n\n# execution.render_progress()",
"_____no_output_____"
],
[
"execution.list_events()",
"_____no_output_____"
],
[
"workflow.list_executions(html=True)",
"_____no_output_____"
],
[
"Workflow.list_workflows(html=True)",
"_____no_output_____"
]
],
[
[
"---",
"_____no_output_____"
]
]
] |
[
"code",
"markdown"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
4abe4d5c834e8457546958ed65d70ca554538473
| 500,404 |
ipynb
|
Jupyter Notebook
|
notebook/China_open_source_blue_paper_2022.ipynb
|
SonglinLife/open-digger
|
3e3d45953dde880091fed0f961db40a0df823a37
|
[
"Apache-2.0"
] | 16 |
2020-08-19T01:45:54.000Z
|
2020-09-14T02:34:59.000Z
|
notebook/China_open_source_blue_paper_2022.ipynb
|
SonglinLife/open-digger
|
3e3d45953dde880091fed0f961db40a0df823a37
|
[
"Apache-2.0"
] | 61 |
2020-08-19T02:16:46.000Z
|
2020-09-14T05:19:00.000Z
|
notebook/China_open_source_blue_paper_2022.ipynb
|
SonglinLife/open-digger
|
3e3d45953dde880091fed0f961db40a0df823a37
|
[
"Apache-2.0"
] | 6 |
2020-08-18T11:06:40.000Z
|
2020-09-12T03:48:42.000Z
| 269.90507 | 84,581 | 0.906404 |
[
[
[
"empty"
]
]
] |
[
"empty"
] |
[
[
"empty"
]
] |
4abe50e4d1442f828546eb1502ee11c3ade39610
| 19,536 |
ipynb
|
Jupyter Notebook
|
Assignments/Assignment1/Assignment1.ipynb
|
tomgrubbmath/IntroMathSoftware
|
da15f9f0649989b9c2f12c06015b65b32e728061
|
[
"MIT"
] | null | null | null |
Assignments/Assignment1/Assignment1.ipynb
|
tomgrubbmath/IntroMathSoftware
|
da15f9f0649989b9c2f12c06015b65b32e728061
|
[
"MIT"
] | null | null | null |
Assignments/Assignment1/Assignment1.ipynb
|
tomgrubbmath/IntroMathSoftware
|
da15f9f0649989b9c2f12c06015b65b32e728061
|
[
"MIT"
] | null | null | null | 26.082777 | 581 | 0.576577 |
[
[
[
"### Math 157: Intro to Mathematical Software\n### UC San Diego, Winter 2021",
"_____no_output_____"
],
[
"### Homework 1: due Thursday, Jan 14 at 8PM Pacific",
"_____no_output_____"
],
[
"In general, each homework will be presented as a single Jupyter notebook like this one. A problem will typically consist of multiple components; however, each overall problem within a homework will count the same towards that homework grade. In addition, homework sets may contain different numbers of problems, but the maximum score on each homework will count the same towards the overall course grade. (Remember, we only count your top 6 of 8 homeworks.)\n\nEach component of each problem will also briefly indicate the criteria on which it is being judged. \n- For free-response problems, answers should be in complete sentences. \"Conceptual correctness\" means we are looking for a specific answer but not a specific wording; \"thoroughness\" means you gave enough of a response (e.g., if we want three \"essentially different\" examples of some phenomenon, your examples should be really different). \n- For problems involving mathematical calculations, answers should be presented in standard mathematical notation using TeX in a Markdown cell. \"Mathematical correctness\" means what it would in a normal math course. \n- For problems requiring code, answers should appear as executable code. \"Code correctness\" means that the code executes without errors and does what was asked; we may assess this using automated testing software.\n\nWhile you are free to create additional notebooks in which to do scratch work, please compose your answers by typing them directly into this notebook copying/pasting.\n\n### Kernel: \nAll computations in this notebook should use the Python 3 (systemwide) kernel.",
"_____no_output_____"
],
[
"### Collaborators/resources used:\nTo start, please list all students you worked with in the box below. Additionally, include basic citations to resources you used along the way (it can be as simple as Title: hyperlink_to_the_webpage). You do not need to add citations to hyperlinks of resources that I have already added.\n\nRemember! Collaboration is *encouraged*, but *you must write up answers in your own words*. ",
"_____no_output_____"
],
[
"Answer Box:",
"_____no_output_____"
],
[
"### Problem 1: Markdown and Jupyter Notebooks\n\nGrading criterion: correctness. The following hyperlink may be useful for the questions on Jupyter Notebooks: http://maxmelnick.com/2016/04/19/python-beginner-tips-and-tricks.html",
"_____no_output_____"
],
[
"1a.) Write some text in Markdown that illustrates at least three formatting features not used anywhere else in this homework. Your text should also describe in words what these features are.",
"_____no_output_____"
],
[
"Answer Box:",
"_____no_output_____"
],
[
"1b.) What is the difference between \"Command Mode\" and \"Edit Mode\" in Jupyter Notebook?",
"_____no_output_____"
],
[
"Answer Box:",
"_____no_output_____"
],
[
"1c.) Write at least 3 keyboard shortcuts that can be used with Jupyter Notebook (and include what they do!) \n\nExample: In command mode, the shortcut \"A\" inserts a cell above a selected cell.",
"_____no_output_____"
],
[
"Answer Box:",
"_____no_output_____"
],
[
"Note: You should keep in mind your answers to 1c.) as the quarter progresses! Keyboard shortcuts can make your workflow much more efficient! ",
"_____no_output_____"
],
[
"### Problem 2: Python Basics",
"_____no_output_____"
],
[
"Grading criterion: correctness.",
"_____no_output_____"
],
[
"2a. In the cell below there is a list of numbers, `L`. Using *basic (one-line/one-command) Python List operations*, print out\n\n-The length of the list\n\n-The last number in the list in *two* separate ways\n\n-The list in reverse order\n\n-Every third number in the list\n\n-The sum of the numbers in the list\n",
"_____no_output_____"
]
],
[
[
"L = [x**2 % 491 for x in range(0,1000,27)]",
"_____no_output_____"
],
[
"#Your code goes here (There should be 6 separate print statements in this block, corresponding to the 6 questions listed above)",
"_____no_output_____"
],
[
"\n",
"_____no_output_____"
]
],
[
[
"2b. Run the following code. In this specific instance, is Python operating most closely to \"pass by reference\" or \"pass by value\"? (In reality Python operates under \"pass by object reference\" but I mainly want to stress what happens on this example). A useful resource could be: https://robertheaton.com/2014/02/09/pythons-pass-by-object-reference-as-explained-by-philip-k-dick/ . This is an important example to keep in mind as you write programs in python; even though a list can be initialized globally, things that happen to it inside a function call can *persist*.",
"_____no_output_____"
]
],
[
[
"numbers = [1,2,7]\nprint('The list of numbers is:', numbers)\n\ndef append5(L):\n L.append(5)\n return()\n\nappend5(numbers)\nappend5(numbers)\nappend5(numbers)\nprint('The new list of numbers is:', numbers)",
"The list of numbers is: [1, 2, 7]\nThe new list of numbers is: [1, 2, 7, 5, 5, 5]\n"
],
[
"def valueAppend5(L):\n print(L)\n L[0] = 100\n print(L)\n\nnumbers = [1,2,7]\nvalueAppend5(numbers)\nprint(numbers)",
"[1, 2, 7]\n[100, 2, 7]\n[100, 2, 7]\n"
]
],
[
[
"Answer Box:",
"_____no_output_____"
],
[
"2c. Build a dictionary in Python that has keys of 5 *distinct types*. The values can be arbitrary. Remember: to get the type of an object X in Python, call type(X). In the markdown cell below the dictionary, write out a reasonable key type you could use if you were using the key to index a location in a grid (i.e. the key has an x-coordinate and a y-coordinate). This link may be useful: https://www.tutorialsteacher.com/python/python-data-types",
"_____no_output_____"
]
],
[
[
"typeDict = ###Your code here",
"_____no_output_____"
]
],
[
[
"Answer Box:",
"_____no_output_____"
],
[
"### Problem 3: Truth values",
"_____no_output_____"
],
[
"Grading criterion: correctness and thoroughness.\n\nAn expression `e` will be called *truthy* if `bool(e)` is `True`. Otherwise `e` is *falsy*. In other words, the following conditional statement determines whether or not an object is truthy:",
"_____no_output_____"
]
],
[
[
"if e:\n print('This object is truthy')\nelse:\n print('This object is falsy')",
"_____no_output_____"
]
],
[
[
"3a. Create a list `l` consisting of 10 different Python objects that are falsy. For correctness, your list must have the property that `a is b` evaluates to `False` whenever `a` and `b` are entries of the list in different positions. For thoroughness, the entries should look as different as possible. (Hint: an empty list `[]` is an example.)",
"_____no_output_____"
]
],
[
[
"l = [] # insert ten objects here",
"_____no_output_____"
],
[
"# Use this code to test correctness of your answer. Each print statement should output True if you've done this correctly.\nprint(len(l) == 10) # Checks that your list has exactly 10 elements\nprint(all(not l[i] for i in range(10))) # Checks that your list consists of falsy elements\nprint(all(not (l[i] is l[j]) for i in range(10) for j in range(i+1, 10))) # Checks that different list elements correspond to different ",
"_____no_output_____"
]
],
[
[
"3b. In Python, \"is\" means \"identical objects\", whereas \"==\" can be much more subtle. Create a list `l` consisting of 5 tuples `(a, b)` for each of which `a==b` evaluates to `True` but `a is b` evaluates to `False`. (Hint: the tuple `([], [])` is an example)",
"_____no_output_____"
]
],
[
[
"l = [] # insert five objects here\n",
"_____no_output_____"
]
],
[
[
"3c: By analogy with the code snippet given to test your answer in 3a, write a code snippet to verify correctness of your answer to 3b. That is, the code snippet should print one or more True/False values, all of which are True if and only if the answer is correct.",
"_____no_output_____"
]
],
[
[
"# Your code snippet goes here",
"_____no_output_____"
]
],
[
[
"### Problem 4: Flow control\n\nGrading criterion: correctness of output.",
"_____no_output_____"
],
[
"4b. Write a function named `fizzBuzz` that accepts an integer `N` and for each integer `m` from `1` to `N`, prints `'Fizz'` if `m` is divisible by 2 but not 3, prints `'Buzz'` if `m` is divisible by 3 but not 2, prints `'FizzBuzz'` if `m` is divisible by 2 and 3, and prints `'Moot'` if none of the above are true.",
"_____no_output_____"
]
],
[
[
"def fizzBuzz(N):\n # Your code goes here",
"_____no_output_____"
],
[
"# To test your answer, run the following function call. I have displayed the output you should get in the raw cell below.\nfizzBuzz(7)",
"_____no_output_____"
]
],
[
[
"Moot\nFizz\nBuzz\nFizz\nMoot\nFizzBuzz\nMoot",
"_____no_output_____"
]
],
[
[
"### Problem 5: Better and worse\n\nGrading criterion: correctness of code and thoroughness of explanation. ",
"_____no_output_____"
],
[
"5a. The Fibonacci numbers are defined by $f_0 = f_1 = 1$ and, for $n\\geq 2$, \n$$f_n = f_{n-1}+f_{n-2}.$$\nWrite two functions which take as input a non-negative integer $n$ and output the $n$th Fibonacci number. The first function, `fib1`, *should use recursion*. The second function, `fib2`, *should not use recursion*. \n\nIf you have not seen recursion before, I have written a function which uses recursion to compute the $n$th factorial, $$n! = n\\cdot(n-1)\\dots 2\\cdot 1.$$\nYou may want to model `fib1` after this. ",
"_____no_output_____"
]
],
[
[
"def recursiveFactorial(N):\n if N == 0: #Every recursive function needs a base case, to tell the program where to start\n return(1) #In this case, the base case is 0! = 1\n else: #If we are not in the base case, this means N >= 1\n return(N*recursiveFactorial(N-1)) #In this case, we reduce the problem to finding recursiveFactorial(N-1), and then modify it by multiplying by N to get the final answer.",
"_____no_output_____"
],
[
"def fib1(N):\n #Your code goes here (use recursion this time!)",
"_____no_output_____"
],
[
"def fib2(N):\n #Your code goes here (do not use recursion this time!)",
"_____no_output_____"
],
[
"#To verify that your functions agree, run this line:\nprint(all([fib1(n) == fib2(n) for n in range(0,15)]))",
"_____no_output_____"
]
],
[
[
"5b.) What are the two code cells below this question computing? Based on the evaluation of the code cells, which method of computing Fibonacci numbers is preferable? (You may want to read about Python's time module here: https://docs.python.org/3/library/time.html, although you can probably guess what is happening with the timing anyways. For those of you interested in coding as a potential career, you may want to read about the differences between recursion and dynamic programming, which this problem highlights)",
"_____no_output_____"
]
],
[
[
"import time\na = time.time()\nfor i in range(10):\n g = fib1(30)\nb = time.time()\nprint((b-a)/10)",
"_____no_output_____"
],
[
"import time\na = time.time()\nfor i in range(10):\n g = fib2(30)\nb = time.time()\nprint((b-a)/10)",
"_____no_output_____"
]
],
[
[
"Answer Box:",
"_____no_output_____"
],
[
"### Problem 6: List comprehensions\n\nGrading criterion: correctness.",
"_____no_output_____"
],
[
"Translate each of the following mathematical definitions of sets into a Python list comprehension. WARNING: Remember how the range function in Python works, and remember that exponentiation in Python *does not* use the carat symbol.\n- $\\{x:0\\leq x\\leq 100\\}$\n- $\\{x: 0 < x < 100, x \\not\\equiv 0 \\pmod{3} \\}$\n- $\\{x: 10 < x < 50, x^2 \\equiv 1 \\pmod{5}\\}$\n- $\\{(x,y): 0 < x < 1000, 0 < y < 1000, x^2 - y^3 = 1\\}$",
"_____no_output_____"
]
],
[
[
"l1 = #Replace this comment with your one line list comprehension ",
"_____no_output_____"
],
[
"l2 = #Replace this comment with your one line list comprehension ",
"_____no_output_____"
],
[
"l3 = #Replace this comment with your one line list comprehension ",
"_____no_output_____"
],
[
"l4 = #Replace this comment with your one line list comprehension",
"_____no_output_____"
],
[
"print(l1)\nprint(l2)\nprint(l3)\nprint(l4)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"raw",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"raw",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"raw"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"raw"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
4abe519494fb5c5131f05493f4a95ea6210c666f
| 66,268 |
ipynb
|
Jupyter Notebook
|
Course3/Week3/C3_W3_Assignment.ipynb
|
capchitts/TensorFlowProfessionalCertificate
|
038eeba3cb789fdc9cbae9c30a650a06edd086c1
|
[
"MIT"
] | null | null | null |
Course3/Week3/C3_W3_Assignment.ipynb
|
capchitts/TensorFlowProfessionalCertificate
|
038eeba3cb789fdc9cbae9c30a650a06edd086c1
|
[
"MIT"
] | null | null | null |
Course3/Week3/C3_W3_Assignment.ipynb
|
capchitts/TensorFlowProfessionalCertificate
|
038eeba3cb789fdc9cbae9c30a650a06edd086c1
|
[
"MIT"
] | null | null | null | 116.056042 | 22,042 | 0.793384 |
[
[
[
"#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"_____no_output_____"
]
],
[
[
"**Note:** This notebook can run using TensorFlow 2.5.0",
"_____no_output_____"
]
],
[
[
"#!pip install tensorflow==2.5.0",
"_____no_output_____"
],
[
"import json\nimport tensorflow as tf\nimport csv\nimport random\nimport numpy as np\n\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nfrom tensorflow.keras.utils import to_categorical\nfrom tensorflow.keras import regularizers\n\n\nembedding_dim = 100\nmax_length = 16\ntrunc_type='post'\npadding_type='post'\noov_tok = \"<OOV>\"\ntraining_size=160000\ntest_portion=.1\n\ncorpus = []\n",
"_____no_output_____"
],
[
"# Note that I cleaned the Stanford dataset to remove LATIN1 encoding to make it easier for Python CSV reader\n# You can do that yourself with:\n# iconv -f LATIN1 -t UTF8 training.1600000.processed.noemoticon.csv -o training_cleaned.csv\n\n# training_cleaned.csv\n!gdown --id 1wd8KaeCSHxt-nEpMeuHFSNWrDp8joUXJ\n\nnum_sentences = 0\n\nwith open(\"./training_cleaned.csv\") as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n for row in reader:\n list_item=[]\n \n ### START CODE HERE\n list_item.append(row[5])\n this_label=row[0]\n if this_label=='0':\n list_item.append(0)\n else:\n list_item.append(1)\n ### END CODE HERE\n \n num_sentences = num_sentences + 1\n corpus.append(list_item)\n",
"Downloading...\nFrom: https://drive.google.com/uc?id=1wd8KaeCSHxt-nEpMeuHFSNWrDp8joUXJ\nTo: /content/training_cleaned.csv\n100% 239M/239M [00:02<00:00, 83.0MB/s]\n"
],
[
"print(num_sentences)\nprint(len(corpus))\nprint(corpus[1])\n\n# Expected Output:\n# 1600000\n# 1600000\n# [\"is upset that he can't update his Facebook by texting it... and might cry as a result School today also. Blah!\", 0]",
"1600000\n1600000\n[\"is upset that he can't update his Facebook by texting it... and might cry as a result School today also. Blah!\", 0]\n"
],
[
"sentences=[]\nlabels=[]\nrandom.shuffle(corpus)\nfor x in range(training_size):\n sentences.append(corpus[x][0])# YOUR CODE HERE)\n labels.append(corpus[x][1])# YOUR CODE HERE)\n\n\ntokenizer = Tokenizer()\ntokenizer.fit_on_texts(sentences)# YOUR CODE HERE)\n\nword_index = tokenizer.word_index\nvocab_size=len(word_index)# YOUR CODE HERE)\n\nsequences = tokenizer.texts_to_sequences(sentences)# YOUR CODE HERE)\npadded = pad_sequences(sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)# YOUR CODE HERE)\n\nsplit = int(test_portion * training_size)\n\ntest_sequences = padded[0:split]# YOUR CODE HERE)\ntraining_sequences = padded[split:training_size]# YOUR CODE HERE)\ntest_labels = labels[0:split]# YOUR CODE HERE)\ntraining_labels = labels[split:training_size]# YOUR CODE HERE)",
"_____no_output_____"
],
[
"print(vocab_size)\nprint(word_index['i'])\n# Expected Output\n# 138856\n# 1",
"138478\n1\n"
],
[
"# Note this is the 100 dimension version of GloVe from Stanford\n\n# glove.6B.100d.txt\n!gdown --id 1W5vZy2etitAblLdFn8_DxnsQKzfFJ98g\n\nembeddings_index = {};\nwith open('./glove.6B.100d.txt') as f:\n for line in f:\n values = line.split();\n word = values[0];\n coefs = np.asarray(values[1:], dtype='float32');\n embeddings_index[word] = coefs;\n\nembeddings_matrix = np.zeros((vocab_size+1, embedding_dim));\nfor word, i in word_index.items():\n embedding_vector = embeddings_index.get(word);\n if embedding_vector is not None:\n embeddings_matrix[i] = embedding_vector;",
"Downloading...\nFrom: https://drive.google.com/uc?id=1W5vZy2etitAblLdFn8_DxnsQKzfFJ98g\nTo: /content/glove.6B.100d.txt\n100% 347M/347M [00:01<00:00, 196MB/s]\n"
],
[
"print(len(embeddings_matrix))\n# Expected Output\n# 138857",
"138479\n"
],
[
"model = tf.keras.Sequential([\n # YOUR CODE HERE\n tf.keras.layers.Embedding(vocab_size+1, embedding_dim, input_length=max_length, weights=[embeddings_matrix], trainable=False),\n tf.keras.layers.Dropout(0.2),\n tf.keras.layers.Conv1D(64, 5, activation='relu'),\n tf.keras.layers.MaxPooling1D(pool_size=4),\n tf.keras.layers.LSTM(64),\n tf.keras.layers.Dense(1, activation='sigmoid')\n])\nmodel.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])# YOUR CODE HERE)\nmodel.summary()\n\nnum_epochs = 50\n\ntraining_padded = np.array(training_sequences)\ntraining_labels = np.array(training_labels)\ntesting_padded = np.array(test_sequences)\ntesting_labels = np.array(test_labels)\n\nhistory = model.fit(training_padded, training_labels, epochs=num_epochs, validation_data=(testing_padded, testing_labels), verbose=2)\n\nprint(\"Training Complete\")",
"Model: \"sequential\"\n_________________________________________________________________\n Layer (type) Output Shape Param # \n=================================================================\n embedding (Embedding) (None, 16, 100) 13847900 \n \n dropout (Dropout) (None, 16, 100) 0 \n \n conv1d (Conv1D) (None, 12, 64) 32064 \n \n max_pooling1d (MaxPooling1D (None, 3, 64) 0 \n ) \n \n lstm (LSTM) (None, 64) 33024 \n \n dense (Dense) (None, 1) 65 \n \n=================================================================\nTotal params: 13,913,053\nTrainable params: 65,153\nNon-trainable params: 13,847,900\n_________________________________________________________________\nEpoch 1/50\n4500/4500 - 34s - loss: 0.5669 - accuracy: 0.6999 - val_loss: 0.5263 - val_accuracy: 0.7343 - 34s/epoch - 7ms/step\nEpoch 2/50\n4500/4500 - 19s - loss: 0.5265 - accuracy: 0.7330 - val_loss: 0.5164 - val_accuracy: 0.7441 - 19s/epoch - 4ms/step\nEpoch 3/50\n4500/4500 - 19s - loss: 0.5114 - accuracy: 0.7455 - val_loss: 0.5099 - val_accuracy: 0.7481 - 19s/epoch - 4ms/step\nEpoch 4/50\n4500/4500 - 19s - loss: 0.4997 - accuracy: 0.7515 - val_loss: 0.5020 - val_accuracy: 0.7564 - 19s/epoch - 4ms/step\nEpoch 5/50\n4500/4500 - 19s - loss: 0.4916 - accuracy: 0.7580 - val_loss: 0.5014 - val_accuracy: 0.7529 - 19s/epoch - 4ms/step\nEpoch 6/50\n4500/4500 - 19s - loss: 0.4848 - accuracy: 0.7619 - val_loss: 0.5011 - val_accuracy: 0.7560 - 19s/epoch - 4ms/step\nEpoch 7/50\n4500/4500 - 19s - loss: 0.4786 - accuracy: 0.7667 - val_loss: 0.4993 - val_accuracy: 0.7549 - 19s/epoch - 4ms/step\nEpoch 8/50\n4500/4500 - 19s - loss: 0.4741 - accuracy: 0.7697 - val_loss: 0.5126 - val_accuracy: 0.7454 - 19s/epoch - 4ms/step\nEpoch 9/50\n4500/4500 - 19s - loss: 0.4683 - accuracy: 0.7718 - val_loss: 0.5038 - val_accuracy: 0.7527 - 19s/epoch - 4ms/step\nEpoch 10/50\n4500/4500 - 19s - loss: 0.4648 - accuracy: 0.7739 - val_loss: 0.5013 - val_accuracy: 0.7572 - 19s/epoch - 4ms/step\nEpoch 11/50\n4500/4500 - 19s - loss: 
0.4618 - accuracy: 0.7770 - val_loss: 0.5043 - val_accuracy: 0.7528 - 19s/epoch - 4ms/step\nEpoch 12/50\n4500/4500 - 19s - loss: 0.4598 - accuracy: 0.7774 - val_loss: 0.5015 - val_accuracy: 0.7581 - 19s/epoch - 4ms/step\nEpoch 13/50\n4500/4500 - 19s - loss: 0.4565 - accuracy: 0.7794 - val_loss: 0.5046 - val_accuracy: 0.7552 - 19s/epoch - 4ms/step\nEpoch 14/50\n4500/4500 - 19s - loss: 0.4553 - accuracy: 0.7790 - val_loss: 0.5064 - val_accuracy: 0.7574 - 19s/epoch - 4ms/step\nEpoch 15/50\n4500/4500 - 19s - loss: 0.4519 - accuracy: 0.7831 - val_loss: 0.5119 - val_accuracy: 0.7546 - 19s/epoch - 4ms/step\nEpoch 16/50\n4500/4500 - 19s - loss: 0.4505 - accuracy: 0.7821 - val_loss: 0.5051 - val_accuracy: 0.7533 - 19s/epoch - 4ms/step\nEpoch 17/50\n4500/4500 - 19s - loss: 0.4481 - accuracy: 0.7854 - val_loss: 0.5054 - val_accuracy: 0.7529 - 19s/epoch - 4ms/step\nEpoch 18/50\n4500/4500 - 19s - loss: 0.4471 - accuracy: 0.7848 - val_loss: 0.5072 - val_accuracy: 0.7530 - 19s/epoch - 4ms/step\nEpoch 19/50\n4500/4500 - 19s - loss: 0.4448 - accuracy: 0.7866 - val_loss: 0.5075 - val_accuracy: 0.7513 - 19s/epoch - 4ms/step\nEpoch 20/50\n4500/4500 - 19s - loss: 0.4438 - accuracy: 0.7870 - val_loss: 0.5088 - val_accuracy: 0.7489 - 19s/epoch - 4ms/step\nEpoch 21/50\n4500/4500 - 19s - loss: 0.4429 - accuracy: 0.7875 - val_loss: 0.5077 - val_accuracy: 0.7524 - 19s/epoch - 4ms/step\nEpoch 22/50\n4500/4500 - 19s - loss: 0.4412 - accuracy: 0.7890 - val_loss: 0.5130 - val_accuracy: 0.7511 - 19s/epoch - 4ms/step\nEpoch 23/50\n4500/4500 - 19s - loss: 0.4394 - accuracy: 0.7903 - val_loss: 0.5164 - val_accuracy: 0.7491 - 19s/epoch - 4ms/step\nEpoch 24/50\n4500/4500 - 19s - loss: 0.4394 - accuracy: 0.7890 - val_loss: 0.5228 - val_accuracy: 0.7456 - 19s/epoch - 4ms/step\nEpoch 25/50\n4500/4500 - 19s - loss: 0.4393 - accuracy: 0.7886 - val_loss: 0.5189 - val_accuracy: 0.7498 - 19s/epoch - 4ms/step\nEpoch 26/50\n4500/4500 - 19s - loss: 0.4376 - accuracy: 0.7900 - val_loss: 0.5186 - val_accuracy: 
0.7477 - 19s/epoch - 4ms/step\nEpoch 27/50\n4500/4500 - 19s - loss: 0.4368 - accuracy: 0.7912 - val_loss: 0.5190 - val_accuracy: 0.7496 - 19s/epoch - 4ms/step\nEpoch 28/50\n4500/4500 - 18s - loss: 0.4365 - accuracy: 0.7918 - val_loss: 0.5161 - val_accuracy: 0.7504 - 18s/epoch - 4ms/step\nEpoch 29/50\n4500/4500 - 19s - loss: 0.4362 - accuracy: 0.7921 - val_loss: 0.5161 - val_accuracy: 0.7501 - 19s/epoch - 4ms/step\nEpoch 30/50\n4500/4500 - 18s - loss: 0.4354 - accuracy: 0.7913 - val_loss: 0.5128 - val_accuracy: 0.7493 - 18s/epoch - 4ms/step\nEpoch 31/50\n4500/4500 - 18s - loss: 0.4357 - accuracy: 0.7908 - val_loss: 0.5210 - val_accuracy: 0.7489 - 18s/epoch - 4ms/step\nEpoch 32/50\n4500/4500 - 18s - loss: 0.4343 - accuracy: 0.7922 - val_loss: 0.5198 - val_accuracy: 0.7492 - 18s/epoch - 4ms/step\nEpoch 33/50\n4500/4500 - 18s - loss: 0.4337 - accuracy: 0.7925 - val_loss: 0.5179 - val_accuracy: 0.7502 - 18s/epoch - 4ms/step\nEpoch 34/50\n4500/4500 - 18s - loss: 0.4333 - accuracy: 0.7940 - val_loss: 0.5189 - val_accuracy: 0.7496 - 18s/epoch - 4ms/step\nEpoch 35/50\n4500/4500 - 18s - loss: 0.4334 - accuracy: 0.7929 - val_loss: 0.5149 - val_accuracy: 0.7513 - 18s/epoch - 4ms/step\nEpoch 36/50\n4500/4500 - 18s - loss: 0.4322 - accuracy: 0.7936 - val_loss: 0.5197 - val_accuracy: 0.7471 - 18s/epoch - 4ms/step\nEpoch 37/50\n4500/4500 - 18s - loss: 0.4322 - accuracy: 0.7932 - val_loss: 0.5206 - val_accuracy: 0.7509 - 18s/epoch - 4ms/step\nEpoch 38/50\n4500/4500 - 18s - loss: 0.4307 - accuracy: 0.7951 - val_loss: 0.5204 - val_accuracy: 0.7479 - 18s/epoch - 4ms/step\nEpoch 39/50\n4500/4500 - 18s - loss: 0.4303 - accuracy: 0.7951 - val_loss: 0.5230 - val_accuracy: 0.7493 - 18s/epoch - 4ms/step\nEpoch 40/50\n4500/4500 - 18s - loss: 0.4301 - accuracy: 0.7950 - val_loss: 0.5213 - val_accuracy: 0.7472 - 18s/epoch - 4ms/step\nEpoch 41/50\n4500/4500 - 18s - loss: 0.4293 - accuracy: 0.7957 - val_loss: 0.5253 - val_accuracy: 0.7501 - 18s/epoch - 4ms/step\nEpoch 42/50\n4500/4500 - 19s - 
loss: 0.4309 - accuracy: 0.7955 - val_loss: 0.5235 - val_accuracy: 0.7469 - 19s/epoch - 4ms/step\nEpoch 43/50\n4500/4500 - 19s - loss: 0.4299 - accuracy: 0.7958 - val_loss: 0.5185 - val_accuracy: 0.7477 - 19s/epoch - 4ms/step\nEpoch 44/50\n4500/4500 - 18s - loss: 0.4297 - accuracy: 0.7953 - val_loss: 0.5230 - val_accuracy: 0.7492 - 18s/epoch - 4ms/step\nEpoch 45/50\n4500/4500 - 19s - loss: 0.4290 - accuracy: 0.7968 - val_loss: 0.5192 - val_accuracy: 0.7484 - 19s/epoch - 4ms/step\nEpoch 46/50\n4500/4500 - 19s - loss: 0.4291 - accuracy: 0.7959 - val_loss: 0.5213 - val_accuracy: 0.7492 - 19s/epoch - 4ms/step\nEpoch 47/50\n4500/4500 - 19s - loss: 0.4280 - accuracy: 0.7952 - val_loss: 0.5224 - val_accuracy: 0.7477 - 19s/epoch - 4ms/step\nEpoch 48/50\n4500/4500 - 19s - loss: 0.4283 - accuracy: 0.7963 - val_loss: 0.5197 - val_accuracy: 0.7492 - 19s/epoch - 4ms/step\nEpoch 49/50\n4500/4500 - 18s - loss: 0.4295 - accuracy: 0.7964 - val_loss: 0.5214 - val_accuracy: 0.7462 - 18s/epoch - 4ms/step\nEpoch 50/50\n4500/4500 - 19s - loss: 0.4275 - accuracy: 0.7972 - val_loss: 0.5228 - val_accuracy: 0.7482 - 19s/epoch - 4ms/step\nTraining Complete\n"
],
[
"import matplotlib.image as mpimg\nimport matplotlib.pyplot as plt\n\n#-----------------------------------------------------------\n# Retrieve a list of list results on training and test data\n# sets for each training epoch\n#-----------------------------------------------------------\nacc=history.history['accuracy']\nval_acc=history.history['val_accuracy']\nloss=history.history['loss']\nval_loss=history.history['val_loss']\n\nepochs=range(len(acc)) # Get number of epochs\n\n#------------------------------------------------\n# Plot training and validation accuracy per epoch\n#------------------------------------------------\nplt.plot(epochs, acc, 'r')\nplt.plot(epochs, val_acc, 'b')\nplt.title('Training and validation accuracy')\nplt.xlabel(\"Epochs\")\nplt.ylabel(\"Accuracy\")\nplt.legend([\"Accuracy\", \"Validation Accuracy\"])\n\nplt.figure()\n\n#------------------------------------------------\n# Plot training and validation loss per epoch\n#------------------------------------------------\nplt.plot(epochs, loss, 'r')\nplt.plot(epochs, val_loss, 'b')\nplt.title('Training and validation loss')\nplt.xlabel(\"Epochs\")\nplt.ylabel(\"Loss\")\nplt.legend([\"Loss\", \"Validation Loss\"])\n\nplt.figure()\n\n\n# Expected Output\n# A chart where the validation loss does not increase sharply!",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4abe5cf8eb52e63c002a138fb51d0c0b109faa74
| 586,647 |
ipynb
|
Jupyter Notebook
|
modules/module-10/mod10-regression-linear-regression.ipynb
|
mhall-simon/python
|
ef2d18cf13be00d14df7c5d63778031deac58299
|
[
"MIT"
] | 5 |
2021-02-18T00:42:46.000Z
|
2021-11-19T17:22:55.000Z
|
modules/module-10/mod10-regression-linear-regression.ipynb
|
mhall-simon/python
|
ef2d18cf13be00d14df7c5d63778031deac58299
|
[
"MIT"
] | null | null | null |
modules/module-10/mod10-regression-linear-regression.ipynb
|
mhall-simon/python
|
ef2d18cf13be00d14df7c5d63778031deac58299
|
[
"MIT"
] | null | null | null | 241.021775 | 259,584 | 0.897951 |
[
[
[
"# Module 10 - Regression Algorithms - Linear Regression",
"_____no_output_____"
],
[
"Welcome to Machine Learning (ML) in Python!\n\nWe're going to use a dataset about vehicles and their respective miles per gallon (mpg) to explore the relationships between variables.\n\nThe first thing to be familiar with is the data preprocessing workflow. Data needs to be prepared in order for us to successfully use it in ML. This is where a lot of the actual work is going to take place!\n\nI'm going to use this dataset for each of the regression algorithms, so we can see how each one differs.\n\nThe next notebooks with the dataset will be:\n\n- Linear Regression w/ Transformed Target (Logarithmic)\n- Ridge Regression with Standardized Inputs\n- Ridge and LASSO Regression with Polynomial Features\n\nThese four notebooks are designed to be a part of a series, with this one being the first.\n\nWe're going to start by importing our usual packages and then some IPython settings to get more output:",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom IPython.core.interactiveshell import InteractiveShell\nInteractiveShell.ast_node_interactivity = \"all\"",
"_____no_output_____"
]
],
[
[
"## Part A: Data Exploration",
"_____no_output_____"
],
[
"The first thing to do is import and explore our mpg dataset!\n\nThere's a few things to note in the dataset description: na values are denoted by `?` and column names are in a separate doc.\n\nI added the column names so we don't have to worry about them:",
"_____no_output_____"
]
],
[
[
"loc = \"https://raw.githubusercontent.com/mhall-simon/python/main/data/car-mpg/auto-mpg.data\"\n\ndf = pd.read_csv(loc, sep=\"\\s+\", header=None, na_values=\"?\")\n\ncols = {0:\"mpg\", 1:\"cylinders\", 2:\"displacement\", 3:\"horsepower\", 4:\"weight\", 5:\"accel\", 6:\"year\", 7:\"origin\", 8:\"model\"}\ndf = df.rename(columns=cols)\n\ndf.head(15)",
"_____no_output_____"
]
],
[
[
"When starting, it's always good to have a look at how complete our data set is.\n\nLet's see just how many na values were brought into the dataset per column:",
"_____no_output_____"
]
],
[
[
"df.isna().sum()",
"_____no_output_____"
]
],
[
[
"We have 6 missing values for horsepower!\n\nA safe assumption for imputing missing values is to insert the column mean, let's do that! (Feature engineering is somewhere that we can go into this more in depth.)\n\n*Note:* Imputing values is something that's not always objective, as it introduces some biases. We could also drop those 6 rows out of our dataset, however, I think imputing average hp isn't too serious of an issue.",
"_____no_output_____"
]
],
[
[
"df = df.replace(np.nan, df.horsepower.mean())\ndf.isna().sum()",
"_____no_output_____"
]
],
[
[
"Now, there's no more missing values!\n\nLet's get some descriptive statistics running for our numerical columns (non-numerical are automatically dropped):",
"_____no_output_____"
]
],
[
[
"df.describe()",
"_____no_output_____"
]
],
[
[
"Another thing we can look at is the number of unique car models in the dataset:",
"_____no_output_____"
]
],
[
[
"df.nunique(axis=0)",
"_____no_output_____"
]
],
[
[
"For the ML analysis, there's too many models to worry about, so we're going to have them drop off the dataset! We're trying to predict mpg, and with our data the model name will have practically no predictive power!\n\nOne Hot Encoding the makes/models would make the dataset have almost more columns than rows!",
"_____no_output_____"
]
],
[
[
"df = df.drop(\"model\", axis=1)\ndf.head()",
"_____no_output_____"
]
],
[
[
"### Train-Test Split",
"_____no_output_____"
],
[
"We're getting closer to starting our analysis! The first major consideration is the train/test split, where we reserve a chunk of our dataset to validate the model.\n\nRemember, no peeking into the results with testing to train our model! That'll introduce a bias!\n\nLet's separate our data into X and y, and then run the split:",
"_____no_output_____"
]
],
[
[
"X = df.iloc[:,1:]\ny = df.iloc[:,0]",
"_____no_output_____"
],
[
"from sklearn.model_selection import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=97)",
"_____no_output_____"
]
],
[
[
"Another important thing to look at is the distributions of continuous variables and their pairwise relationships.\n\nSeaborn has a really cool pairplot function that allows us to easily visualize this automatically! We just need to pass in columns of continuous variables.\n\nNote: This is a marginal dependence, and does not keep all other variables fixed! We should only analyze this after our split!",
"_____no_output_____"
]
],
[
[
"train_dataset = X_train.copy()\ntrain_dataset.insert(0, \"mpg\", y_train)\n\nsns.pairplot(train_dataset[['mpg','displacement','horsepower','weight','accel']], kind='reg', diag_kind='kde')",
"_____no_output_____"
]
],
[
[
"When looking at this, there's two things to takeaway:\n\n1. `mpg` is close to being normal, but there's a long tail. This means we may be better taking the log of mpg when running our analysis - something to explore in the next notebook.\n\n2. Some relationships are not quite linear! We will work on this more in the following notebooks!\n\nLet's now get into the ML aspects!",
"_____no_output_____"
],
[
"## Part B: Data Preprocessing & Pipeline",
"_____no_output_____"
],
[
"There's a lot of online tutorials that show the SKLearn models and how to call them in one line, and not much else.\n\nA really powerful tool is to leverage the pipelines, as you can adjsut easily on the fly and not rewrite too much code!\n\nPipelines also reduce the potential for errors, as we only define preprocessing steps, and don't actually need to manipulate our tables. When we transform the target with a log later, we also don't need to worry about switching between log and normal values! It'll be handled for us.\n\nIt's also not as bad as it seems!\n\nThe first main step is to separate our data into:\n\n- categorical columns that need to be one-hot encoded\n- continuous columns (no changes - for now)\n- other processing subsets (none in these examples, but binary columns would be handled a bit differently.)\n - label encoding the response (y) variable when we get into classification models\n\nLet's get right to it! We can split apart the explanatory column names into the two categories with basic lists:",
"_____no_output_____"
]
],
[
[
"categorical_columns = ['cylinders','origin','year']\nnumerical_columns = ['displacement','horsepower','weight','accel']",
"_____no_output_____"
]
],
[
[
"*Discussion:* Why is Year Categorical, even though it's a numerical year?\n\nIn Linear Regression, the year 70 (1970) would appear to be a factor of 80 (1980) by about 9/10ths, and it would be scaled that way. This would not make sense, as we expect only marginal increases in mpg year-over-year. To prevent a relationship like this, we're going to one-hot encode the years into categories.\n\nNow, let's put together our preprocessing pipeline.\n\nWe'll need to:\n\n1. OneHot Encode Categorical\n2. Leave Continuous Alone\n\nLet's build our preprocessor:",
"_____no_output_____"
]
],
[
[
"from sklearn.preprocessing import OneHotEncoder\nfrom sklearn.compose import make_column_transformer\n\npreprocessor = make_column_transformer((OneHotEncoder(drop=\"first\"), categorical_columns), remainder=\"passthrough\")",
"_____no_output_____"
]
],
[
[
"Why are we dropping the first category in each categorical column?\n\nOur regression can imply the first one with zeros for all the encoded variables, and by not including it we are preventing colinearity from being introduced!\n\nA potential issue that can arise is when you encounter new labels in the test/validation sets that are not one-hot encoded. Right now, this would toss an error if it happens! Later notebooks will go into how to handle these errors.\n\nNow, let's build the pipeline:",
"_____no_output_____"
]
],
[
[
"from sklearn.pipeline import make_pipeline\nfrom sklearn.linear_model import LinearRegression\n\nmodel = make_pipeline(preprocessor, LinearRegression())",
"_____no_output_____"
]
],
[
[
"And now we can easily train our model and preprocess our data all in one step:",
"_____no_output_____"
]
],
[
[
"model.fit(X_train, y_train)",
"_____no_output_____"
]
],
[
[
"Before we start evaluating the model, I'll show you some useful features with the pipeline:\n\n1. View Named Steps",
"_____no_output_____"
]
],
[
[
"model.named_steps",
"_____no_output_____"
]
],
[
[
"2. View Coefficients and Intercept (Expanded Later)",
"_____no_output_____"
]
],
[
[
"model.named_steps['linearregression'].coef_",
"_____no_output_____"
],
[
"model.named_steps['linearregression'].intercept_",
"_____no_output_____"
]
],
[
[
"3. Generate Predictions\n\n*Viewing First 10*",
"_____no_output_____"
]
],
[
[
"model.predict(X_train)[:10]",
"_____no_output_____"
]
],
[
[
"## Part C: Evaluating Machine Learning Model",
"_____no_output_____"
],
[
"So, now we have an ML model, but how do we know if it's good?\n\nAlso, what's our criteria for good?\n\nThis changes depending upon what you're doing!\n\nLet's bring in some metrics, and look at our \"in sample\" performance. This is the performance valuation in sample, without looking at any test data yet!\n\n- $r^2$: coefficient of determination\n- mean absolute error\n- mean squared error\n\nLet's generate our in-sample predictions based upon the model:",
"_____no_output_____"
]
],
[
[
"y_pred_in = model.predict(X_train)",
"_____no_output_____"
]
],
[
[
"And now let's generate some metrics:\n\nThis compares the training (truth) values, to the ones predicted by the line of best fit.",
"_____no_output_____"
]
],
[
[
"from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error\n\nr2_score(y_train, y_pred_in)\nmean_squared_error(y_train, y_pred_in)\nmean_absolute_error(y_train, y_pred_in)",
"_____no_output_____"
]
],
[
[
"We're explaining about 87.5% of the variation in our in-sample dataset! That's pretty good, but will it hold when analyzing out of sample?\n\nAlso, we now know that our average absolute error is 2.09 mpg! That's not too bad, considering the range of the dataset and STD from the data:",
"_____no_output_____"
]
],
[
[
"y_train.std()\ny_train.max() - y_train.min()",
"_____no_output_____"
]
],
[
[
"Let's now visualize our predictions! As a note, we want all of our datapoints to be along the line!\n\n*Tip:* If you're reproducing this graph, ensure that the diagonal goes through the origin of the plot. The red line is setup to draw from corner to corner, and if you move your axes this may not work out!",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots(figsize=(5,5))\nplt.scatter(y_train, y_pred_in)\nax.plot([0,1],[0,1], transform=ax.transAxes, ls=\"--\", c=\"red\")\nplt.xlim([0,50])\nplt.ylim([0,50])\nplt.ylabel(\"Model Predictions\")\nplt.xlabel(\"Truth Values\")\nplt.title(\"In Sample Performance\")\nplt.show();",
"_____no_output_____"
]
],
[
[
"Our predictions are pretty good!\n\nA few things to note:\n\n- It's a really good fit, but it appears that there's a slight curve to this dataset.\n- This is still in sample (we trained the model on this data)\n- If we're making predictions, what regions are we confident in?\n\nI think average mpg we'll be accurate, however, at the edges we're missing some of the trend.\n\nLet's plot our residual error to see the shape:",
"_____no_output_____"
]
],
[
[
"plt.scatter(y_train, y_train-y_pred_in)\nplt.xlabel(\"Truth Values - In Sample\")\nplt.ylabel(\"Residual Error\")\nplt.xlim([5,50])\nplt.plot([5,50],[0,0], color='black', alpha=0.6)\nplt.show();",
"_____no_output_____"
]
],
[
[
"Our errors definitely have curvature in them! We'll improve upon this in the next module!\n\nFor now...\n\nLet's start looking at the coefficients in our model while it's simple.\n\nWe can grab coefficients out of the preprocessor to ensure that the coefficients line up with labels.\n\nIt'll always be in order of the preprocessor, so we can first fetch the feature names from the one hot encoded, and then just concatenate our numerical columns as there were no changes!",
"_____no_output_____"
]
],
[
[
"feature_names = (model.named_steps['columntransformer']\n .named_transformers_['onehotencoder']\n .get_feature_names(input_features=categorical_columns))\n\nfeature_names = np.concatenate([feature_names, numerical_columns])\n\ncoefs = pd.DataFrame(\n model.named_steps['linearregression'].coef_,\n columns=['Coefficients'],\n index=feature_names\n)\n\ncoefs",
"_____no_output_____"
]
],
[
[
"Let's plot the coefficients to see if there's anything we can learn out of it!",
"_____no_output_____"
]
],
[
[
"coefs.Coefficients.plot(kind='barh', figsize=(9,7))\nplt.title(\"Unscaled Linear Regression Coefficients\")\nplt.show();",
"_____no_output_____"
]
],
[
[
"Woah, it looks like weight in unimportant at first glance, even though it would probably impact mpg quite a bit!\n\nA word of caution! We just can't compare the coefficients, as they're in a different scale!\n\nIf we scale them with their standard deviation, then we can compare them. However, some meaning is lost!\n\nCurrently, the coefficient `-0.034440` for `horsepower` means that while holding all else equal, increasing the horsepower by 1 unit decreases mpg by about 0.034 mpg!\n\nSo, if we add 100 hp to the car, mileage decreases by about 3.4 mpg if we hold all else equal!\n\nLet's scale these coefficients to compare them better! Just keep in mind that the 1hp:-0.34mpg relationship will no longer be interpretable from the scaled coefficients. But, we will be able to compare between coefficients.\n\nUsing the model pipeline, we can easily transform our data using the built in transformer, and then take the std:\n\n`model.named_steps['columntransformer'].transform(DATASET)` is how we can use the transformer we built above.\n\nWhen training the model, this dataset transformation happened all behind the scenes!! However, we can reproduce it with our training sample to work with it manually:\n\n**NOTE:** The pipeline transformation is better than manual, because we know for certain the order of the columns that are being outputted. We fetched them above! The preprocessor in this instance returned a SciPy sparse matrix, which we can import with a new DataFrame constructor:",
"_____no_output_____"
]
],
[
[
"X_train_preprocessed = pd.DataFrame.sparse.from_spmatrix(\n model.named_steps['columntransformer'].transform(X_train),\n columns=feature_names\n)\n\nX_train_preprocessed.head(10)",
"_____no_output_____"
]
],
[
[
"By plotting the standard deviations, we can see for certain that the coeffs are definitely in a different scale!\n\nWeight varies in the thousands, while acceleration is usually around 10-20 seconds!!",
"_____no_output_____"
]
],
[
[
"X_train_preprocessed.std(axis=0).plot(kind='barh', figsize=(9,7))\nplt.title(\"Features Std Dev\")\nplt.show();",
"_____no_output_____"
]
],
[
[
"As you can probably see, the standard deviation of weight is far higher than any other variable!\n\nThis makes it impossible to compare.\n\nNow, let's scale everything.\n\nThis scale works because very large continuous variables have a large standard deviation, but very small coefficients, which brings them down. The opposite is true for very small continuous variables for standard deviations, their coefficient is usually much larger. By multiplying the two together, we're bringing everything in towrads the middle, and with the same units of measurement.",
"_____no_output_____"
]
],
[
[
"coefs['coefScaled'] = coefs.Coefficients * X_train_preprocessed.std(axis=0)\ncoefs",
"_____no_output_____"
]
],
[
[
"Now, let's plot the scaled coefs:",
"_____no_output_____"
]
],
[
[
"coefs.coefScaled.plot(kind=\"barh\", figsize=(9,7))\nplt.title(\"Scaled Linear Coefficients\")\nplt.show();",
"_____no_output_____"
]
],
[
[
"Earlier, weight had almost no impact on the model at first glance! Now, we can see that it's the most important explanatory variable for mpg.\n\nLet's now do our final validations for the model by bringing in the test data!!\n\nThe first is going to be done using the test (reserved) dataset, which we can make predictions with easily:",
"_____no_output_____"
]
],
[
[
"y_pred_out = model.predict(X_test)",
"_____no_output_____"
]
],
[
[
"And now let's generate a small DataFrame to compare metrics from in sample and out of sample!\n\nOut of sample performance is usually worse, it's usually a question of how much!",
"_____no_output_____"
]
],
[
[
"metrics = pd.DataFrame(index=['r2','mse','mae'],columns=['in','out'])\n\nmetrics['in'] = (r2_score(y_train, y_pred_in), mean_squared_error(y_train, y_pred_in), mean_absolute_error(y_train, y_pred_in))\nmetrics['out'] = (r2_score(y_test, y_pred_out), mean_squared_error(y_test, y_pred_out), mean_absolute_error(y_test, y_pred_out))\n\nmetrics",
"_____no_output_____"
]
],
[
[
"When looking at the data, we see that the $r^2$ value decreased slightly from 0.875 to 0.854! This is still fairly significant!\n\nAnd let's do a similar graph for out of sample performance:",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots(figsize=(5,5))\nplt.scatter(y_test, y_pred_out)\nax.plot([0,1],[0,1], transform=ax.transAxes, ls=\"--\", c=\"red\")\nplt.xlim([0,50])\nplt.ylim([0,50])\nplt.ylabel(\"Model Predictions\")\nplt.xlabel(\"Truth Values\")\nplt.title(\"Out of Sample Performance\")\nplt.show();",
"_____no_output_____"
]
],
[
[
"We're doing pretty good! There's stil some curvature that we'll work on fixing in the next notebooks.\n\nLet's plot our residuals one more time:",
"_____no_output_____"
]
],
[
[
"plt.scatter(y_test, y_test-y_pred_out)\nplt.xlabel(\"Truth Values - Out of Sample\")\nplt.ylabel(\"Residual Error\")\nplt.xlim([5,50])\nplt.plot([5,50],[0,0], color='black', alpha=0.6)\nplt.show();",
"_____no_output_____"
]
],
[
[
"Our model is pretty good, except for when we go above 32-ish mpg. Our model is predicting values far too high.\n\nWe'll solve this in a later notebook.\n\nAnother key question for ML is...\n\nHow do we know if the performance is due to just our sample selected? How much would our model change depending upon the sample selected?\n\nWe can solve for this using cross validation!\n\nCross validation takes different samples from our dataset, runs the regression, and then outputs the results!\n\nWe can easily cut the dataset into chunks and see how it behaves.\n\nWe're going to plot the distributions of coefficients throughout the folds to see how stable the model is:",
"_____no_output_____"
]
],
[
[
"from sklearn.model_selection import cross_validate\nfrom sklearn.model_selection import RepeatedKFold\n\n# Part 1: Defining Cross Validation Model\ncv_model = cross_validate(\n model, X, y, cv=RepeatedKFold(n_splits=5, n_repeats=5),\n return_estimator=True, n_jobs=-1\n)\n\n# Part 2: Analyzing Each Model's Coefficients, and Setting Them In DataFrame:\ncv_coefs = pd.DataFrame(\n [est.named_steps['linearregression'].coef_ * X_train_preprocessed.std(axis=0) for est in cv_model['estimator']],\n columns=feature_names\n)\n\n# Part 3: Plotting the Distribution of Coefficients\nplt.figure(figsize=(9,7))\nsns.stripplot(data=cv_coefs, orient='h', color='k', alpha=0.5)\nsns.boxplot(data=cv_coefs, orient='h', color='cyan', saturation=0.5)\nplt.axvline(x=0, color='.5')\nplt.xlabel('Coefficient importance')\nplt.title('Coefficient importance and its variability')\nplt.subplots_adjust(left=.3)\nplt.show();",
"_____no_output_____"
]
],
[
[
"What are the takeaways from this plot?\n\nOur model doesn't appear to be too sensitive to the splits in training and testing!\n\nThis is a signal that our model is robust, and we should have confidence that our findings weren't due to choosing a \"good\" sample!\n\nIf we saw a variable changing from -6 to +2, that would be a sign it is not stable!\n\nNow, we're ready to start exploring the second notebook! Which starts working towards a fix in the curvature!",
"_____no_output_____"
],
[
"## Bonus Box: Easily Checking for Variable Colinearity",
"_____no_output_____"
],
[
"If we suspect two variables are colinear, we can easily check for it with the following code:",
"_____no_output_____"
]
],
[
[
"plt.scatter(cv_coefs['weight'], cv_coefs['displacement'])\nplt.ylabel('Displacement coefficient')\nplt.xlabel('Weight coefficient')\nplt.grid(True)\nplt.title('Co-variations of variables across folds');",
"_____no_output_____"
]
],
[
[
"These are not colinear across folds, which is good for the model!\n\nIf they *were* colinear across folds, it would look something like this:\n\n<div>\n<img src=https://github.com/mhall-simon/python/blob/main/data/screenshots/Screen%20Shot%202021-03-22%20at%206.38.12%20PM.png?raw=True width=\"400\"/>\n</div>",
"_____no_output_____"
],
[
"If you notice strong colinearlity, then one should be removed and you can run the model again!",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
4abe67261b70164b2996827b1f4493c34f052a08
| 174,436 |
ipynb
|
Jupyter Notebook
|
Statistics and Models II/SOLUTIONS/Copy_of_SOLUTIONS_Numpy_vectorization.ipynb
|
wesleybeckner/technology_explorers
|
f32a953e6997157c978f34d8a726d61021e4f799
|
[
"MIT"
] | null | null | null |
Statistics and Models II/SOLUTIONS/Copy_of_SOLUTIONS_Numpy_vectorization.ipynb
|
wesleybeckner/technology_explorers
|
f32a953e6997157c978f34d8a726d61021e4f799
|
[
"MIT"
] | null | null | null |
Statistics and Models II/SOLUTIONS/Copy_of_SOLUTIONS_Numpy_vectorization.ipynb
|
wesleybeckner/technology_explorers
|
f32a953e6997157c978f34d8a726d61021e4f799
|
[
"MIT"
] | null | null | null | 174,436 | 174,436 | 0.849154 |
[
[
[
"# Losing your loops",
"_____no_output_____"
],
[
"## Python is slow!\n \n* dynamically typed -- Python interpreter needs to compare and convert(if needed) in runtime everytime a variable is written, modified or referenced\n* interpreted -- Vanilla Python comes with no compiler optimization\n* Uses buffers inefficiently because Python lists aren't homogenous, thus making it super slow compared to languages like C, C++ or Julia. \n\nMore info [here](\"http://jakevdp.github.io/blog/2014/05/09/why-python-is-slow/\").",
"_____no_output_____"
],
[
"### Timing a silly function in Python",
"_____no_output_____"
]
],
[
[
"def silly(N):\n d = 0.0\n for i in range(N):\n d += (i % 3 -1) * i",
"_____no_output_____"
],
[
"%timeit silly(10000)\n#1000 loops, best of 5: 1.43 ms per loop",
"1000 loops, best of 5: 1.45 ms per loop\n"
]
],
[
[
"### Timing the same silly function in C",
"_____no_output_____"
]
],
[
[
"%%writefile checktime.c\n\n#include <time.h>\n#include <stdio.h>\n\nvoid silly(int N){\n int d = 0.0;\n for(int i=0; i <= N; i++){\n d = d + (i % 3 -1) * i;\n }\n}\n\nlong double mean(long double arr[1000]){\n int i;\n long double sum = 0.0;\n long double average = 0.0;\n for(i = 0; i < 1000; i++){\n sum = sum + arr[i];\n }\n average = sum/1000;\n return average;\n}\n\nint main(){ \n long double time_elapsed = 0.0;\n long double mean_time = 0.0;\n long double min_time = 99.0;\n\n for(int j=0; j < 5; j++){\n long double timearr[1000];\n for(int i=0; i < 1000; i++){\n clock_t tic = clock(); \n silly(10000); \n clock_t toc = clock();\n time_elapsed = (long double)(toc - tic) / CLOCKS_PER_SEC;\n timearr[i] = time_elapsed;\n }\n mean_time = mean(timearr);\n if(mean_time < min_time){\n min_time = mean_time;\n } \n }\n\n printf(\"1000 loops, best of 5: %Lf s per loop\\n\", min_time);\n return 0; \n} ",
"Writing checktime.c\n"
],
[
"%%shell\ngcc checktime.c -o output\n./output\n#1000 loops, best of 5: 0.000028 s per loop",
"1000 loops, best of 5: 0.000028 s per loop\n"
]
],
[
[
"As you can see, the same code timed in C is ~100x faster than in vanilla Python\n\n<mark>\"What makes Python fast (for development), is what makes it slow (in code execution)\"</mark> -- Jake Vanderplas",
"_____no_output_____"
],
[
"## So, what's the remedy? Numpy!\nor is it?\nLet's check how Numpy compares with vanilla Python w.r.t. basic scalar Math operations",
"_____no_output_____"
]
],
[
[
"import math\nimport numpy as np\n%timeit math.log(10) #10000000 loops, best of 5: 165 ns per loop\n%timeit np.log(10) #1000000 loops, best of 5: 1.21 µs per loop",
"The slowest run took 34.18 times longer than the fastest. This could mean that an intermediate result is being cached.\n10000000 loops, best of 5: 165 ns per loop\nThe slowest run took 36.44 times longer than the fastest. This could mean that an intermediate result is being cached.\n1000000 loops, best of 5: 1.18 µs per loop\n"
],
[
"%timeit math.exp(3) #10000000 loops, best of 5: 131 ns per loop\n%timeit np.exp(3) #1000000 loops, best of 5: 1.19 µs per loop",
"The slowest run took 64.42 times longer than the fastest. This could mean that an intermediate result is being cached.\n10000000 loops, best of 5: 128 ns per loop\nThe slowest run took 85.44 times longer than the fastest. This could mean that an intermediate result is being cached.\n1000000 loops, best of 5: 1.13 µs per loop\n"
],
[
"# Sampling from a normal distribution\nimport random\n%timeit random.gauss(0, 1) #1000000 loops, best of 5: 776 ns per loop\n%timeit np.random.normal() #100000 loops, best of 5: 2.98 µs per loop",
"The slowest run took 11.83 times longer than the fastest. This could mean that an intermediate result is being cached.\n1000000 loops, best of 5: 775 ns per loop\nThe slowest run took 13.18 times longer than the fastest. This could mean that an intermediate result is being cached.\n100000 loops, best of 5: 2.93 µs per loop\n"
]
],
[
[
"Matrix multiplication in vanilla Python\n\n<p align=\"center\">\n<img src=\"https://www.mscroggs.co.uk/img/full/multiply_matrices.gif\" width=500 height=200></img>\n</p>\n\n\n",
"_____no_output_____"
]
],
[
[
"def matmul_1(mat1, mat2):\n mat1_rows, mat1_cols = len(mat1), len(mat1[0])\n mat2_rows, mat2_cols = len(mat2), len(mat2[0])\n # assert mat1_cols == mat2_rows, \"Check matrix dimensions\"\n answer = [[0]*mat2_cols] * mat1_rows\n\n for i in range(mat1_rows):\n for j in range(mat2_cols):\n agg = 0\n for k in range(mat2_rows):\n agg += (mat1[i][k]*mat2[k][j])\n answer[i][j] = agg\n return answer\n\n# matmul_1([[1,1],[1,1]], [[2,2],[2,2]])",
"_____no_output_____"
]
],
[
[
"<p align=\"center\">\n<img src=\"https://boydjohnson.dev/blog/concurrency-matrix-multiplication/matrix-multiplication-good.gif\" width=400 height=300></img>\n</p>\n",
"_____no_output_____"
]
],
[
[
"%%timeit -n 10\nmatmul_1([[1]*50]*50, [[2]*50]*50) #10 loops, best of 5: 21.1 ms per loop",
"10 loops, best of 5: 21.6 ms per loop\n"
]
],
[
[
"#### Exercise: Matrix multiplication in Numpy loops\n\nWrite the same code as above, using Numpy arrays",
"_____no_output_____"
]
],
[
[
"def matmul_2(mat1, mat2):\n ############################################################################\n ### TODO: Complete this function to perform matmul on two ndarrays ###\n ############################################################################\n mat1_rows, mat1_cols = mat1.shape\n mat2_rows, mat2_cols = mat2.shape\n # assert mat1_cols == mat2_rows, \"Check matrix dimensions\"\n answer = np.zeros((mat1_rows, mat2_cols))\n\n for i in np.arange(mat1_rows):\n for j in np.arange(mat2_cols):\n agg = 0\n for k in np.arange(mat2_rows):\n agg += (mat1[i,k]*mat2[k,j])\n answer[i,j] = agg\n return answer\n\n# matmul_2(np.array([[1,1],[1,1]]), np.array([[2,2],[2,2]]))",
"_____no_output_____"
],
[
"%%timeit -n 10\nmatmul_2(np.full((50,50), 1), np.full((50,50), 2)) # 10 loops, best of 5: 152 ms per loop",
"10 loops, best of 5: 154 ms per loop\n"
]
],
[
[
"Numpy is again slower than vanilla Python. \n\n_So, why are we discussing this? Numpy seems to be slower than vanilla Python right?_\n\nTime to unleash Numpy's inner strength!!",
"_____no_output_____"
],
[
"## Vectorization, a.k.a. Array Programming\n\nDefinitions:\n* This practice of replacing explicit loops with array expressions is commonly referred to as vectorization. In general, vectorized array operations will often be one or two (or more) orders of magnitude faster than their pure Python equivalents, with the biggest impact in any kind of numerical computations. [[Source](https://www.oreilly.com/library/view/python-for-data/9781449323592/ch04.html)]\n\n* Generalizing operations we do on scalars (ie., single numbers) to apply transparently to vectors, matrices, and higher-dimensional arrays, which may be executed on a vector processor parallelly (either on a SIMD enabled CPU, or GPU).\n\nThe matrix multiplication example we looked at is one of the vector-equivalents to scalar multiplication.\n\n\n\n<p align=\"center\">\n<img src=\"https://media.springernature.com/lw685/springer-static/image/art%3A10.1038%2Fs41586-020-2649-2/MediaObjects/41586_2020_2649_Fig2_HTML.png?as=webp\" width=500 height=500></img>\n</p>",
"_____no_output_____"
]
],
[
[
"%%timeit -n 10\nnp.matmul(np.ones((50,50)), np.ones((50,50))*2) #10 loops, best of 5: 42.1 µs per loop ",
"10 loops, best of 5: 42.1 µs per loop\n"
]
],
[
[
"In this case it is evident that, vectorized Numpy is\n* ~500x faster than vanilla Python\n* ~3600x faster than loopy Numpy\n\nSo, we should somehow re-formulate the task-at-hand to a vectorized operation. This allows us to use Numpy's inbuilt vectorized functions. Fortunately, Numpy provides us many tricks to help us do this ",
"_____no_output_____"
],
[
"## Strategies to speed up vanilla Python using Numpy\n\nAlong with using these direct vectorized Numpy operations, we also have other tricks to speed-up things.",
"_____no_output_____"
],
[
"### **1. Universal functions** ([ufunc](https://numpy.org/doc/stable/reference/ufuncs.html))\nA ufunc operates on ndarrays in an element-by-element fashion.\nThe idea is to push the loop into the compiled layer that underlies NumPy, thus avoiding the slow loops in Python. \n\nJust perform an operation on an ndarray like you would on a scalar value. Numpy would do it for every element in the array using its optimized C/Fortran routines beneath (check list of available ufuncs [here](https://numpy.org/doc/stable/reference/ufuncs.html#available-ufuncs)).\n",
"_____no_output_____"
],
[
"#### Exercise: Computing reciprocals",
"_____no_output_____"
]
],
[
[
"def compute_reciprocals(values):\n ############################################################################\n ### TODO: Compute element wise reciprocals ###\n ############################################################################\n output = np.empty(len(values))\n for i in range(len(values)):\n output[i] = 1.0 / values[i]\n return output\n \n# values = np.random.randint(1, 10, size=5)\n# compute_reciprocals(values)",
"_____no_output_____"
]
],
[
[
"Now, let's compute element-wise reciprocal using Numpy",
"_____no_output_____"
]
],
[
[
"################################################################################\n##### TODO: Compute element wise reciprocals without loops #####\n################################################################################\n(1.0 / big_array)",
"_____no_output_____"
]
],
[
[
"Let's time the two functions now",
"_____no_output_____"
]
],
[
[
"big_array = np.random.randint(1, 100, size=1000000)\n%timeit compute_reciprocals(big_array)",
"1 loop, best of 5: 460 ms per loop\n"
],
[
"%timeit (1.0 / big_array)",
"100 loops, best of 5: 2.61 ms per loop\n"
],
[
"np.allclose((1.0/big_array), compute_reciprocals(big_array))",
"_____no_output_____"
]
],
[
[
"#### Arithmetic with arrays\n\nStandard arithmetic operators are overloaded in Numpy to enable vectorization",
"_____no_output_____"
]
],
[
[
"x = np.arange(10)\nprint(\"x =\", x)\nprint(\"x + 5 =\", x + 5)\nprint(\"x - 5 =\", x - 5)\nprint(\"x * 2 =\", x * 2)\nprint(\"x / 2 =\", x / 2)\nprint(\"x // 2 =\", x // 2) # floor division\nprint(\"-x = \", -x) # negation\nprint(\"x ** 2 = \", x ** 2) # ??\nprint(\"x % 2 = \", x % 2) # ??",
"x = [0 1 2 3 4 5 6 7 8 9]\nx + 5 = [ 5 6 7 8 9 10 11 12 13 14]\nx - 5 = [-5 -4 -3 -2 -1 0 1 2 3 4]\nx * 2 = [ 0 2 4 6 8 10 12 14 16 18]\nx / 2 = [0. 0.5 1. 1.5 2. 2.5 3. 3.5 4. 4.5]\nx // 2 = [0 0 1 1 2 2 3 3 4 4]\n-x = [ 0 -1 -2 -3 -4 -5 -6 -7 -8 -9]\nx ** 2 = [ 0 1 4 9 16 25 36 49 64 81]\nx % 2 = [0 1 0 1 0 1 0 1 0 1]\n"
],
[
"# Chaining ufuncs\n-(0.5*x + 1) ** 0.5",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
]
],
[
[
"theta = np.linspace(0, np.pi, 5)\nprint(\"theta = \", theta)\nprint(\"sin(theta) = \", np.around(np.sin(theta), decimals=3))\nprint(\"cos(theta) = \", np.around(np.cos(theta), decimals=3))\nprint(\"tan(theta) = \", np.tan(theta))",
"theta = [0. 0.78539816 1.57079633 2.35619449 3.14159265]\nsin(theta) = [0. 0.707 1. 0.707 0. ]\ncos(theta) = [ 1. 0.707 0. -0.707 -1. ]\ntan(theta) = [ 0.00000000e+00 1.00000000e+00 1.63312394e+16 -1.00000000e+00\n -1.22464680e-16]\n"
]
],
[
[
"#### Exercise: Count transitions\nGiven an ndarray consisting of booleans, find the count of `False` to `True` transitions",
"_____no_output_____"
]
],
[
[
"np.random.seed(42)\nbool_arr = np.random.choice([False, True], size=100000)\nbool_arr",
"_____no_output_____"
]
],
[
[
"Let's do it in vanilla Python",
"_____no_output_____"
]
],
[
[
"def count_transitions(arr):\n ############################################################################\n ### TODO: count False to True transitions ###\n ############################################################################\n count = 0\n for i, j in zip(arr[:-1], arr[1:]):\n if j and not i:\n count += 1\n return count\n\n# count_transitions(bool_arr)",
"_____no_output_____"
]
],
[
[
"Now, try doing the same in Numpy using vectorization",
"_____no_output_____"
]
],
[
[
"################################################################################\n### TODO: count False to True transitions using vectorization ###\n################################################################################\n(np.logical_and((~bool_arr[:-1]), (bool_arr[1:]) )).sum()\n\n# Alternate solution\n(bool_arr[:-1] < bool_arr[1:]).sum()",
"_____no_output_____"
],
[
"%%timeit\ncount_transitions(bool_arr) # 100 loops, best of 5: 9.02 ms per loop",
"100 loops, best of 5: 9.02 ms per loop\n"
],
[
"%%timeit\n(bool_arr[:-1] < bool_arr[1:]).sum() # 1000 loops, best of 5: 230 µs per loop",
"1000 loops, best of 5: 230 µs per loop\n"
]
],
[
[
"### **2. Aggregations** \n\nComputing summary statistics over a data, like central tendencies, deviations, min, max, quantiles etc. can be vectorized using Numpy\n\nLet's compare Python's aggregation functions with those of Numpy",
"_____no_output_____"
]
],
[
[
"big_array = np.random.rand(1000000)\n%timeit sum(big_array)\n%timeit np.sum(big_array)",
"_____no_output_____"
],
[
"%timeit min(big_array)\n%timeit np.min(big_array)",
"_____no_output_____"
]
],
[
[
"For min, max, sum, and several other NumPy aggregates, a shorter syntax is to use methods of the array object itself",
"_____no_output_____"
]
],
[
[
"print(big_array.min(), big_array.max(), big_array.sum())",
"_____no_output_____"
]
],
[
[
"Multi-dimensional aggregates",
"_____no_output_____"
]
],
[
[
"M = np.random.random((3, 4))\nM",
"_____no_output_____"
]
],
[
[
"By default, each NumPy aggregation function will return the aggregate over the entire array",
"_____no_output_____"
]
],
[
[
"np.sum(M)",
"_____no_output_____"
]
],
[
[
"Aggregate across columns",
"_____no_output_____"
]
],
[
[
"M.min(axis=0)",
"_____no_output_____"
]
],
[
[
"Aggregate across rows",
"_____no_output_____"
]
],
[
[
"M.max(axis=1)",
"_____no_output_____"
]
],
[
[
"The following table provides a list of useful aggregation functions available in Numpy\n\n|Function Name | NaN-safe Version | Description |\n|-------------------|---------------------|-----------------------------------------------|\n| ``np.sum`` | ``np.nansum`` | Compute sum of elements |\n| ``np.prod`` | ``np.nanprod`` | Compute product of elements |\n| ``np.mean`` | ``np.nanmean`` | Compute mean of elements |\n| ``np.std`` | ``np.nanstd`` | Compute standard deviation |\n| ``np.var`` | ``np.nanvar`` | Compute variance |\n| ``np.min`` | ``np.nanmin`` | Find minimum value |\n| ``np.max`` | ``np.nanmax`` | Find maximum value |\n| ``np.argmin`` | ``np.nanargmin`` | Find index of minimum value |\n| ``np.argmax`` | ``np.nanargmax`` | Find index of maximum value |\n| ``np.median`` | ``np.nanmedian`` | Compute median of elements |\n| ``np.percentile`` | ``np.nanpercentile``| Compute rank-based statistics of elements |\n| ``np.any`` | N/A | Evaluate whether any elements are true |\n| ``np.all`` | N/A | Evaluate whether all elements are true |",
"_____no_output_____"
],
[
"### Exercise: Mean centering \nSubtract the mean of the list `rand_list` from every element in the same ",
"_____no_output_____"
]
],
[
[
"import random\nrand_list = [random.randint(10,20) for i in range(10000)]",
"_____no_output_____"
],
[
"def mean_center(data):\n ############################################################################\n ##### TODO: Complete this function in vanilla Python to perform #####\n ##### mean centering #####\n ############################################################################\n data = data.copy()\n sum = 0.0\n for i in range(len(data)):\n sum += data[i]\n mean = sum/len(data)\n\n for i in range(len(data)):\n data[i] -= mean\n return data",
"_____no_output_____"
]
],
[
[
"Now let's do it in Numpy using ufuncs",
"_____no_output_____"
]
],
[
[
"def mean_center_with_numpy(data):\n ############################################################################\n ##### TODO: Now do the same without using any for-loops #####\n ############################################################################\n \n rand_arr = np.array(data) # Convert to numpy array\n mean = np.mean(rand_arr)\n return rand_arr - mean\n\nnp.allclose(mean_center(rand_list), mean_center_with_numpy(rand_list))",
"_____no_output_____"
],
[
"%%timeit\nmean_center(rand_list) # 100 loops, best of 5: 2.2 ms per loop",
"100 loops, best of 5: 2.2 ms per loop\n"
],
[
"%%timeit\nmean_center_with_numpy(rand_list) # 1000 loops, best of 5: 742 µs per loop",
"The slowest run took 11.76 times longer than the fastest. This could mean that an intermediate result is being cached.\n1000 loops, best of 5: 742 µs per loop\n"
]
],
[
[
"### Exercise: Max profit over stock price data\n\nYou are given the stock closing price history as a sequence. Assume that you can make one purchase and one sale. What is the max profit that can be obtained?",
"_____no_output_____"
]
],
[
[
"# Generating the stock data\nnp.random.seed(42)\nprices = np.full(200, fill_value=np.nan)\nprices[[10, 25, 60, -5, 90 ,120, 150, 190]] = [120., 30., 75., 45., 60., 90., 90., 95.] # array indexing\n\nx = np.arange(len(prices))\nis_valid = ~np.isnan(prices)\nprices = np.interp(x=x, xp=x[is_valid], fp=prices[is_valid])\nprices += np.random.randn(len(prices)) * 2 # Gaussian noise\n\nfig, ax = plt.subplots()\nax.plot(prices)\nax.set_title('Stock Price History')\nax.set_xlabel('Time')\nax.set_ylabel('Price')",
"_____no_output_____"
],
[
"def profit(prices):\n ############################################################################\n ##### TODO: Compute the max profit. Have two accumulators. ##### \n ##### one to keep track of minima, one to record max profit #####\n ############################################################################\n max_px = 0\n min_px = prices[0]\n for px in prices[1:]:\n min_px = min(min_px, px)\n max_px = max(px - min_px, max_px)\n return max_px",
"_____no_output_____"
],
[
"def profit_with_numpy(prices):\n ############################################################################\n ##### TODO: Compute the max profit in Numpy without any for-loops ##### \n ##### check out <ufunc>.accumulate #####\n ############################################################################ \n prices = np.asarray(prices)\n accumulated_mins = np.minimum.accumulate(prices) # 1 pass through the data \n return np.max(prices - accumulated_mins) # 2 passes through the data ",
"_____no_output_____"
],
[
"print(profit(prices), profit_with_numpy(prices))\nnp.allclose(profit_with_numpy(prices), profit(prices))",
"70.08161120230956 70.08161120230956\n"
],
[
"%%timeit\nprofit(prices) # 10000 loops, best of 5: 59.8 µs per loop",
"10000 loops, best of 5: 59.8 µs per loop\n"
],
[
"%%timeit\nprofit_with_numpy(prices) # 100000 loops, best of 5: 8.34 µs per loop",
"The slowest run took 20.97 times longer than the fastest. This could mean that an intermediate result is being cached.\n100000 loops, best of 5: 8.34 µs per loop\n"
]
],
[
[
"### **3. Broadcasting**\n\nNumpy provides a set of rules which allows us to use ufuncs on arrays of different sizes and/or dimensions. \n\nPseudocode of broadcasting:\n\n```\nif the arrays have different number of dims:\n left-pad the smaller shape array with 1s to match the number of dims \n\nif any particular dim doesn't match:\n if one of the those dims is a 1:\n broadcast this dim\n else:\n throw error\n```\n\n<p align=\"center\">\n<img src=\"https://github.com/jakevdp/PythonDataScienceHandbook/raw/8a34a4f653bdbdc01415a94dc20d4e9b97438965/notebooks/figures/02.05-broadcasting.png\" width=500 height=350></img>\n</p>\n\nLet's understand these 3 cases ",
"_____no_output_____"
],
[
"Case 1",
"_____no_output_____"
]
],
[
[
"arr1 = np.arange(3)\narr2 = np.array(5) # scalar value\nprint(arr1.shape, arr2.shape)\nprint(arr1 + arr2)",
"(3,) ()\n[5 6 7]\n"
]
],
[
[
"Case 2",
"_____no_output_____"
]
],
[
[
"arr1 = np.ones((3,3))\narr2 = np.arange(3)\nprint(arr1.shape, arr2.shape)\nprint(arr1 + arr2)",
"(3, 3) (3,)\n[[1. 2. 3.]\n [1. 2. 3.]\n [1. 2. 3.]]\n"
]
],
[
[
"Case 3",
"_____no_output_____"
]
],
[
[
"arr1 = np.arange(3).reshape(3,1)\narr2 = np.arange(3)\nprint(arr1.shape, arr2.shape)\nprint(arr1 + arr2)",
"(3, 1) (3,)\n[[0 1 2]\n [1 2 3]\n [2 3 4]]\n"
]
],
[
[
"#### Exercise: Verify if broadcast succeeds. \nand guess the shape of the resulting array",
"_____no_output_____"
],
[
"```\narr1 = np.random.rand(3,4,6,2) # random array of shape (3,4,6,2)\narr2 = np.random.rand(3,4,1,2) \n\narr1 + arr2 \n```",
"_____no_output_____"
],
[
"```\narr1 = np.random.rand(3,6,4,2) # random array of shape (3,6,4,2)\narr2 = np.random.rand(1,2) \n\narr1 + arr2 \n```",
"_____no_output_____"
],
[
"```\narr1 = np.random.rand(3,6,4,2) # random array of shape (3,6,4,2)\narr2 = np.random.rand(1,4,6,2) \n\narr1 + arr2 \n```",
"_____no_output_____"
],
[
"#### Exercise: Find the point closest to `<x,y>` among a set of points \n\nRefresher: _Eucledian distance between two points:_\n<p align=\"center\">\n<img src=\"https://upload.wikimedia.org/wikipedia/commons/thumb/5/55/Euclidean_distance_2d.svg/1200px-Euclidean_distance_2d.svg.png\" width=400 height=300></img>\n</p>\n\nHint: check out [`np.argmin`](https://numpy.org/doc/stable/reference/generated/numpy.argmin.html)\n",
"_____no_output_____"
],
[
"Generating our 2D points data, [`np.random.random`](https://docs.scipy.org/doc/numpy-1.15.0/reference/generated/numpy.random.random.html) returns a number from the uniform distribution of interval `[0.0, 1.0)`. \n\nIf we want our samples to be uniformly distributed in interval `[a, b)` instead, we can do `(a - b) * np.random.random(...) * a`",
"_____no_output_____"
]
],
[
[
"import numpy as np\nnp.random.seed(42)\ntwod_points = (500) * np.random.random((10000, 2)) + 0\ntest_point = np.array([3,2])",
"_____no_output_____"
],
[
"def find_closest(data, point):\n ############################################################################\n ##### TODO: Find the closest point in vanilla Python ##### \n ############################################################################ \n \n min_dist = np.inf\n min_dist_index = 0 \n for i in np.arange(data.shape[0]):\n dist = np.sqrt( ((data[i][0] - point[0])**2) + ((data[i][1] - point[1])**2) )\n if dist < min_dist:\n min_dist = dist\n min_dist_index = i\n return data[min_dist_index] \n\nfind_closest(twod_points, test_point)",
"_____no_output_____"
],
[
"def find_closest_vect(data, point):\n ############################################################################\n ##### TODO: Find the closest point without for-loops ##### \n ############################################################################ \n\n index = np.argmin(np.sqrt(np.sum((data - test_point) ** 2, axis=1 )))\n return data[index]\n\nfind_closest_vect(twod_points, test_point)",
"_____no_output_____"
],
[
"%%timeit\nfind_closest(twod_points, test_point) # 10 loops, best of 5: 45.3 ms per loop",
"10 loops, best of 5: 45.3 ms per loop\n"
],
[
"%%timeit\nfind_closest_vect(twod_points, test_point) # 1000 loops, best of 5: 297 µs per loop",
"1000 loops, best of 5: 297 µs per loop\n"
]
],
[
[
"Let's visualize the `find_closest_vect` function",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\nnp.random.seed(42)\ntwod_points_2 = (500) * np.random.random((100, 2)) + 0\ntest_point_2 = np.array([300,100])",
"_____no_output_____"
],
[
"nn = find_closest_vect(twod_points_2, test_point_2)\nnn",
"_____no_output_____"
],
[
"fig, (ax1) = plt.subplots(1,1, figsize=(8,8), dpi=100)\nax1.scatter(twod_points_2[:,0], twod_points_2[:,1], s=3, c=\"grey\"t)\nax1.scatter(*test_point, c=\"red\", s=6, label=\"test point\")\ndraw_circle = plt.Circle(nn, 6, fill=False)\nax1.add_artist(draw_circle)\nax1.legend()",
"_____no_output_____"
]
],
[
[
"#### Exercise: kNN\nNow that we have seen how to compute the point closest to a test-point `<x, y>` among a set of training points, we are one step closer to kNN\n\nGiven a set of 2D co-ordinates, find the distance of each point with every other point. This step is crucial in finding out the 'k' in kNN. \n\nLet's consider **'k'= 3**, for this try",
"_____no_output_____"
]
],
[
[
"################################################################################\n### TODO: Find the pairwise distance. It's just one line using broadcasting ####\n### You should have a (10000, 10000, 3) matrix after this #### \n################################################################################ \n\ndiff = twod_points.reshape(10000, 1, 2) - twod_points\n\n################################################################################\n### TODO: Using pairwise distance matrix we computed, let's compute ####\n### Euledian distance between the points. Again, just a line of code ####\n### The output of this step should give a (10000,10000) shape array ####\n### indicating the Eucledian distance of one point with every other #### \n################################################################################ \n\npairwise_diff = np.sqrt((diff ** 2).sum(axis=2))\n\n################################################################################\n### TODO: Now sort the (10000,10000) array on the desired axis. ####\n### Check out `np.argsort` #### \n################################################################################ \n\n# pairwise_diff[np.arange(10000), np.arange(10000)] = np.inf\nans = np.argsort(pairwise_diff, axis = 1)\n\n\n################################################################################\n### TODO: Choose 'k' columns from the sorted (10000,10000) array to get ####\n### the k-nearest neighbors of each point with others in the dataset #### \n################################################################################ \n\nans[:10,1:3]",
"_____no_output_____"
]
],
[
[
"Alternate way of broadcasting to get pairwise difference matrix (we did this in class)",
"_____no_output_____"
]
],
[
[
"diff = twod_points.reshape(10000, 2, 1) - np.swapaxes(twod_points, 0, 1).reshape(1, 2, 10000)\npairwise_diff = np.sqrt((diff ** 2).sum(axis=1))\nans = np.argsort(pairwise_diff, axis = 1)\nans[:10,1:3]",
"_____no_output_____"
]
],
[
[
"Verifying if what we did is indeed correct using sklearn's inbuilt kNN function",
"_____no_output_____"
]
],
[
[
"from sklearn.neighbors import NearestNeighbors\nd, i = NearestNeighbors().fit(twod_points).kneighbors(twod_points, 4)\ni[:10, 1:3]",
"_____no_output_____"
]
],
[
[
"#### Exercise: Converting a color image to grayscale \n\nNote: \nA png image has 4 channels: R, G, B and alpha, whereas a grayscale has only one channel. So we need a smart way to combine these 3 channels, which renders the image grayscale. \n\n_Multiply R channel by `0.2126`, G channel by `0.7152`, B channel \nby `0.0722`, and ignore the alpha channel. Now add them up. This is \nthe recipe to get a grayscale image [[source]](https://en.wikipedia.org/wiki/Grayscale)._\n\nCode this using Numpy!",
"_____no_output_____"
]
],
[
[
"# Reading the image\nfrom urllib.request import urlopen\nf = urlopen(\"https://hsasf.hsa.washington.edu/wp-content/uploads/2018/09/UW-Logo.png\")\nimg = plt.imread(f)",
"_____no_output_____"
],
[
"################################################################################\n##### TODO: Convert a color png image to grayscale using broadcasting ##### \n##### `img` has shape (height, width, 4) \n################################################################################\n\nchannel_weights = [0.2126, 0.7152, 0.0722, 0]\ngrayscale_image = np.sum(img*channel_weights, axis=2)\nplt.gca().set_axis_off()\nplt.margins(0, 0)\nplt.imshow(grayscale_image, cmap='gray')\nplt.savefig(\"output_image_rotate.jpg\",bbox_inches='tight', pad_inches=0)\nplt.show()",
"_____no_output_____"
]
],
[
[
"### **4. Slicing, masking, fancy indexing**",
"_____no_output_____"
],
[
"# Exercise: Rotating an image\n\nAny vector(point, `<x, y>`) in a 2D co-ordinate space can be rotated by angle $\\theta$ by doing this\n<p align=\"center\">\n<img src=\"https://wikimedia.org/api/rest_v1/media/math/render/svg/76cd56d49699c53e95cee42a40b340e0a167e078\" width=400 height=100></img>\n</p>\n\nSo, we have to rotate every pixel in our image by $\\theta$ to rotate the whole image. Let's look at a way to do this using Python loops \n",
"_____no_output_____"
]
],
[
[
"# Reading the image\nfrom urllib.request import urlopen\nf = urlopen(\"https://hsasf.hsa.washington.edu/wp-content/uploads/2018/09/UW-Logo.png\")\nimg = plt.imread(f)",
"_____no_output_____"
],
[
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom pdb import set_trace as bp\ndef rotate_image(img, rot_deg=45):\n\n rot_rad = rot_deg * np.pi / 180.0\n\n height, width, num_channels = img.shape\n print(\"Height, Width, Channels before padding: \", height, width, num_channels)\n\n # Pad the input image with white space so that it won't get cropped \n # when we rotate it\n diagonal = int(np.sqrt(height * height + width * width)) # Pythagoras theorm\n img_padded = np.zeros((diagonal, diagonal, num_channels))\n center_h = int((diagonal - height) // 2)\n center_w = int((diagonal - width) // 2)\n img_padded[center_h:-center_h-1, center_w:-center_w-1, :] = img\n rotated_image = np.zeros((diagonal, diagonal, num_channels))\n height, width, num_channels = img_padded.shape\n\n print(\"Height, Width, Channels after padding: \", height, width, num_channels)\n\n rotated_height, rotated_width, _ = rotated_image.shape\n mid_row = int((rotated_height+1) / 2)\n mid_col = int((rotated_width+1) / 2)\n\n # for each pixel in output image, find which pixel\n # it corresponds to in the input image\n for r in range(rotated_height): # iterating over rows\n for c in range(rotated_width): # iterating over cols\n # bp() \n\n x = -(r-mid_row)*np.sin(rot_rad) + (c-mid_col)*np.cos(rot_rad)\n y = (r-mid_row)*np.cos(rot_rad) + (c-mid_col)*np.sin(rot_rad)\n\n # add offset\n x += mid_col\n y += mid_row\n\n x = round(x)\n y = round(y)\n\n # print(r, \" \", c, \" corresponds to-> \" , y, \" \", x)\n # boundary check: if x/y corresponds to a valid pixel in input image\n if (x >= 0 and y >= 0 and x < rotated_height and y < rotated_width):\n rotated_image[r][c][:] = img_padded[y][x][:]\n return rotated_image",
"_____no_output_____"
],
[
"def rotate_image_vect(img, rot_deg=45):\n \n rot_rad = rot_deg * np.pi / 180.0\n\n height, width, num_channels = img.shape\n # print(\"Height, Width, Channels : \", height, width, num_channels)\n\n diagonal = int(np.sqrt(height * height + width * width)) # Pythagoras theorm\n img_padded = np.zeros((diagonal, diagonal, num_channels))\n center_h = int((diagonal - height) // 2)\n center_w = int((diagonal - width) // 2)\n img_padded[center_h:-center_h-1, center_w:-center_w-1, :] = img\n rotated_image = np.zeros((diagonal, diagonal, num_channels))\n height, width, num_channels = img_padded.shape\n\n rotated_height, rotated_width, _ = rotated_image.shape\n mid_row = int( (rotated_height+1)/2 )\n mid_col = int( (rotated_width+1)/2 )\n\n ############################################################################\n ##### TODO: Remove the nested-for-loops using vectorized operations ##### \n ############################################################################ \n\n # CREATE THE ROTATION MATRIX as a (2,2) ndarray\n rotate_m = np.array([[np.cos(rot_rad), np.sin(rot_rad)], \n [-np.sin(rot_rad), np.cos(rot_rad)]])\n\n # CREATE A GRID/MATRIX OF INDICES, where each element of this matrix will be \n # one of the combinations of the nested-for-loop-indices. In other words,\n # write the index space of the nested-for-loops as a matrix. \n # HINT: check out `np.meshgrid`\n\n grid = np.meshgrid(np.arange(rotated_height), np.arange(rotated_width))\n\n # CONVERT this grid into an ndarray, and reshape it to (2,-1) \n # Make a copy of the grid ndarray you just created.\n # Remember that the input and output image have same shape. So we need two \n # copies of the grid ndarray we created. 
\n\n i_org = np.array(grid).reshape(2, -1)\n i_new = i_org.copy()\n\n # SUBTRACT `mid_row` and `mid_col` from `i_new`\n i_new[0] = i_new[0] - mid_row\n i_new[1] = i_new[1] - mid_col\n\n # PERFORM ROTATION on `i_new`\n i_new = (rotate_m @ i_new).astype(int) # @ is short hand for dot-product/matmul\n\n # RECENTER (ie. add back) `mid_row` and `mid_col` to the output matrix of prev. step\n i_new[0] = i_new[0] + mid_row\n i_new[1] = i_new[1] + mid_col\n\n # CREATE the boolean mask to perform the boundary check\n mask = np.logical_and.reduce(((i_new[0] >= 0),\n (i_new[1] >= 0),\n (i_new[0] < rotated_height),\n (i_new[1] < rotated_width)))\n \n # ASSIGN PIXELS FROM INPUT IMAGE TO THE ROTATED IMAGE USING THE mask created\n # in the previous step. Remember, the mask created gives us the \n # valid/in-boundary pixel indices/location in the input image\n\n rotated_image[i_org[0][mask], i_org[1][mask], :] = img_padded[i_new[0][mask], \n i_new[1][mask], :]\n return rotated_image",
"_____no_output_____"
],
[
"# printing the padded image\n# plt.gca().set_axis_off()\n# plt.margins(0, 0)\n# plt.imshow(img_padded)\n# plt.show()",
"_____no_output_____"
],
[
"plt.gca().set_axis_off()\nplt.margins(0, 0)\nrotated_image = rotate_image(img)\nplt.imshow(rotated_image)\n# plt.savefig(\"output_image_rotate.jpg\",bbox_inches='tight', pad_inches=0)\nplt.show()",
"Height, Width, Channels before padding: 1400 1400 4\nHeight, Width, Channels after padding: 1979 1979 4\n"
],
[
"plt.gca().set_axis_off()\nplt.margins(0, 0)\nrotated_image = rotate_image_vect(img)\nplt.imshow(rotated_image)\n# plt.savefig(\"output_image_rotate.jpg\",bbox_inches='tight', pad_inches=0)\nplt.show()",
"_____no_output_____"
],
[
"%%timeit\nrotate_image(img) # 1 loop, best of 5: 51.7 s per loop",
"1 loop, best of 5: 51.7 s per loop\n"
],
[
"%%timeit\nrotate_image_vect(img) # 1 loop, best of 5: 577 ms per loop",
"1 loop, best of 5: 577 ms per loop\n"
]
],
[
[
"References:\n\n1. [Jake Vanderplas' book](https://github.com/jakevdp/PythonDataScienceHandbook/tree/master/notebooks)\n2. [Array Programming, Wikipedia](https://en.wikipedia.org/wiki/Array_programming)\n3. [Nature article on Array programming in Numpy](https://www.nature.com/articles/s41586-020-2649-2)\n4. [Numpy Array Programming blog](https://realpython.com/numpy-array-programming/)\n3. [Rotating image without cv2, StackOverflow](https://stackoverflow.com/questions/57648391/how-do-i-rotate-an-image-manually-without-using-cv2-getrotationmatrix2d)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
4abe86599aec5dceeadf76ca55f139d79f093dbb
| 174,318 |
ipynb
|
Jupyter Notebook
|
drug_treatment_extraction/notebooks/BioBERT_RE.ipynb
|
isaacmg/task-vt
|
1f850042b44d34b9e9f5ffedb4640e705093579f
|
[
"MIT"
] | 1 |
2021-07-27T04:55:57.000Z
|
2021-07-27T04:55:57.000Z
|
drug_treatment_extraction/notebooks/BioBERT_RE.ipynb
|
isaacmg/task-vt
|
1f850042b44d34b9e9f5ffedb4640e705093579f
|
[
"MIT"
] | 11 |
2020-04-06T15:41:50.000Z
|
2020-07-03T21:08:26.000Z
|
drug_treatment_extraction/notebooks/BioBERT_RE.ipynb
|
isaacmg/task-vt
|
1f850042b44d34b9e9f5ffedb4640e705093579f
|
[
"MIT"
] | 19 |
2020-03-29T12:29:37.000Z
|
2020-06-29T16:24:28.000Z
| 49.270209 | 64,467 | 0.562948 |
[
[
[
"<a href=\"https://colab.research.google.com/github/isaacmg/task-vt/blob/biobert_finetune/drug_treatment_extraction/notebooks/BioBERT_RE.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# Finetuning BioBERT for RE\nThis is a fine-tuning notebook that we used to finetune BioBERT for relation classification (on our own data, GAD and Euadr) and then convert the resulting model checkpoint to PyTorch HuggingFace library for model inference. This was done for the vaccine and therapeutics task in order to identify drug treatment relations.\n",
"_____no_output_____"
]
],
[
[
"!git clone https://github.com/dmis-lab/biobert \nfrom google.colab import auth\nfrom datetime import datetime\nauth.authenticate_user()\n!pip install tensorflow==1.15\n",
"Successfully installed gast-0.2.2 tensorboard-1.15.0 tensorflow-1.15.0 tensorflow-estimator-1.15.1\n"
],
[
"import os\nos.chdir('biobert')",
"_____no_output_____"
]
],
[
[
"### Downloading data",
"_____no_output_____"
]
],
[
[
"!./download.sh\n!fileid=\"1GJpGjQj6aZPV-EfbiQELpBkvlGtoKiyA\"\n!wget --load-cookies /tmp/cookies.txt \"https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1GJpGjQj6aZPV-EfbiQELpBkvlGtoKiyA' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id=1GJpGjQj6aZPV-EfbiQELpBkvlGtoKiyA\" -O biobert_w.tar.gz && rm -rf /tmp/cookies.txt\n!tar -xvf biobert_w.tar.gz\n%set_env RE_DIR datasets/RE/GAD/1\n%set_env TASK_NAME=gad\n%set_env OUTPUT_DIR=./re_outputs_1",
"_____no_output_____"
],
[
"%set_env BIOBERT_DIR=biobert_large",
"env: BIOBERT_DIR=biobert_large\n"
],
[
"!python run_re.py --task_name=$TASK_NAME --do_train=true --do_eval=true --do_predict=true --vocab_file=$BIOBERT_DIR/vocab_cased_pubmed_pmc_30k.txt --bert_config_file=$BIOBERT_DIR/bert_config_bio_58k_large.json --init_checkpoint=$BIOBERT_DIR/bio_bert_large_1000k.ckpt.index --max_seq_length=128 --train_batch_size=32 --learning_rate=2e-5 --num_train_epochs=3.0 --do_lower_case=false --data_dir=$RE_DIR --output_dir=$OUTPUT_DIR",
"2020-04-08 17:53:02.882983: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudart.so.10.1\nTraceback (most recent call last):\n File \"run_re.py\", line 25, in <module>\n import optimization\n File \"/content/biobert/optimization.py\", line 87, in <module>\n class AdamWeightDecayOptimizer(tf.train.Optimizer):\nAttributeError: module 'tensorflow._api.v2.train' has no attribute 'Optimizer'\n"
],
[
"#Uncomment this if you want to temporarily stash weights on GCS also collect garbage\n#!gsutil -m cp -r ./re_outputs_1/model.ckpt-0.data-00000-of-00001 gs://coronaviruspublicdata/new_data .\n#import gc \n#gc.collect()\n",
"_____no_output_____"
]
],
[
[
"### Converting the model to HuggingFace",
"_____no_output_____"
]
],
[
[
"!pip install transformers\nimport logging\nimport torch\nlogger = logging.getLogger('spam_application')\n\ndef load_tf_weights_in_bert(model, config, tf_checkpoint_path):\n \"\"\" Load tf checkpoints in a pytorch model.\n \"\"\"\n try:\n import re\n import numpy as np\n import tensorflow as tf\n except ImportError:\n logger.error(\n \"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see \"\n \"https://www.tensorflow.org/install/ for installation instructions.\"\n )\n raise\n tf_path = os.path.abspath(tf_checkpoint_path)\n logger.info(\"Converting TensorFlow checkpoint from {}\".format(tf_path))\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n excluded = ['BERTAdam','_power','global_step']\n init_vars = list(filter(lambda x:all([True if e not in x[0] else False for e in excluded]),init_vars))\n names = []\n arrays = []\n for name, shape in init_vars:\n logger.info(\"Loading TF weight {} with shape {}\".format(name, shape))\n array = tf.train.load_variable(tf_path, name)\n names.append(name)\n arrays.append(array)\n print(\"A name\",names)\n for name, array in zip(names, arrays):\n if name in ['output_weights', 'output_bias']:\n name = 'classifier/' + name\n name = name.split(\"/\")\n # if name in ['output_weights', 'output_bias']:\n # name = 'classifier/' + name\n # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v\n # which are not required for using pretrained model\n if any(\n n in [\"adam_v\", \"adam_m\", \"AdamWeightDecayOptimizer\", \"AdamWeightDecayOptimizer_1\", \"global_step\"]\n for n in name\n ):\n logger.info(\"Skipping {}\".format(\"/\".join(name)))\n continue\n pointer = model\n # if name in ['output_weights' , 'output_bias']:\n # name = 'classifier/' + name\n for m_name in name:\n\n print(\"model\",m_name)\n #print(scope_names)\n if re.fullmatch(r\"[A-Za-z]+_\\d+\", m_name):\n scope_names = re.split(r\"_(\\d+)\", m_name)\n else:\n scope_names = 
[m_name]\n if scope_names[0] == \"kernel\" or scope_names[0] == \"gamma\":\n print(scope_names)\n pointer = getattr(pointer, \"weight\")\n elif scope_names[0] == \"output_bias\" or scope_names[0] == \"beta\":\n # elif scope_names[0] == \"beta\":\n # print(scope_names)\n pointer = getattr(pointer, \"bias\")\n # elif scope_names[0] == \"output_bias\":\n # print(scope_names)\n # pointer = getattr(pointer, \"cls\")\n elif scope_names[0] == \"output_weights\":\n print(scope_names)\n pointer = getattr(pointer, \"weight\")\n elif scope_names[0] == \"squad\":\n print(scope_names)\n pointer = getattr(pointer, \"classifier\")\n else:\n try:\n pointer = getattr(pointer, scope_names[0])\n except AttributeError:\n logger.info(\"Skipping {}\".format(\"/\".join(name)))\n continue\n if len(scope_names) >= 2:\n num = int(scope_names[1])\n pointer = pointer[num]\n if m_name[-11:] == \"_embeddings\":\n pointer = getattr(pointer, \"weight\")\n elif m_name == \"kernel\":\n array = np.transpose(array)\n try:\n assert pointer.shape == array.shape\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n logger.info(\"Initialize PyTorch weight {}\".format(name))\n pointer.data = torch.from_numpy(array)\n return model\n",
"_____no_output_____"
],
[
"from transformers import BertConfig, BertForSequenceClassification, BertForPreTraining\ndef convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):\n # Initialise PyTorch model\n config = BertConfig.from_json_file(bert_config_file)\n print(\"Building PyTorch model from configuration: {}\".format(str(config)))\n config.num_labels = 2\n model = BertForSequenceClassification(config)\n #model = BertForSequenceClassification(config)\n # Load \"weights from tf checkpoint\n load_tf_weights_in_bert(model, config, tf_checkpoint_path)\n\n # Save pytorch-model\n print(\"Save PyTorch model to {}\".format(pytorch_dump_path))\n model.save_pretrained(pytorch_dump_path)\n return model\n# Alternatevely you can download existing stashed data\n#!gsutil cp -r gs://coronaviruspublicdata/re_outputs_1 .",
"_____no_output_____"
],
[
"\nimport os\n!mkdir pytorch_output_temp\nmodel2 = convert_tf_checkpoint_to_pytorch(\"re_outputs_1\", \"biobert_large/bert_config_bio_58k_large.json\", \"pytorch_output_temp\")",
"Building PyTorch model from configuration: BertConfig {\n \"_num_labels\": 2,\n \"architectures\": null,\n \"attention_probs_dropout_prob\": 0.1,\n \"bad_words_ids\": null,\n \"bos_token_id\": null,\n \"decoder_start_token_id\": null,\n \"do_sample\": false,\n \"early_stopping\": false,\n \"eos_token_id\": null,\n \"finetuning_task\": null,\n \"hidden_act\": \"gelu\",\n \"hidden_dropout_prob\": 0.1,\n \"hidden_size\": 1024,\n \"id2label\": {\n \"0\": \"LABEL_0\",\n \"1\": \"LABEL_1\"\n },\n \"initializer_range\": 0.02,\n \"intermediate_size\": 4096,\n \"is_decoder\": false,\n \"is_encoder_decoder\": false,\n \"label2id\": {\n \"LABEL_0\": 0,\n \"LABEL_1\": 1\n },\n \"layer_norm_eps\": 1e-12,\n \"length_penalty\": 1.0,\n \"max_length\": 20,\n \"max_position_embeddings\": 512,\n \"min_length\": 0,\n \"model_type\": \"bert\",\n \"no_repeat_ngram_size\": 0,\n \"num_attention_heads\": 16,\n \"num_beams\": 1,\n \"num_hidden_layers\": 24,\n \"num_return_sequences\": 1,\n \"output_attentions\": false,\n \"output_hidden_states\": false,\n \"output_past\": true,\n \"pad_token_id\": 0,\n \"prefix\": null,\n \"pruned_heads\": {},\n \"repetition_penalty\": 1.0,\n \"task_specific_params\": null,\n \"temperature\": 1.0,\n \"top_k\": 50,\n \"top_p\": 1.0,\n \"torchscript\": false,\n \"type_vocab_size\": 2,\n \"use_bfloat16\": false,\n \"vocab_size\": 58996\n}\n\nA name ['bert/embeddings/LayerNorm/beta', 'bert/embeddings/LayerNorm/beta/adam_m', 'bert/embeddings/LayerNorm/beta/adam_v', 'bert/embeddings/LayerNorm/gamma', 'bert/embeddings/LayerNorm/gamma/adam_m', 'bert/embeddings/LayerNorm/gamma/adam_v', 'bert/embeddings/position_embeddings', 'bert/embeddings/position_embeddings/adam_m', 'bert/embeddings/position_embeddings/adam_v', 'bert/embeddings/token_type_embeddings', 'bert/embeddings/token_type_embeddings/adam_m', 'bert/embeddings/token_type_embeddings/adam_v', 'bert/embeddings/word_embeddings', 'bert/embeddings/word_embeddings/adam_m', 'bert/embeddings/word_embeddings/adam_v', 
'bert/encoder/layer_0/attention/output/LayerNorm/beta', 'bert/encoder/layer_0/attention/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_0/attention/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_0/attention/output/LayerNorm/gamma', 'bert/encoder/layer_0/attention/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_0/attention/output/LayerNorm/gamma/adam_v', 'bert/encoder/layer_0/attention/output/dense/bias', 'bert/encoder/layer_0/attention/output/dense/bias/adam_m', 'bert/encoder/layer_0/attention/output/dense/bias/adam_v', 'bert/encoder/layer_0/attention/output/dense/kernel', 'bert/encoder/layer_0/attention/output/dense/kernel/adam_m', 'bert/encoder/layer_0/attention/output/dense/kernel/adam_v', 'bert/encoder/layer_0/attention/self/key/bias', 'bert/encoder/layer_0/attention/self/key/bias/adam_m', 'bert/encoder/layer_0/attention/self/key/bias/adam_v', 'bert/encoder/layer_0/attention/self/key/kernel', 'bert/encoder/layer_0/attention/self/key/kernel/adam_m', 'bert/encoder/layer_0/attention/self/key/kernel/adam_v', 'bert/encoder/layer_0/attention/self/query/bias', 'bert/encoder/layer_0/attention/self/query/bias/adam_m', 'bert/encoder/layer_0/attention/self/query/bias/adam_v', 'bert/encoder/layer_0/attention/self/query/kernel', 'bert/encoder/layer_0/attention/self/query/kernel/adam_m', 'bert/encoder/layer_0/attention/self/query/kernel/adam_v', 'bert/encoder/layer_0/attention/self/value/bias', 'bert/encoder/layer_0/attention/self/value/bias/adam_m', 'bert/encoder/layer_0/attention/self/value/bias/adam_v', 'bert/encoder/layer_0/attention/self/value/kernel', 'bert/encoder/layer_0/attention/self/value/kernel/adam_m', 'bert/encoder/layer_0/attention/self/value/kernel/adam_v', 'bert/encoder/layer_0/intermediate/dense/bias', 'bert/encoder/layer_0/intermediate/dense/bias/adam_m', 'bert/encoder/layer_0/intermediate/dense/bias/adam_v', 'bert/encoder/layer_0/intermediate/dense/kernel', 'bert/encoder/layer_0/intermediate/dense/kernel/adam_m', 
'bert/encoder/layer_0/intermediate/dense/kernel/adam_v', 'bert/encoder/layer_0/output/LayerNorm/beta', 'bert/encoder/layer_0/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_0/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_0/output/LayerNorm/gamma', 'bert/encoder/layer_0/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_0/output/LayerNorm/gamma/adam_v', 'bert/encoder/layer_0/output/dense/bias', 'bert/encoder/layer_0/output/dense/bias/adam_m', 'bert/encoder/layer_0/output/dense/bias/adam_v', 'bert/encoder/layer_0/output/dense/kernel', 'bert/encoder/layer_0/output/dense/kernel/adam_m', 'bert/encoder/layer_0/output/dense/kernel/adam_v', 'bert/encoder/layer_1/attention/output/LayerNorm/beta', 'bert/encoder/layer_1/attention/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_1/attention/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_1/attention/output/LayerNorm/gamma', 'bert/encoder/layer_1/attention/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_1/attention/output/LayerNorm/gamma/adam_v', 'bert/encoder/layer_1/attention/output/dense/bias', 'bert/encoder/layer_1/attention/output/dense/bias/adam_m', 'bert/encoder/layer_1/attention/output/dense/bias/adam_v', 'bert/encoder/layer_1/attention/output/dense/kernel', 'bert/encoder/layer_1/attention/output/dense/kernel/adam_m', 'bert/encoder/layer_1/attention/output/dense/kernel/adam_v', 'bert/encoder/layer_1/attention/self/key/bias', 'bert/encoder/layer_1/attention/self/key/bias/adam_m', 'bert/encoder/layer_1/attention/self/key/bias/adam_v', 'bert/encoder/layer_1/attention/self/key/kernel', 'bert/encoder/layer_1/attention/self/key/kernel/adam_m', 'bert/encoder/layer_1/attention/self/key/kernel/adam_v', 'bert/encoder/layer_1/attention/self/query/bias', 'bert/encoder/layer_1/attention/self/query/bias/adam_m', 'bert/encoder/layer_1/attention/self/query/bias/adam_v', 'bert/encoder/layer_1/attention/self/query/kernel', 'bert/encoder/layer_1/attention/self/query/kernel/adam_m', 
'bert/encoder/layer_1/attention/self/query/kernel/adam_v', 'bert/encoder/layer_1/attention/self/value/bias', 'bert/encoder/layer_1/attention/self/value/bias/adam_m', 'bert/encoder/layer_1/attention/self/value/bias/adam_v', 'bert/encoder/layer_1/attention/self/value/kernel', 'bert/encoder/layer_1/attention/self/value/kernel/adam_m', 'bert/encoder/layer_1/attention/self/value/kernel/adam_v', 'bert/encoder/layer_1/intermediate/dense/bias', 'bert/encoder/layer_1/intermediate/dense/bias/adam_m', 'bert/encoder/layer_1/intermediate/dense/bias/adam_v', 'bert/encoder/layer_1/intermediate/dense/kernel', 'bert/encoder/layer_1/intermediate/dense/kernel/adam_m', 'bert/encoder/layer_1/intermediate/dense/kernel/adam_v', 'bert/encoder/layer_1/output/LayerNorm/beta', 'bert/encoder/layer_1/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_1/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_1/output/LayerNorm/gamma', 'bert/encoder/layer_1/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_1/output/LayerNorm/gamma/adam_v', 'bert/encoder/layer_1/output/dense/bias', 'bert/encoder/layer_1/output/dense/bias/adam_m', 'bert/encoder/layer_1/output/dense/bias/adam_v', 'bert/encoder/layer_1/output/dense/kernel', 'bert/encoder/layer_1/output/dense/kernel/adam_m', 'bert/encoder/layer_1/output/dense/kernel/adam_v', 'bert/encoder/layer_10/attention/output/LayerNorm/beta', 'bert/encoder/layer_10/attention/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_10/attention/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_10/attention/output/LayerNorm/gamma', 'bert/encoder/layer_10/attention/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_10/attention/output/LayerNorm/gamma/adam_v', 'bert/encoder/layer_10/attention/output/dense/bias', 'bert/encoder/layer_10/attention/output/dense/bias/adam_m', 'bert/encoder/layer_10/attention/output/dense/bias/adam_v', 'bert/encoder/layer_10/attention/output/dense/kernel', 'bert/encoder/layer_10/attention/output/dense/kernel/adam_m', 
'bert/encoder/layer_10/attention/output/dense/kernel/adam_v', 'bert/encoder/layer_10/attention/self/key/bias', 'bert/encoder/layer_10/attention/self/key/bias/adam_m', 'bert/encoder/layer_10/attention/self/key/bias/adam_v', 'bert/encoder/layer_10/attention/self/key/kernel', 'bert/encoder/layer_10/attention/self/key/kernel/adam_m', 'bert/encoder/layer_10/attention/self/key/kernel/adam_v', 'bert/encoder/layer_10/attention/self/query/bias', 'bert/encoder/layer_10/attention/self/query/bias/adam_m', 'bert/encoder/layer_10/attention/self/query/bias/adam_v', 'bert/encoder/layer_10/attention/self/query/kernel', 'bert/encoder/layer_10/attention/self/query/kernel/adam_m', 'bert/encoder/layer_10/attention/self/query/kernel/adam_v', 'bert/encoder/layer_10/attention/self/value/bias', 'bert/encoder/layer_10/attention/self/value/bias/adam_m', 'bert/encoder/layer_10/attention/self/value/bias/adam_v', 'bert/encoder/layer_10/attention/self/value/kernel', 'bert/encoder/layer_10/attention/self/value/kernel/adam_m', 'bert/encoder/layer_10/attention/self/value/kernel/adam_v', 'bert/encoder/layer_10/intermediate/dense/bias', 'bert/encoder/layer_10/intermediate/dense/bias/adam_m', 'bert/encoder/layer_10/intermediate/dense/bias/adam_v', 'bert/encoder/layer_10/intermediate/dense/kernel', 'bert/encoder/layer_10/intermediate/dense/kernel/adam_m', 'bert/encoder/layer_10/intermediate/dense/kernel/adam_v', 'bert/encoder/layer_10/output/LayerNorm/beta', 'bert/encoder/layer_10/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_10/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_10/output/LayerNorm/gamma', 'bert/encoder/layer_10/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_10/output/LayerNorm/gamma/adam_v', 'bert/encoder/layer_10/output/dense/bias', 'bert/encoder/layer_10/output/dense/bias/adam_m', 'bert/encoder/layer_10/output/dense/bias/adam_v', 'bert/encoder/layer_10/output/dense/kernel', 'bert/encoder/layer_10/output/dense/kernel/adam_m', 
'bert/encoder/layer_10/output/dense/kernel/adam_v', 'bert/encoder/layer_11/attention/output/LayerNorm/beta', 'bert/encoder/layer_11/attention/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_11/attention/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_11/attention/output/LayerNorm/gamma', 'bert/encoder/layer_11/attention/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_11/attention/output/LayerNorm/gamma/adam_v', 'bert/encoder/layer_11/attention/output/dense/bias', 'bert/encoder/layer_11/attention/output/dense/bias/adam_m', 'bert/encoder/layer_11/attention/output/dense/bias/adam_v', 'bert/encoder/layer_11/attention/output/dense/kernel', 'bert/encoder/layer_11/attention/output/dense/kernel/adam_m', 'bert/encoder/layer_11/attention/output/dense/kernel/adam_v', 'bert/encoder/layer_11/attention/self/key/bias', 'bert/encoder/layer_11/attention/self/key/bias/adam_m', 'bert/encoder/layer_11/attention/self/key/bias/adam_v', 'bert/encoder/layer_11/attention/self/key/kernel', 'bert/encoder/layer_11/attention/self/key/kernel/adam_m', 'bert/encoder/layer_11/attention/self/key/kernel/adam_v', 'bert/encoder/layer_11/attention/self/query/bias', 'bert/encoder/layer_11/attention/self/query/bias/adam_m', 'bert/encoder/layer_11/attention/self/query/bias/adam_v', 'bert/encoder/layer_11/attention/self/query/kernel', 'bert/encoder/layer_11/attention/self/query/kernel/adam_m', 'bert/encoder/layer_11/attention/self/query/kernel/adam_v', 'bert/encoder/layer_11/attention/self/value/bias', 'bert/encoder/layer_11/attention/self/value/bias/adam_m', 'bert/encoder/layer_11/attention/self/value/bias/adam_v', 'bert/encoder/layer_11/attention/self/value/kernel', 'bert/encoder/layer_11/attention/self/value/kernel/adam_m', 'bert/encoder/layer_11/attention/self/value/kernel/adam_v', 'bert/encoder/layer_11/intermediate/dense/bias', 'bert/encoder/layer_11/intermediate/dense/bias/adam_m', 'bert/encoder/layer_11/intermediate/dense/bias/adam_v', 'bert/encoder/layer_11/intermediate/dense/kernel', 
'bert/encoder/layer_11/intermediate/dense/kernel/adam_m', 'bert/encoder/layer_11/intermediate/dense/kernel/adam_v', 'bert/encoder/layer_11/output/LayerNorm/beta', 'bert/encoder/layer_11/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_11/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_11/output/LayerNorm/gamma', 'bert/encoder/layer_11/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_11/output/LayerNorm/gamma/adam_v', 'bert/encoder/layer_11/output/dense/bias', 'bert/encoder/layer_11/output/dense/bias/adam_m', 'bert/encoder/layer_11/output/dense/bias/adam_v', 'bert/encoder/layer_11/output/dense/kernel', 'bert/encoder/layer_11/output/dense/kernel/adam_m', 'bert/encoder/layer_11/output/dense/kernel/adam_v', 'bert/encoder/layer_12/attention/output/LayerNorm/beta', 'bert/encoder/layer_12/attention/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_12/attention/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_12/attention/output/LayerNorm/gamma', 'bert/encoder/layer_12/attention/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_12/attention/output/LayerNorm/gamma/adam_v', 'bert/encoder/layer_12/attention/output/dense/bias', 'bert/encoder/layer_12/attention/output/dense/bias/adam_m', 'bert/encoder/layer_12/attention/output/dense/bias/adam_v', 'bert/encoder/layer_12/attention/output/dense/kernel', 'bert/encoder/layer_12/attention/output/dense/kernel/adam_m', 'bert/encoder/layer_12/attention/output/dense/kernel/adam_v', 'bert/encoder/layer_12/attention/self/key/bias', 'bert/encoder/layer_12/attention/self/key/bias/adam_m', 'bert/encoder/layer_12/attention/self/key/bias/adam_v', 'bert/encoder/layer_12/attention/self/key/kernel', 'bert/encoder/layer_12/attention/self/key/kernel/adam_m', 'bert/encoder/layer_12/attention/self/key/kernel/adam_v', 'bert/encoder/layer_12/attention/self/query/bias', 'bert/encoder/layer_12/attention/self/query/bias/adam_m', 'bert/encoder/layer_12/attention/self/query/bias/adam_v', 'bert/encoder/layer_12/attention/self/query/kernel', 
'bert/encoder/layer_12/attention/self/query/kernel/adam_m', 'bert/encoder/layer_12/attention/self/query/kernel/adam_v', 'bert/encoder/layer_12/attention/self/value/bias', 'bert/encoder/layer_12/attention/self/value/bias/adam_m', 'bert/encoder/layer_12/attention/self/value/bias/adam_v', 'bert/encoder/layer_12/attention/self/value/kernel', 'bert/encoder/layer_12/attention/self/value/kernel/adam_m', 'bert/encoder/layer_12/attention/self/value/kernel/adam_v', 'bert/encoder/layer_12/intermediate/dense/bias', 'bert/encoder/layer_12/intermediate/dense/bias/adam_m', 'bert/encoder/layer_12/intermediate/dense/bias/adam_v', 'bert/encoder/layer_12/intermediate/dense/kernel', 'bert/encoder/layer_12/intermediate/dense/kernel/adam_m', 'bert/encoder/layer_12/intermediate/dense/kernel/adam_v', 'bert/encoder/layer_12/output/LayerNorm/beta', 'bert/encoder/layer_12/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_12/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_12/output/LayerNorm/gamma', 'bert/encoder/layer_12/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_12/output/LayerNorm/gamma/adam_v', 'bert/encoder/layer_12/output/dense/bias', 'bert/encoder/layer_12/output/dense/bias/adam_m', 'bert/encoder/layer_12/output/dense/bias/adam_v', 'bert/encoder/layer_12/output/dense/kernel', 'bert/encoder/layer_12/output/dense/kernel/adam_m', 'bert/encoder/layer_12/output/dense/kernel/adam_v', 'bert/encoder/layer_13/attention/output/LayerNorm/beta', 'bert/encoder/layer_13/attention/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_13/attention/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_13/attention/output/LayerNorm/gamma', 'bert/encoder/layer_13/attention/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_13/attention/output/LayerNorm/gamma/adam_v', 'bert/encoder/layer_13/attention/output/dense/bias', 'bert/encoder/layer_13/attention/output/dense/bias/adam_m', 'bert/encoder/layer_13/attention/output/dense/bias/adam_v', 'bert/encoder/layer_13/attention/output/dense/kernel', 
'bert/encoder/layer_13/attention/output/dense/kernel/adam_m', 'bert/encoder/layer_13/attention/output/dense/kernel/adam_v', 'bert/encoder/layer_13/attention/self/key/bias', 'bert/encoder/layer_13/attention/self/key/bias/adam_m', 'bert/encoder/layer_13/attention/self/key/bias/adam_v', 'bert/encoder/layer_13/attention/self/key/kernel', 'bert/encoder/layer_13/attention/self/key/kernel/adam_m', 'bert/encoder/layer_13/attention/self/key/kernel/adam_v', 'bert/encoder/layer_13/attention/self/query/bias', 'bert/encoder/layer_13/attention/self/query/bias/adam_m', 'bert/encoder/layer_13/attention/self/query/bias/adam_v', 'bert/encoder/layer_13/attention/self/query/kernel', 'bert/encoder/layer_13/attention/self/query/kernel/adam_m', 'bert/encoder/layer_13/attention/self/query/kernel/adam_v', 'bert/encoder/layer_13/attention/self/value/bias', 'bert/encoder/layer_13/attention/self/value/bias/adam_m', 'bert/encoder/layer_13/attention/self/value/bias/adam_v', 'bert/encoder/layer_13/attention/self/value/kernel', 'bert/encoder/layer_13/attention/self/value/kernel/adam_m', 'bert/encoder/layer_13/attention/self/value/kernel/adam_v', 'bert/encoder/layer_13/intermediate/dense/bias', 'bert/encoder/layer_13/intermediate/dense/bias/adam_m', 'bert/encoder/layer_13/intermediate/dense/bias/adam_v', 'bert/encoder/layer_13/intermediate/dense/kernel', 'bert/encoder/layer_13/intermediate/dense/kernel/adam_m', 'bert/encoder/layer_13/intermediate/dense/kernel/adam_v', 'bert/encoder/layer_13/output/LayerNorm/beta', 'bert/encoder/layer_13/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_13/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_13/output/LayerNorm/gamma', 'bert/encoder/layer_13/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_13/output/LayerNorm/gamma/adam_v', 'bert/encoder/layer_13/output/dense/bias', 'bert/encoder/layer_13/output/dense/bias/adam_m', 'bert/encoder/layer_13/output/dense/bias/adam_v', 'bert/encoder/layer_13/output/dense/kernel', 
'bert/encoder/layer_13/output/dense/kernel/adam_m', 'bert/encoder/layer_13/output/dense/kernel/adam_v', 'bert/encoder/layer_14/attention/output/LayerNorm/beta', 'bert/encoder/layer_14/attention/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_14/attention/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_14/attention/output/LayerNorm/gamma', 'bert/encoder/layer_14/attention/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_14/attention/output/LayerNorm/gamma/adam_v', 'bert/encoder/layer_14/attention/output/dense/bias', 'bert/encoder/layer_14/attention/output/dense/bias/adam_m', 'bert/encoder/layer_14/attention/output/dense/bias/adam_v', 'bert/encoder/layer_14/attention/output/dense/kernel', 'bert/encoder/layer_14/attention/output/dense/kernel/adam_m', 'bert/encoder/layer_14/attention/output/dense/kernel/adam_v', 'bert/encoder/layer_14/attention/self/key/bias', 'bert/encoder/layer_14/attention/self/key/bias/adam_m', 'bert/encoder/layer_14/attention/self/key/bias/adam_v', 'bert/encoder/layer_14/attention/self/key/kernel', 'bert/encoder/layer_14/attention/self/key/kernel/adam_m', 'bert/encoder/layer_14/attention/self/key/kernel/adam_v', 'bert/encoder/layer_14/attention/self/query/bias', 'bert/encoder/layer_14/attention/self/query/bias/adam_m', 'bert/encoder/layer_14/attention/self/query/bias/adam_v', 'bert/encoder/layer_14/attention/self/query/kernel', 'bert/encoder/layer_14/attention/self/query/kernel/adam_m', 'bert/encoder/layer_14/attention/self/query/kernel/adam_v', 'bert/encoder/layer_14/attention/self/value/bias', 'bert/encoder/layer_14/attention/self/value/bias/adam_m', 'bert/encoder/layer_14/attention/self/value/bias/adam_v', 'bert/encoder/layer_14/attention/self/value/kernel', 'bert/encoder/layer_14/attention/self/value/kernel/adam_m', 'bert/encoder/layer_14/attention/self/value/kernel/adam_v', 'bert/encoder/layer_14/intermediate/dense/bias', 'bert/encoder/layer_14/intermediate/dense/bias/adam_m', 'bert/encoder/layer_14/intermediate/dense/bias/adam_v', 
'bert/encoder/layer_14/intermediate/dense/kernel', 'bert/encoder/layer_14/intermediate/dense/kernel/adam_m', 'bert/encoder/layer_14/intermediate/dense/kernel/adam_v', 'bert/encoder/layer_14/output/LayerNorm/beta', 'bert/encoder/layer_14/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_14/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_14/output/LayerNorm/gamma', 'bert/encoder/layer_14/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_14/output/LayerNorm/gamma/adam_v', 'bert/encoder/layer_14/output/dense/bias', 'bert/encoder/layer_14/output/dense/bias/adam_m', 'bert/encoder/layer_14/output/dense/bias/adam_v', 'bert/encoder/layer_14/output/dense/kernel', 'bert/encoder/layer_14/output/dense/kernel/adam_m', 'bert/encoder/layer_14/output/dense/kernel/adam_v', 'bert/encoder/layer_15/attention/output/LayerNorm/beta', 'bert/encoder/layer_15/attention/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_15/attention/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_15/attention/output/LayerNorm/gamma', 'bert/encoder/layer_15/attention/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_15/attention/output/LayerNorm/gamma/adam_v', 'bert/encoder/layer_15/attention/output/dense/bias', 'bert/encoder/layer_15/attention/output/dense/bias/adam_m', 'bert/encoder/layer_15/attention/output/dense/bias/adam_v', 'bert/encoder/layer_15/attention/output/dense/kernel', 'bert/encoder/layer_15/attention/output/dense/kernel/adam_m', 'bert/encoder/layer_15/attention/output/dense/kernel/adam_v', 'bert/encoder/layer_15/attention/self/key/bias', 'bert/encoder/layer_15/attention/self/key/bias/adam_m', 'bert/encoder/layer_15/attention/self/key/bias/adam_v', 'bert/encoder/layer_15/attention/self/key/kernel', 'bert/encoder/layer_15/attention/self/key/kernel/adam_m', 'bert/encoder/layer_15/attention/self/key/kernel/adam_v', 'bert/encoder/layer_15/attention/self/query/bias', 'bert/encoder/layer_15/attention/self/query/bias/adam_m', 'bert/encoder/layer_15/attention/self/query/bias/adam_v', 
'bert/encoder/layer_15/attention/self/query/kernel', 'bert/encoder/layer_15/attention/self/query/kernel/adam_m', 'bert/encoder/layer_15/attention/self/query/kernel/adam_v', 'bert/encoder/layer_15/attention/self/value/bias', 'bert/encoder/layer_15/attention/self/value/bias/adam_m', 'bert/encoder/layer_15/attention/self/value/bias/adam_v', 'bert/encoder/layer_15/attention/self/value/kernel', 'bert/encoder/layer_15/attention/self/value/kernel/adam_m', 'bert/encoder/layer_15/attention/self/value/kernel/adam_v', 'bert/encoder/layer_15/intermediate/dense/bias', 'bert/encoder/layer_15/intermediate/dense/bias/adam_m', 'bert/encoder/layer_15/intermediate/dense/bias/adam_v', 'bert/encoder/layer_15/intermediate/dense/kernel', 'bert/encoder/layer_15/intermediate/dense/kernel/adam_m', 'bert/encoder/layer_15/intermediate/dense/kernel/adam_v', 'bert/encoder/layer_15/output/LayerNorm/beta', 'bert/encoder/layer_15/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_15/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_15/output/LayerNorm/gamma', 'bert/encoder/layer_15/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_15/output/LayerNorm/gamma/adam_v', 'bert/encoder/layer_15/output/dense/bias', 'bert/encoder/layer_15/output/dense/bias/adam_m', 'bert/encoder/layer_15/output/dense/bias/adam_v', 'bert/encoder/layer_15/output/dense/kernel', 'bert/encoder/layer_15/output/dense/kernel/adam_m', 'bert/encoder/layer_15/output/dense/kernel/adam_v', 'bert/encoder/layer_16/attention/output/LayerNorm/beta', 'bert/encoder/layer_16/attention/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_16/attention/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_16/attention/output/LayerNorm/gamma', 'bert/encoder/layer_16/attention/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_16/attention/output/LayerNorm/gamma/adam_v', 'bert/encoder/layer_16/attention/output/dense/bias', 'bert/encoder/layer_16/attention/output/dense/bias/adam_m', 'bert/encoder/layer_16/attention/output/dense/bias/adam_v', 
'bert/encoder/layer_16/attention/output/dense/kernel', 'bert/encoder/layer_16/attention/output/dense/kernel/adam_m', 'bert/encoder/layer_16/attention/output/dense/kernel/adam_v', 'bert/encoder/layer_16/attention/self/key/bias', 'bert/encoder/layer_16/attention/self/key/bias/adam_m', 'bert/encoder/layer_16/attention/self/key/bias/adam_v', 'bert/encoder/layer_16/attention/self/key/kernel', 'bert/encoder/layer_16/attention/self/key/kernel/adam_m', 'bert/encoder/layer_16/attention/self/key/kernel/adam_v', 'bert/encoder/layer_16/attention/self/query/bias', 'bert/encoder/layer_16/attention/self/query/bias/adam_m', 'bert/encoder/layer_16/attention/self/query/bias/adam_v', 'bert/encoder/layer_16/attention/self/query/kernel', 'bert/encoder/layer_16/attention/self/query/kernel/adam_m', 'bert/encoder/layer_16/attention/self/query/kernel/adam_v', 'bert/encoder/layer_16/attention/self/value/bias', 'bert/encoder/layer_16/attention/self/value/bias/adam_m', 'bert/encoder/layer_16/attention/self/value/bias/adam_v', 'bert/encoder/layer_16/attention/self/value/kernel', 'bert/encoder/layer_16/attention/self/value/kernel/adam_m', 'bert/encoder/layer_16/attention/self/value/kernel/adam_v', 'bert/encoder/layer_16/intermediate/dense/bias', 'bert/encoder/layer_16/intermediate/dense/bias/adam_m', 'bert/encoder/layer_16/intermediate/dense/bias/adam_v', 'bert/encoder/layer_16/intermediate/dense/kernel', 'bert/encoder/layer_16/intermediate/dense/kernel/adam_m', 'bert/encoder/layer_16/intermediate/dense/kernel/adam_v', 'bert/encoder/layer_16/output/LayerNorm/beta', 'bert/encoder/layer_16/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_16/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_16/output/LayerNorm/gamma', 'bert/encoder/layer_16/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_16/output/LayerNorm/gamma/adam_v', 'bert/encoder/layer_16/output/dense/bias', 'bert/encoder/layer_16/output/dense/bias/adam_m', 'bert/encoder/layer_16/output/dense/bias/adam_v', 
'bert/encoder/layer_16/output/dense/kernel', 'bert/encoder/layer_16/output/dense/kernel/adam_m', 'bert/encoder/layer_16/output/dense/kernel/adam_v', 'bert/encoder/layer_17/attention/output/LayerNorm/beta', 'bert/encoder/layer_17/attention/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_17/attention/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_17/attention/output/LayerNorm/gamma', 'bert/encoder/layer_17/attention/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_17/attention/output/LayerNorm/gamma/adam_v', 'bert/encoder/layer_17/attention/output/dense/bias', 'bert/encoder/layer_17/attention/output/dense/bias/adam_m', 'bert/encoder/layer_17/attention/output/dense/bias/adam_v', 'bert/encoder/layer_17/attention/output/dense/kernel', 'bert/encoder/layer_17/attention/output/dense/kernel/adam_m', 'bert/encoder/layer_17/attention/output/dense/kernel/adam_v', 'bert/encoder/layer_17/attention/self/key/bias', 'bert/encoder/layer_17/attention/self/key/bias/adam_m', 'bert/encoder/layer_17/attention/self/key/bias/adam_v', 'bert/encoder/layer_17/attention/self/key/kernel', 'bert/encoder/layer_17/attention/self/key/kernel/adam_m', 'bert/encoder/layer_17/attention/self/key/kernel/adam_v', 'bert/encoder/layer_17/attention/self/query/bias', 'bert/encoder/layer_17/attention/self/query/bias/adam_m', 'bert/encoder/layer_17/attention/self/query/bias/adam_v', 'bert/encoder/layer_17/attention/self/query/kernel', 'bert/encoder/layer_17/attention/self/query/kernel/adam_m', 'bert/encoder/layer_17/attention/self/query/kernel/adam_v', 'bert/encoder/layer_17/attention/self/value/bias', 'bert/encoder/layer_17/attention/self/value/bias/adam_m', 'bert/encoder/layer_17/attention/self/value/bias/adam_v', 'bert/encoder/layer_17/attention/self/value/kernel', 'bert/encoder/layer_17/attention/self/value/kernel/adam_m', 'bert/encoder/layer_17/attention/self/value/kernel/adam_v', 'bert/encoder/layer_17/intermediate/dense/bias', 'bert/encoder/layer_17/intermediate/dense/bias/adam_m', 
'bert/encoder/layer_17/intermediate/dense/bias/adam_v', 'bert/encoder/layer_17/intermediate/dense/kernel', 'bert/encoder/layer_17/intermediate/dense/kernel/adam_m', 'bert/encoder/layer_17/intermediate/dense/kernel/adam_v', 'bert/encoder/layer_17/output/LayerNorm/beta', 'bert/encoder/layer_17/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_17/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_17/output/LayerNorm/gamma', 'bert/encoder/layer_17/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_17/output/LayerNorm/gamma/adam_v', 'bert/encoder/layer_17/output/dense/bias', 'bert/encoder/layer_17/output/dense/bias/adam_m', 'bert/encoder/layer_17/output/dense/bias/adam_v', 'bert/encoder/layer_17/output/dense/kernel', 'bert/encoder/layer_17/output/dense/kernel/adam_m', 'bert/encoder/layer_17/output/dense/kernel/adam_v', 'bert/encoder/layer_18/attention/output/LayerNorm/beta', 'bert/encoder/layer_18/attention/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_18/attention/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_18/attention/output/LayerNorm/gamma', 'bert/encoder/layer_18/attention/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_18/attention/output/LayerNorm/gamma/adam_v', 'bert/encoder/layer_18/attention/output/dense/bias', 'bert/encoder/layer_18/attention/output/dense/bias/adam_m', 'bert/encoder/layer_18/attention/output/dense/bias/adam_v', 'bert/encoder/layer_18/attention/output/dense/kernel', 'bert/encoder/layer_18/attention/output/dense/kernel/adam_m', 'bert/encoder/layer_18/attention/output/dense/kernel/adam_v', 'bert/encoder/layer_18/attention/self/key/bias', 'bert/encoder/layer_18/attention/self/key/bias/adam_m', 'bert/encoder/layer_18/attention/self/key/bias/adam_v', 'bert/encoder/layer_18/attention/self/key/kernel', 'bert/encoder/layer_18/attention/self/key/kernel/adam_m', 'bert/encoder/layer_18/attention/self/key/kernel/adam_v', 'bert/encoder/layer_18/attention/self/query/bias', 'bert/encoder/layer_18/attention/self/query/bias/adam_m', 
'bert/encoder/layer_18/attention/self/query/bias/adam_v', 'bert/encoder/layer_18/attention/self/query/kernel', 'bert/encoder/layer_18/attention/self/query/kernel/adam_m', 'bert/encoder/layer_18/attention/self/query/kernel/adam_v', 'bert/encoder/layer_18/attention/self/value/bias', 'bert/encoder/layer_18/attention/self/value/bias/adam_m', 'bert/encoder/layer_18/attention/self/value/bias/adam_v', 'bert/encoder/layer_18/attention/self/value/kernel', 'bert/encoder/layer_18/attention/self/value/kernel/adam_m', 'bert/encoder/layer_18/attention/self/value/kernel/adam_v', 'bert/encoder/layer_18/intermediate/dense/bias', 'bert/encoder/layer_18/intermediate/dense/bias/adam_m', 'bert/encoder/layer_18/intermediate/dense/bias/adam_v', 'bert/encoder/layer_18/intermediate/dense/kernel', 'bert/encoder/layer_18/intermediate/dense/kernel/adam_m', 'bert/encoder/layer_18/intermediate/dense/kernel/adam_v', 'bert/encoder/layer_18/output/LayerNorm/beta', 'bert/encoder/layer_18/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_18/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_18/output/LayerNorm/gamma', 'bert/encoder/layer_18/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_18/output/LayerNorm/gamma/adam_v', 'bert/encoder/layer_18/output/dense/bias', 'bert/encoder/layer_18/output/dense/bias/adam_m', 'bert/encoder/layer_18/output/dense/bias/adam_v', 'bert/encoder/layer_18/output/dense/kernel', 'bert/encoder/layer_18/output/dense/kernel/adam_m', 'bert/encoder/layer_18/output/dense/kernel/adam_v', 'bert/encoder/layer_19/attention/output/LayerNorm/beta', 'bert/encoder/layer_19/attention/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_19/attention/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_19/attention/output/LayerNorm/gamma', 'bert/encoder/layer_19/attention/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_19/attention/output/LayerNorm/gamma/adam_v', 'bert/encoder/layer_19/attention/output/dense/bias', 'bert/encoder/layer_19/attention/output/dense/bias/adam_m', 
'bert/encoder/layer_19/attention/output/dense/bias/adam_v', 'bert/encoder/layer_19/attention/output/dense/kernel', 'bert/encoder/layer_19/attention/output/dense/kernel/adam_m', 'bert/encoder/layer_19/attention/output/dense/kernel/adam_v', 'bert/encoder/layer_19/attention/self/key/bias', 'bert/encoder/layer_19/attention/self/key/bias/adam_m', 'bert/encoder/layer_19/attention/self/key/bias/adam_v', 'bert/encoder/layer_19/attention/self/key/kernel', 'bert/encoder/layer_19/attention/self/key/kernel/adam_m', 'bert/encoder/layer_19/attention/self/key/kernel/adam_v', 'bert/encoder/layer_19/attention/self/query/bias', 'bert/encoder/layer_19/attention/self/query/bias/adam_m', 'bert/encoder/layer_19/attention/self/query/bias/adam_v', 'bert/encoder/layer_19/attention/self/query/kernel', 'bert/encoder/layer_19/attention/self/query/kernel/adam_m', 'bert/encoder/layer_19/attention/self/query/kernel/adam_v', 'bert/encoder/layer_19/attention/self/value/bias', 'bert/encoder/layer_19/attention/self/value/bias/adam_m', 'bert/encoder/layer_19/attention/self/value/bias/adam_v', 'bert/encoder/layer_19/attention/self/value/kernel', 'bert/encoder/layer_19/attention/self/value/kernel/adam_m', 'bert/encoder/layer_19/attention/self/value/kernel/adam_v', 'bert/encoder/layer_19/intermediate/dense/bias', 'bert/encoder/layer_19/intermediate/dense/bias/adam_m', 'bert/encoder/layer_19/intermediate/dense/bias/adam_v', 'bert/encoder/layer_19/intermediate/dense/kernel', 'bert/encoder/layer_19/intermediate/dense/kernel/adam_m', 'bert/encoder/layer_19/intermediate/dense/kernel/adam_v', 'bert/encoder/layer_19/output/LayerNorm/beta', 'bert/encoder/layer_19/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_19/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_19/output/LayerNorm/gamma', 'bert/encoder/layer_19/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_19/output/LayerNorm/gamma/adam_v', 'bert/encoder/layer_19/output/dense/bias', 'bert/encoder/layer_19/output/dense/bias/adam_m', 
'bert/encoder/layer_19/output/dense/bias/adam_v', 'bert/encoder/layer_19/output/dense/kernel', 'bert/encoder/layer_19/output/dense/kernel/adam_m', 'bert/encoder/layer_19/output/dense/kernel/adam_v', 'bert/encoder/layer_2/attention/output/LayerNorm/beta', 'bert/encoder/layer_2/attention/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_2/attention/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_2/attention/output/LayerNorm/gamma', 'bert/encoder/layer_2/attention/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_2/attention/output/LayerNorm/gamma/adam_v', 'bert/encoder/layer_2/attention/output/dense/bias', 'bert/encoder/layer_2/attention/output/dense/bias/adam_m', 'bert/encoder/layer_2/attention/output/dense/bias/adam_v', 'bert/encoder/layer_2/attention/output/dense/kernel', 'bert/encoder/layer_2/attention/output/dense/kernel/adam_m', 'bert/encoder/layer_2/attention/output/dense/kernel/adam_v', 'bert/encoder/layer_2/attention/self/key/bias', 'bert/encoder/layer_2/attention/self/key/bias/adam_m', 'bert/encoder/layer_2/attention/self/key/bias/adam_v', 'bert/encoder/layer_2/attention/self/key/kernel', 'bert/encoder/layer_2/attention/self/key/kernel/adam_m', 'bert/encoder/layer_2/attention/self/key/kernel/adam_v', 'bert/encoder/layer_2/attention/self/query/bias', 'bert/encoder/layer_2/attention/self/query/bias/adam_m', 'bert/encoder/layer_2/attention/self/query/bias/adam_v', 'bert/encoder/layer_2/attention/self/query/kernel', 'bert/encoder/layer_2/attention/self/query/kernel/adam_m', 'bert/encoder/layer_2/attention/self/query/kernel/adam_v', 'bert/encoder/layer_2/attention/self/value/bias', 'bert/encoder/layer_2/attention/self/value/bias/adam_m', 'bert/encoder/layer_2/attention/self/value/bias/adam_v', 'bert/encoder/layer_2/attention/self/value/kernel', 'bert/encoder/layer_2/attention/self/value/kernel/adam_m', 'bert/encoder/layer_2/attention/self/value/kernel/adam_v', 'bert/encoder/layer_2/intermediate/dense/bias', 
'bert/encoder/layer_2/intermediate/dense/bias/adam_m', 'bert/encoder/layer_2/intermediate/dense/bias/adam_v', 'bert/encoder/layer_2/intermediate/dense/kernel', 'bert/encoder/layer_2/intermediate/dense/kernel/adam_m', 'bert/encoder/layer_2/intermediate/dense/kernel/adam_v', 'bert/encoder/layer_2/output/LayerNorm/beta', 'bert/encoder/layer_2/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_2/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_2/output/LayerNorm/gamma', 'bert/encoder/layer_2/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_2/output/LayerNorm/gamma/adam_v', 'bert/encoder/layer_2/output/dense/bias', 'bert/encoder/layer_2/output/dense/bias/adam_m', 'bert/encoder/layer_2/output/dense/bias/adam_v', 'bert/encoder/layer_2/output/dense/kernel', 'bert/encoder/layer_2/output/dense/kernel/adam_m', 'bert/encoder/layer_2/output/dense/kernel/adam_v', 'bert/encoder/layer_20/attention/output/LayerNorm/beta', 'bert/encoder/layer_20/attention/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_20/attention/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_20/attention/output/LayerNorm/gamma', 'bert/encoder/layer_20/attention/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_20/attention/output/LayerNorm/gamma/adam_v', 'bert/encoder/layer_20/attention/output/dense/bias', 'bert/encoder/layer_20/attention/output/dense/bias/adam_m', 'bert/encoder/layer_20/attention/output/dense/bias/adam_v', 'bert/encoder/layer_20/attention/output/dense/kernel', 'bert/encoder/layer_20/attention/output/dense/kernel/adam_m', 'bert/encoder/layer_20/attention/output/dense/kernel/adam_v', 'bert/encoder/layer_20/attention/self/key/bias', 'bert/encoder/layer_20/attention/self/key/bias/adam_m', 'bert/encoder/layer_20/attention/self/key/bias/adam_v', 'bert/encoder/layer_20/attention/self/key/kernel', 'bert/encoder/layer_20/attention/self/key/kernel/adam_m', 'bert/encoder/layer_20/attention/self/key/kernel/adam_v', 'bert/encoder/layer_20/attention/self/query/bias', 
'bert/encoder/layer_20/attention/self/query/bias/adam_m', 'bert/encoder/layer_20/attention/self/query/bias/adam_v', 'bert/encoder/layer_20/attention/self/query/kernel', 'bert/encoder/layer_20/attention/self/query/kernel/adam_m', 'bert/encoder/layer_20/attention/self/query/kernel/adam_v', 'bert/encoder/layer_20/attention/self/value/bias', 'bert/encoder/layer_20/attention/self/value/bias/adam_m', 'bert/encoder/layer_20/attention/self/value/bias/adam_v', 'bert/encoder/layer_20/attention/self/value/kernel', 'bert/encoder/layer_20/attention/self/value/kernel/adam_m', 'bert/encoder/layer_20/attention/self/value/kernel/adam_v', 'bert/encoder/layer_20/intermediate/dense/bias', 'bert/encoder/layer_20/intermediate/dense/bias/adam_m', 'bert/encoder/layer_20/intermediate/dense/bias/adam_v', 'bert/encoder/layer_20/intermediate/dense/kernel', 'bert/encoder/layer_20/intermediate/dense/kernel/adam_m', 'bert/encoder/layer_20/intermediate/dense/kernel/adam_v', 'bert/encoder/layer_20/output/LayerNorm/beta', 'bert/encoder/layer_20/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_20/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_20/output/LayerNorm/gamma', 'bert/encoder/layer_20/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_20/output/LayerNorm/gamma/adam_v', 'bert/encoder/layer_20/output/dense/bias', 'bert/encoder/layer_20/output/dense/bias/adam_m', 'bert/encoder/layer_20/output/dense/bias/adam_v', 'bert/encoder/layer_20/output/dense/kernel', 'bert/encoder/layer_20/output/dense/kernel/adam_m', 'bert/encoder/layer_20/output/dense/kernel/adam_v', 'bert/encoder/layer_21/attention/output/LayerNorm/beta', 'bert/encoder/layer_21/attention/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_21/attention/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_21/attention/output/LayerNorm/gamma', 'bert/encoder/layer_21/attention/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_21/attention/output/LayerNorm/gamma/adam_v', 'bert/encoder/layer_21/attention/output/dense/bias', 
'bert/encoder/layer_21/attention/output/dense/bias/adam_m', 'bert/encoder/layer_21/attention/output/dense/bias/adam_v', 'bert/encoder/layer_21/attention/output/dense/kernel', 'bert/encoder/layer_21/attention/output/dense/kernel/adam_m', 'bert/encoder/layer_21/attention/output/dense/kernel/adam_v', 'bert/encoder/layer_21/attention/self/key/bias', 'bert/encoder/layer_21/attention/self/key/bias/adam_m', 'bert/encoder/layer_21/attention/self/key/bias/adam_v', 'bert/encoder/layer_21/attention/self/key/kernel', 'bert/encoder/layer_21/attention/self/key/kernel/adam_m', 'bert/encoder/layer_21/attention/self/key/kernel/adam_v', 'bert/encoder/layer_21/attention/self/query/bias', 'bert/encoder/layer_21/attention/self/query/bias/adam_m', 'bert/encoder/layer_21/attention/self/query/bias/adam_v', 'bert/encoder/layer_21/attention/self/query/kernel', 'bert/encoder/layer_21/attention/self/query/kernel/adam_m', 'bert/encoder/layer_21/attention/self/query/kernel/adam_v', 'bert/encoder/layer_21/attention/self/value/bias', 'bert/encoder/layer_21/attention/self/value/bias/adam_m', 'bert/encoder/layer_21/attention/self/value/bias/adam_v', 'bert/encoder/layer_21/attention/self/value/kernel', 'bert/encoder/layer_21/attention/self/value/kernel/adam_m', 'bert/encoder/layer_21/attention/self/value/kernel/adam_v', 'bert/encoder/layer_21/intermediate/dense/bias', 'bert/encoder/layer_21/intermediate/dense/bias/adam_m', 'bert/encoder/layer_21/intermediate/dense/bias/adam_v', 'bert/encoder/layer_21/intermediate/dense/kernel', 'bert/encoder/layer_21/intermediate/dense/kernel/adam_m', 'bert/encoder/layer_21/intermediate/dense/kernel/adam_v', 'bert/encoder/layer_21/output/LayerNorm/beta', 'bert/encoder/layer_21/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_21/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_21/output/LayerNorm/gamma', 'bert/encoder/layer_21/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_21/output/LayerNorm/gamma/adam_v', 'bert/encoder/layer_21/output/dense/bias', 
'bert/encoder/layer_21/output/dense/bias/adam_m', 'bert/encoder/layer_21/output/dense/bias/adam_v', 'bert/encoder/layer_21/output/dense/kernel', 'bert/encoder/layer_21/output/dense/kernel/adam_m', 'bert/encoder/layer_21/output/dense/kernel/adam_v', 'bert/encoder/layer_22/attention/output/LayerNorm/beta', 'bert/encoder/layer_22/attention/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_22/attention/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_22/attention/output/LayerNorm/gamma', 'bert/encoder/layer_22/attention/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_22/attention/output/LayerNorm/gamma/adam_v', 'bert/encoder/layer_22/attention/output/dense/bias', 'bert/encoder/layer_22/attention/output/dense/bias/adam_m', 'bert/encoder/layer_22/attention/output/dense/bias/adam_v', 'bert/encoder/layer_22/attention/output/dense/kernel', 'bert/encoder/layer_22/attention/output/dense/kernel/adam_m', 'bert/encoder/layer_22/attention/output/dense/kernel/adam_v', 'bert/encoder/layer_22/attention/self/key/bias', 'bert/encoder/layer_22/attention/self/key/bias/adam_m', 'bert/encoder/layer_22/attention/self/key/bias/adam_v', 'bert/encoder/layer_22/attention/self/key/kernel', 'bert/encoder/layer_22/attention/self/key/kernel/adam_m', 'bert/encoder/layer_22/attention/self/key/kernel/adam_v', 'bert/encoder/layer_22/attention/self/query/bias', 'bert/encoder/layer_22/attention/self/query/bias/adam_m', 'bert/encoder/layer_22/attention/self/query/bias/adam_v', 'bert/encoder/layer_22/attention/self/query/kernel', 'bert/encoder/layer_22/attention/self/query/kernel/adam_m', 'bert/encoder/layer_22/attention/self/query/kernel/adam_v', 'bert/encoder/layer_22/attention/self/value/bias', 'bert/encoder/layer_22/attention/self/value/bias/adam_m', 'bert/encoder/layer_22/attention/self/value/bias/adam_v', 'bert/encoder/layer_22/attention/self/value/kernel', 'bert/encoder/layer_22/attention/self/value/kernel/adam_m', 'bert/encoder/layer_22/attention/self/value/kernel/adam_v', 
'bert/encoder/layer_22/intermediate/dense/bias', 'bert/encoder/layer_22/intermediate/dense/bias/adam_m', 'bert/encoder/layer_22/intermediate/dense/bias/adam_v', 'bert/encoder/layer_22/intermediate/dense/kernel', 'bert/encoder/layer_22/intermediate/dense/kernel/adam_m', 'bert/encoder/layer_22/intermediate/dense/kernel/adam_v', 'bert/encoder/layer_22/output/LayerNorm/beta', 'bert/encoder/layer_22/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_22/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_22/output/LayerNorm/gamma', 'bert/encoder/layer_22/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_22/output/LayerNorm/gamma/adam_v', 'bert/encoder/layer_22/output/dense/bias', 'bert/encoder/layer_22/output/dense/bias/adam_m', 'bert/encoder/layer_22/output/dense/bias/adam_v', 'bert/encoder/layer_22/output/dense/kernel', 'bert/encoder/layer_22/output/dense/kernel/adam_m', 'bert/encoder/layer_22/output/dense/kernel/adam_v', 'bert/encoder/layer_23/attention/output/LayerNorm/beta', 'bert/encoder/layer_23/attention/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_23/attention/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_23/attention/output/LayerNorm/gamma', 'bert/encoder/layer_23/attention/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_23/attention/output/LayerNorm/gamma/adam_v', 'bert/encoder/layer_23/attention/output/dense/bias', 'bert/encoder/layer_23/attention/output/dense/bias/adam_m', 'bert/encoder/layer_23/attention/output/dense/bias/adam_v', 'bert/encoder/layer_23/attention/output/dense/kernel', 'bert/encoder/layer_23/attention/output/dense/kernel/adam_m', 'bert/encoder/layer_23/attention/output/dense/kernel/adam_v', 'bert/encoder/layer_23/attention/self/key/bias', 'bert/encoder/layer_23/attention/self/key/bias/adam_m', 'bert/encoder/layer_23/attention/self/key/bias/adam_v', 'bert/encoder/layer_23/attention/self/key/kernel', 'bert/encoder/layer_23/attention/self/key/kernel/adam_m', 'bert/encoder/layer_23/attention/self/key/kernel/adam_v', 
'bert/encoder/layer_23/attention/self/query/bias', 'bert/encoder/layer_23/attention/self/query/bias/adam_m', 'bert/encoder/layer_23/attention/self/query/bias/adam_v', 'bert/encoder/layer_23/attention/self/query/kernel', 'bert/encoder/layer_23/attention/self/query/kernel/adam_m', 'bert/encoder/layer_23/attention/self/query/kernel/adam_v', 'bert/encoder/layer_23/attention/self/value/bias', 'bert/encoder/layer_23/attention/self/value/bias/adam_m', 'bert/encoder/layer_23/attention/self/value/bias/adam_v', 'bert/encoder/layer_23/attention/self/value/kernel', 'bert/encoder/layer_23/attention/self/value/kernel/adam_m', 'bert/encoder/layer_23/attention/self/value/kernel/adam_v', 'bert/encoder/layer_23/intermediate/dense/bias', 'bert/encoder/layer_23/intermediate/dense/bias/adam_m', 'bert/encoder/layer_23/intermediate/dense/bias/adam_v', 'bert/encoder/layer_23/intermediate/dense/kernel', 'bert/encoder/layer_23/intermediate/dense/kernel/adam_m', 'bert/encoder/layer_23/intermediate/dense/kernel/adam_v', 'bert/encoder/layer_23/output/LayerNorm/beta', 'bert/encoder/layer_23/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_23/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_23/output/LayerNorm/gamma', 'bert/encoder/layer_23/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_23/output/LayerNorm/gamma/adam_v', 'bert/encoder/layer_23/output/dense/bias', 'bert/encoder/layer_23/output/dense/bias/adam_m', 'bert/encoder/layer_23/output/dense/bias/adam_v', 'bert/encoder/layer_23/output/dense/kernel', 'bert/encoder/layer_23/output/dense/kernel/adam_m', 'bert/encoder/layer_23/output/dense/kernel/adam_v', 'bert/encoder/layer_3/attention/output/LayerNorm/beta', 'bert/encoder/layer_3/attention/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_3/attention/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_3/attention/output/LayerNorm/gamma', 'bert/encoder/layer_3/attention/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_3/attention/output/LayerNorm/gamma/adam_v', 
'bert/encoder/layer_3/attention/output/dense/bias', 'bert/encoder/layer_3/attention/output/dense/bias/adam_m', 'bert/encoder/layer_3/attention/output/dense/bias/adam_v', 'bert/encoder/layer_3/attention/output/dense/kernel', 'bert/encoder/layer_3/attention/output/dense/kernel/adam_m', 'bert/encoder/layer_3/attention/output/dense/kernel/adam_v', 'bert/encoder/layer_3/attention/self/key/bias', 'bert/encoder/layer_3/attention/self/key/bias/adam_m', 'bert/encoder/layer_3/attention/self/key/bias/adam_v', 'bert/encoder/layer_3/attention/self/key/kernel', 'bert/encoder/layer_3/attention/self/key/kernel/adam_m', 'bert/encoder/layer_3/attention/self/key/kernel/adam_v', 'bert/encoder/layer_3/attention/self/query/bias', 'bert/encoder/layer_3/attention/self/query/bias/adam_m', 'bert/encoder/layer_3/attention/self/query/bias/adam_v', 'bert/encoder/layer_3/attention/self/query/kernel', 'bert/encoder/layer_3/attention/self/query/kernel/adam_m', 'bert/encoder/layer_3/attention/self/query/kernel/adam_v', 'bert/encoder/layer_3/attention/self/value/bias', 'bert/encoder/layer_3/attention/self/value/bias/adam_m', 'bert/encoder/layer_3/attention/self/value/bias/adam_v', 'bert/encoder/layer_3/attention/self/value/kernel', 'bert/encoder/layer_3/attention/self/value/kernel/adam_m', 'bert/encoder/layer_3/attention/self/value/kernel/adam_v', 'bert/encoder/layer_3/intermediate/dense/bias', 'bert/encoder/layer_3/intermediate/dense/bias/adam_m', 'bert/encoder/layer_3/intermediate/dense/bias/adam_v', 'bert/encoder/layer_3/intermediate/dense/kernel', 'bert/encoder/layer_3/intermediate/dense/kernel/adam_m', 'bert/encoder/layer_3/intermediate/dense/kernel/adam_v', 'bert/encoder/layer_3/output/LayerNorm/beta', 'bert/encoder/layer_3/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_3/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_3/output/LayerNorm/gamma', 'bert/encoder/layer_3/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_3/output/LayerNorm/gamma/adam_v', 
'bert/encoder/layer_3/output/dense/bias', 'bert/encoder/layer_3/output/dense/bias/adam_m', 'bert/encoder/layer_3/output/dense/bias/adam_v', 'bert/encoder/layer_3/output/dense/kernel', 'bert/encoder/layer_3/output/dense/kernel/adam_m', 'bert/encoder/layer_3/output/dense/kernel/adam_v', 'bert/encoder/layer_4/attention/output/LayerNorm/beta', 'bert/encoder/layer_4/attention/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_4/attention/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_4/attention/output/LayerNorm/gamma', 'bert/encoder/layer_4/attention/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_4/attention/output/LayerNorm/gamma/adam_v', 'bert/encoder/layer_4/attention/output/dense/bias', 'bert/encoder/layer_4/attention/output/dense/bias/adam_m', 'bert/encoder/layer_4/attention/output/dense/bias/adam_v', 'bert/encoder/layer_4/attention/output/dense/kernel', 'bert/encoder/layer_4/attention/output/dense/kernel/adam_m', 'bert/encoder/layer_4/attention/output/dense/kernel/adam_v', 'bert/encoder/layer_4/attention/self/key/bias', 'bert/encoder/layer_4/attention/self/key/bias/adam_m', 'bert/encoder/layer_4/attention/self/key/bias/adam_v', 'bert/encoder/layer_4/attention/self/key/kernel', 'bert/encoder/layer_4/attention/self/key/kernel/adam_m', 'bert/encoder/layer_4/attention/self/key/kernel/adam_v', 'bert/encoder/layer_4/attention/self/query/bias', 'bert/encoder/layer_4/attention/self/query/bias/adam_m', 'bert/encoder/layer_4/attention/self/query/bias/adam_v', 'bert/encoder/layer_4/attention/self/query/kernel', 'bert/encoder/layer_4/attention/self/query/kernel/adam_m', 'bert/encoder/layer_4/attention/self/query/kernel/adam_v', 'bert/encoder/layer_4/attention/self/value/bias', 'bert/encoder/layer_4/attention/self/value/bias/adam_m', 'bert/encoder/layer_4/attention/self/value/bias/adam_v', 'bert/encoder/layer_4/attention/self/value/kernel', 'bert/encoder/layer_4/attention/self/value/kernel/adam_m', 'bert/encoder/layer_4/attention/self/value/kernel/adam_v', 
'bert/encoder/layer_4/intermediate/dense/bias', 'bert/encoder/layer_4/intermediate/dense/bias/adam_m', 'bert/encoder/layer_4/intermediate/dense/bias/adam_v', 'bert/encoder/layer_4/intermediate/dense/kernel', 'bert/encoder/layer_4/intermediate/dense/kernel/adam_m', 'bert/encoder/layer_4/intermediate/dense/kernel/adam_v', 'bert/encoder/layer_4/output/LayerNorm/beta', 'bert/encoder/layer_4/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_4/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_4/output/LayerNorm/gamma', 'bert/encoder/layer_4/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_4/output/LayerNorm/gamma/adam_v', 'bert/encoder/layer_4/output/dense/bias', 'bert/encoder/layer_4/output/dense/bias/adam_m', 'bert/encoder/layer_4/output/dense/bias/adam_v', 'bert/encoder/layer_4/output/dense/kernel', 'bert/encoder/layer_4/output/dense/kernel/adam_m', 'bert/encoder/layer_4/output/dense/kernel/adam_v', 'bert/encoder/layer_5/attention/output/LayerNorm/beta', 'bert/encoder/layer_5/attention/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_5/attention/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_5/attention/output/LayerNorm/gamma', 'bert/encoder/layer_5/attention/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_5/attention/output/LayerNorm/gamma/adam_v', 'bert/encoder/layer_5/attention/output/dense/bias', 'bert/encoder/layer_5/attention/output/dense/bias/adam_m', 'bert/encoder/layer_5/attention/output/dense/bias/adam_v', 'bert/encoder/layer_5/attention/output/dense/kernel', 'bert/encoder/layer_5/attention/output/dense/kernel/adam_m', 'bert/encoder/layer_5/attention/output/dense/kernel/adam_v', 'bert/encoder/layer_5/attention/self/key/bias', 'bert/encoder/layer_5/attention/self/key/bias/adam_m', 'bert/encoder/layer_5/attention/self/key/bias/adam_v', 'bert/encoder/layer_5/attention/self/key/kernel', 'bert/encoder/layer_5/attention/self/key/kernel/adam_m', 'bert/encoder/layer_5/attention/self/key/kernel/adam_v', 'bert/encoder/layer_5/attention/self/query/bias', 
'bert/encoder/layer_5/attention/self/query/bias/adam_m', 'bert/encoder/layer_5/attention/self/query/bias/adam_v', 'bert/encoder/layer_5/attention/self/query/kernel', 'bert/encoder/layer_5/attention/self/query/kernel/adam_m', 'bert/encoder/layer_5/attention/self/query/kernel/adam_v', 'bert/encoder/layer_5/attention/self/value/bias', 'bert/encoder/layer_5/attention/self/value/bias/adam_m', 'bert/encoder/layer_5/attention/self/value/bias/adam_v', 'bert/encoder/layer_5/attention/self/value/kernel', 'bert/encoder/layer_5/attention/self/value/kernel/adam_m', 'bert/encoder/layer_5/attention/self/value/kernel/adam_v', 'bert/encoder/layer_5/intermediate/dense/bias', 'bert/encoder/layer_5/intermediate/dense/bias/adam_m', 'bert/encoder/layer_5/intermediate/dense/bias/adam_v', 'bert/encoder/layer_5/intermediate/dense/kernel', 'bert/encoder/layer_5/intermediate/dense/kernel/adam_m', 'bert/encoder/layer_5/intermediate/dense/kernel/adam_v', 'bert/encoder/layer_5/output/LayerNorm/beta', 'bert/encoder/layer_5/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_5/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_5/output/LayerNorm/gamma', 'bert/encoder/layer_5/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_5/output/LayerNorm/gamma/adam_v', 'bert/encoder/layer_5/output/dense/bias', 'bert/encoder/layer_5/output/dense/bias/adam_m', 'bert/encoder/layer_5/output/dense/bias/adam_v', 'bert/encoder/layer_5/output/dense/kernel', 'bert/encoder/layer_5/output/dense/kernel/adam_m', 'bert/encoder/layer_5/output/dense/kernel/adam_v', 'bert/encoder/layer_6/attention/output/LayerNorm/beta', 'bert/encoder/layer_6/attention/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_6/attention/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_6/attention/output/LayerNorm/gamma', 'bert/encoder/layer_6/attention/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_6/attention/output/LayerNorm/gamma/adam_v', 'bert/encoder/layer_6/attention/output/dense/bias', 
'bert/encoder/layer_6/attention/output/dense/bias/adam_m', 'bert/encoder/layer_6/attention/output/dense/bias/adam_v', 'bert/encoder/layer_6/attention/output/dense/kernel', 'bert/encoder/layer_6/attention/output/dense/kernel/adam_m', 'bert/encoder/layer_6/attention/output/dense/kernel/adam_v', 'bert/encoder/layer_6/attention/self/key/bias', 'bert/encoder/layer_6/attention/self/key/bias/adam_m', 'bert/encoder/layer_6/attention/self/key/bias/adam_v', 'bert/encoder/layer_6/attention/self/key/kernel', 'bert/encoder/layer_6/attention/self/key/kernel/adam_m', 'bert/encoder/layer_6/attention/self/key/kernel/adam_v', 'bert/encoder/layer_6/attention/self/query/bias', 'bert/encoder/layer_6/attention/self/query/bias/adam_m', 'bert/encoder/layer_6/attention/self/query/bias/adam_v', 'bert/encoder/layer_6/attention/self/query/kernel', 'bert/encoder/layer_6/attention/self/query/kernel/adam_m', 'bert/encoder/layer_6/attention/self/query/kernel/adam_v', 'bert/encoder/layer_6/attention/self/value/bias', 'bert/encoder/layer_6/attention/self/value/bias/adam_m', 'bert/encoder/layer_6/attention/self/value/bias/adam_v', 'bert/encoder/layer_6/attention/self/value/kernel', 'bert/encoder/layer_6/attention/self/value/kernel/adam_m', 'bert/encoder/layer_6/attention/self/value/kernel/adam_v', 'bert/encoder/layer_6/intermediate/dense/bias', 'bert/encoder/layer_6/intermediate/dense/bias/adam_m', 'bert/encoder/layer_6/intermediate/dense/bias/adam_v', 'bert/encoder/layer_6/intermediate/dense/kernel', 'bert/encoder/layer_6/intermediate/dense/kernel/adam_m', 'bert/encoder/layer_6/intermediate/dense/kernel/adam_v', 'bert/encoder/layer_6/output/LayerNorm/beta', 'bert/encoder/layer_6/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_6/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_6/output/LayerNorm/gamma', 'bert/encoder/layer_6/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_6/output/LayerNorm/gamma/adam_v', 'bert/encoder/layer_6/output/dense/bias', 
'bert/encoder/layer_6/output/dense/bias/adam_m', 'bert/encoder/layer_6/output/dense/bias/adam_v', 'bert/encoder/layer_6/output/dense/kernel', 'bert/encoder/layer_6/output/dense/kernel/adam_m', 'bert/encoder/layer_6/output/dense/kernel/adam_v', 'bert/encoder/layer_7/attention/output/LayerNorm/beta', 'bert/encoder/layer_7/attention/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_7/attention/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_7/attention/output/LayerNorm/gamma', 'bert/encoder/layer_7/attention/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_7/attention/output/LayerNorm/gamma/adam_v', 'bert/encoder/layer_7/attention/output/dense/bias', 'bert/encoder/layer_7/attention/output/dense/bias/adam_m', 'bert/encoder/layer_7/attention/output/dense/bias/adam_v', 'bert/encoder/layer_7/attention/output/dense/kernel', 'bert/encoder/layer_7/attention/output/dense/kernel/adam_m', 'bert/encoder/layer_7/attention/output/dense/kernel/adam_v', 'bert/encoder/layer_7/attention/self/key/bias', 'bert/encoder/layer_7/attention/self/key/bias/adam_m', 'bert/encoder/layer_7/attention/self/key/bias/adam_v', 'bert/encoder/layer_7/attention/self/key/kernel', 'bert/encoder/layer_7/attention/self/key/kernel/adam_m', 'bert/encoder/layer_7/attention/self/key/kernel/adam_v', 'bert/encoder/layer_7/attention/self/query/bias', 'bert/encoder/layer_7/attention/self/query/bias/adam_m', 'bert/encoder/layer_7/attention/self/query/bias/adam_v', 'bert/encoder/layer_7/attention/self/query/kernel', 'bert/encoder/layer_7/attention/self/query/kernel/adam_m', 'bert/encoder/layer_7/attention/self/query/kernel/adam_v', 'bert/encoder/layer_7/attention/self/value/bias', 'bert/encoder/layer_7/attention/self/value/bias/adam_m', 'bert/encoder/layer_7/attention/self/value/bias/adam_v', 'bert/encoder/layer_7/attention/self/value/kernel', 'bert/encoder/layer_7/attention/self/value/kernel/adam_m', 'bert/encoder/layer_7/attention/self/value/kernel/adam_v', 'bert/encoder/layer_7/intermediate/dense/bias', 
'bert/encoder/layer_7/intermediate/dense/bias/adam_m', 'bert/encoder/layer_7/intermediate/dense/bias/adam_v', 'bert/encoder/layer_7/intermediate/dense/kernel', 'bert/encoder/layer_7/intermediate/dense/kernel/adam_m', 'bert/encoder/layer_7/intermediate/dense/kernel/adam_v', 'bert/encoder/layer_7/output/LayerNorm/beta', 'bert/encoder/layer_7/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_7/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_7/output/LayerNorm/gamma', 'bert/encoder/layer_7/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_7/output/LayerNorm/gamma/adam_v', 'bert/encoder/layer_7/output/dense/bias', 'bert/encoder/layer_7/output/dense/bias/adam_m', 'bert/encoder/layer_7/output/dense/bias/adam_v', 'bert/encoder/layer_7/output/dense/kernel', 'bert/encoder/layer_7/output/dense/kernel/adam_m', 'bert/encoder/layer_7/output/dense/kernel/adam_v', 'bert/encoder/layer_8/attention/output/LayerNorm/beta', 'bert/encoder/layer_8/attention/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_8/attention/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_8/attention/output/LayerNorm/gamma', 'bert/encoder/layer_8/attention/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_8/attention/output/LayerNorm/gamma/adam_v', 'bert/encoder/layer_8/attention/output/dense/bias', 'bert/encoder/layer_8/attention/output/dense/bias/adam_m', 'bert/encoder/layer_8/attention/output/dense/bias/adam_v', 'bert/encoder/layer_8/attention/output/dense/kernel', 'bert/encoder/layer_8/attention/output/dense/kernel/adam_m', 'bert/encoder/layer_8/attention/output/dense/kernel/adam_v', 'bert/encoder/layer_8/attention/self/key/bias', 'bert/encoder/layer_8/attention/self/key/bias/adam_m', 'bert/encoder/layer_8/attention/self/key/bias/adam_v', 'bert/encoder/layer_8/attention/self/key/kernel', 'bert/encoder/layer_8/attention/self/key/kernel/adam_m', 'bert/encoder/layer_8/attention/self/key/kernel/adam_v', 'bert/encoder/layer_8/attention/self/query/bias', 
'bert/encoder/layer_8/attention/self/query/bias/adam_m', 'bert/encoder/layer_8/attention/self/query/bias/adam_v', 'bert/encoder/layer_8/attention/self/query/kernel', 'bert/encoder/layer_8/attention/self/query/kernel/adam_m', 'bert/encoder/layer_8/attention/self/query/kernel/adam_v', 'bert/encoder/layer_8/attention/self/value/bias', 'bert/encoder/layer_8/attention/self/value/bias/adam_m', 'bert/encoder/layer_8/attention/self/value/bias/adam_v', 'bert/encoder/layer_8/attention/self/value/kernel', 'bert/encoder/layer_8/attention/self/value/kernel/adam_m', 'bert/encoder/layer_8/attention/self/value/kernel/adam_v', 'bert/encoder/layer_8/intermediate/dense/bias', 'bert/encoder/layer_8/intermediate/dense/bias/adam_m', 'bert/encoder/layer_8/intermediate/dense/bias/adam_v', 'bert/encoder/layer_8/intermediate/dense/kernel', 'bert/encoder/layer_8/intermediate/dense/kernel/adam_m', 'bert/encoder/layer_8/intermediate/dense/kernel/adam_v', 'bert/encoder/layer_8/output/LayerNorm/beta', 'bert/encoder/layer_8/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_8/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_8/output/LayerNorm/gamma', 'bert/encoder/layer_8/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_8/output/LayerNorm/gamma/adam_v', 'bert/encoder/layer_8/output/dense/bias', 'bert/encoder/layer_8/output/dense/bias/adam_m', 'bert/encoder/layer_8/output/dense/bias/adam_v', 'bert/encoder/layer_8/output/dense/kernel', 'bert/encoder/layer_8/output/dense/kernel/adam_m', 'bert/encoder/layer_8/output/dense/kernel/adam_v', 'bert/encoder/layer_9/attention/output/LayerNorm/beta', 'bert/encoder/layer_9/attention/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_9/attention/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_9/attention/output/LayerNorm/gamma', 'bert/encoder/layer_9/attention/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_9/attention/output/LayerNorm/gamma/adam_v', 'bert/encoder/layer_9/attention/output/dense/bias', 
'bert/encoder/layer_9/attention/output/dense/bias/adam_m', 'bert/encoder/layer_9/attention/output/dense/bias/adam_v', 'bert/encoder/layer_9/attention/output/dense/kernel', 'bert/encoder/layer_9/attention/output/dense/kernel/adam_m', 'bert/encoder/layer_9/attention/output/dense/kernel/adam_v', 'bert/encoder/layer_9/attention/self/key/bias', 'bert/encoder/layer_9/attention/self/key/bias/adam_m', 'bert/encoder/layer_9/attention/self/key/bias/adam_v', 'bert/encoder/layer_9/attention/self/key/kernel', 'bert/encoder/layer_9/attention/self/key/kernel/adam_m', 'bert/encoder/layer_9/attention/self/key/kernel/adam_v', 'bert/encoder/layer_9/attention/self/query/bias', 'bert/encoder/layer_9/attention/self/query/bias/adam_m', 'bert/encoder/layer_9/attention/self/query/bias/adam_v', 'bert/encoder/layer_9/attention/self/query/kernel', 'bert/encoder/layer_9/attention/self/query/kernel/adam_m', 'bert/encoder/layer_9/attention/self/query/kernel/adam_v', 'bert/encoder/layer_9/attention/self/value/bias', 'bert/encoder/layer_9/attention/self/value/bias/adam_m', 'bert/encoder/layer_9/attention/self/value/bias/adam_v', 'bert/encoder/layer_9/attention/self/value/kernel', 'bert/encoder/layer_9/attention/self/value/kernel/adam_m', 'bert/encoder/layer_9/attention/self/value/kernel/adam_v', 'bert/encoder/layer_9/intermediate/dense/bias', 'bert/encoder/layer_9/intermediate/dense/bias/adam_m', 'bert/encoder/layer_9/intermediate/dense/bias/adam_v', 'bert/encoder/layer_9/intermediate/dense/kernel', 'bert/encoder/layer_9/intermediate/dense/kernel/adam_m', 'bert/encoder/layer_9/intermediate/dense/kernel/adam_v', 'bert/encoder/layer_9/output/LayerNorm/beta', 'bert/encoder/layer_9/output/LayerNorm/beta/adam_m', 'bert/encoder/layer_9/output/LayerNorm/beta/adam_v', 'bert/encoder/layer_9/output/LayerNorm/gamma', 'bert/encoder/layer_9/output/LayerNorm/gamma/adam_m', 'bert/encoder/layer_9/output/LayerNorm/gamma/adam_v', 'bert/encoder/layer_9/output/dense/bias', 
'bert/encoder/layer_9/output/dense/bias/adam_m', 'bert/encoder/layer_9/output/dense/bias/adam_v', 'bert/encoder/layer_9/output/dense/kernel', 'bert/encoder/layer_9/output/dense/kernel/adam_m', 'bert/encoder/layer_9/output/dense/kernel/adam_v', 'bert/pooler/dense/bias', 'bert/pooler/dense/bias/adam_m', 'bert/pooler/dense/bias/adam_v', 'bert/pooler/dense/kernel', 'bert/pooler/dense/kernel/adam_m', 'bert/pooler/dense/kernel/adam_v', 'output_bias', 'output_bias/adam_m', 'output_bias/adam_v', 'output_weights', 'output_weights/adam_m', 'output_weights/adam_v']\nmodel bert\nmodel embeddings\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel embeddings\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel embeddings\nmodel position_embeddings\nmodel bert\nmodel embeddings\nmodel token_type_embeddings\nmodel bert\nmodel embeddings\nmodel word_embeddings\nmodel bert\nmodel encoder\nmodel layer_0\nmodel attention\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_0\nmodel attention\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel layer_0\nmodel attention\nmodel output\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_0\nmodel attention\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_0\nmodel attention\nmodel self\nmodel key\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_0\nmodel attention\nmodel self\nmodel key\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_0\nmodel attention\nmodel self\nmodel query\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_0\nmodel attention\nmodel self\nmodel query\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_0\nmodel attention\nmodel self\nmodel value\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_0\nmodel attention\nmodel self\nmodel value\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_0\nmodel intermediate\nmodel dense\nmodel bias\nmodel 
bert\nmodel encoder\nmodel layer_0\nmodel intermediate\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_0\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_0\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel layer_0\nmodel output\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_0\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_1\nmodel attention\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_1\nmodel attention\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel layer_1\nmodel attention\nmodel output\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_1\nmodel attention\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_1\nmodel attention\nmodel self\nmodel key\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_1\nmodel attention\nmodel self\nmodel key\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_1\nmodel attention\nmodel self\nmodel query\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_1\nmodel attention\nmodel self\nmodel query\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_1\nmodel attention\nmodel self\nmodel value\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_1\nmodel attention\nmodel self\nmodel value\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_1\nmodel intermediate\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_1\nmodel intermediate\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_1\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_1\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel layer_1\nmodel output\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_1\nmodel 
output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_10\nmodel attention\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_10\nmodel attention\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel layer_10\nmodel attention\nmodel output\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_10\nmodel attention\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_10\nmodel attention\nmodel self\nmodel key\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_10\nmodel attention\nmodel self\nmodel key\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_10\nmodel attention\nmodel self\nmodel query\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_10\nmodel attention\nmodel self\nmodel query\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_10\nmodel attention\nmodel self\nmodel value\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_10\nmodel attention\nmodel self\nmodel value\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_10\nmodel intermediate\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_10\nmodel intermediate\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_10\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_10\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel layer_10\nmodel output\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_10\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_11\nmodel attention\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_11\nmodel attention\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel layer_11\nmodel attention\nmodel output\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel 
layer_11\nmodel attention\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_11\nmodel attention\nmodel self\nmodel key\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_11\nmodel attention\nmodel self\nmodel key\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_11\nmodel attention\nmodel self\nmodel query\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_11\nmodel attention\nmodel self\nmodel query\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_11\nmodel attention\nmodel self\nmodel value\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_11\nmodel attention\nmodel self\nmodel value\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_11\nmodel intermediate\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_11\nmodel intermediate\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_11\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_11\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel layer_11\nmodel output\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_11\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_12\nmodel attention\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_12\nmodel attention\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel layer_12\nmodel attention\nmodel output\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_12\nmodel attention\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_12\nmodel attention\nmodel self\nmodel key\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_12\nmodel attention\nmodel self\nmodel key\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_12\nmodel attention\nmodel self\nmodel query\nmodel bias\nmodel 
bert\nmodel encoder\nmodel layer_12\nmodel attention\nmodel self\nmodel query\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_12\nmodel attention\nmodel self\nmodel value\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_12\nmodel attention\nmodel self\nmodel value\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_12\nmodel intermediate\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_12\nmodel intermediate\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_12\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_12\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel layer_12\nmodel output\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_12\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_13\nmodel attention\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_13\nmodel attention\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel layer_13\nmodel attention\nmodel output\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_13\nmodel attention\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_13\nmodel attention\nmodel self\nmodel key\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_13\nmodel attention\nmodel self\nmodel key\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_13\nmodel attention\nmodel self\nmodel query\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_13\nmodel attention\nmodel self\nmodel query\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_13\nmodel attention\nmodel self\nmodel value\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_13\nmodel attention\nmodel self\nmodel value\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_13\nmodel intermediate\nmodel dense\nmodel 
bias\nmodel bert\nmodel encoder\nmodel layer_13\nmodel intermediate\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_13\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_13\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel layer_13\nmodel output\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_13\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_14\nmodel attention\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_14\nmodel attention\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel layer_14\nmodel attention\nmodel output\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_14\nmodel attention\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_14\nmodel attention\nmodel self\nmodel key\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_14\nmodel attention\nmodel self\nmodel key\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_14\nmodel attention\nmodel self\nmodel query\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_14\nmodel attention\nmodel self\nmodel query\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_14\nmodel attention\nmodel self\nmodel value\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_14\nmodel attention\nmodel self\nmodel value\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_14\nmodel intermediate\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_14\nmodel intermediate\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_14\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_14\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel layer_14\nmodel output\nmodel dense\nmodel bias\nmodel bert\nmodel 
encoder\nmodel layer_14\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_15\nmodel attention\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_15\nmodel attention\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel layer_15\nmodel attention\nmodel output\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_15\nmodel attention\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_15\nmodel attention\nmodel self\nmodel key\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_15\nmodel attention\nmodel self\nmodel key\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_15\nmodel attention\nmodel self\nmodel query\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_15\nmodel attention\nmodel self\nmodel query\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_15\nmodel attention\nmodel self\nmodel value\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_15\nmodel attention\nmodel self\nmodel value\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_15\nmodel intermediate\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_15\nmodel intermediate\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_15\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_15\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel layer_15\nmodel output\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_15\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_16\nmodel attention\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_16\nmodel attention\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel layer_16\nmodel attention\nmodel output\nmodel dense\nmodel 
bias\nmodel bert\nmodel encoder\nmodel layer_16\nmodel attention\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_16\nmodel attention\nmodel self\nmodel key\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_16\nmodel attention\nmodel self\nmodel key\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_16\nmodel attention\nmodel self\nmodel query\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_16\nmodel attention\nmodel self\nmodel query\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_16\nmodel attention\nmodel self\nmodel value\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_16\nmodel attention\nmodel self\nmodel value\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_16\nmodel intermediate\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_16\nmodel intermediate\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_16\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_16\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel layer_16\nmodel output\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_16\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_17\nmodel attention\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_17\nmodel attention\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel layer_17\nmodel attention\nmodel output\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_17\nmodel attention\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_17\nmodel attention\nmodel self\nmodel key\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_17\nmodel attention\nmodel self\nmodel key\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_17\nmodel attention\nmodel 
self\nmodel query\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_17\nmodel attention\nmodel self\nmodel query\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_17\nmodel attention\nmodel self\nmodel value\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_17\nmodel attention\nmodel self\nmodel value\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_17\nmodel intermediate\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_17\nmodel intermediate\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_17\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_17\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel layer_17\nmodel output\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_17\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_18\nmodel attention\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_18\nmodel attention\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel layer_18\nmodel attention\nmodel output\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_18\nmodel attention\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_18\nmodel attention\nmodel self\nmodel key\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_18\nmodel attention\nmodel self\nmodel key\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_18\nmodel attention\nmodel self\nmodel query\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_18\nmodel attention\nmodel self\nmodel query\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_18\nmodel attention\nmodel self\nmodel value\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_18\nmodel attention\nmodel self\nmodel value\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel 
layer_18\nmodel intermediate\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_18\nmodel intermediate\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_18\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_18\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel layer_18\nmodel output\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_18\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_19\nmodel attention\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_19\nmodel attention\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel layer_19\nmodel attention\nmodel output\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_19\nmodel attention\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_19\nmodel attention\nmodel self\nmodel key\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_19\nmodel attention\nmodel self\nmodel key\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_19\nmodel attention\nmodel self\nmodel query\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_19\nmodel attention\nmodel self\nmodel query\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_19\nmodel attention\nmodel self\nmodel value\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_19\nmodel attention\nmodel self\nmodel value\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_19\nmodel intermediate\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_19\nmodel intermediate\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_19\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_19\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel layer_19\nmodel 
output\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_19\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_2\nmodel attention\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_2\nmodel attention\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel layer_2\nmodel attention\nmodel output\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_2\nmodel attention\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_2\nmodel attention\nmodel self\nmodel key\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_2\nmodel attention\nmodel self\nmodel key\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_2\nmodel attention\nmodel self\nmodel query\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_2\nmodel attention\nmodel self\nmodel query\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_2\nmodel attention\nmodel self\nmodel value\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_2\nmodel attention\nmodel self\nmodel value\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_2\nmodel intermediate\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_2\nmodel intermediate\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_2\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_2\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel layer_2\nmodel output\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_2\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_20\nmodel attention\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_20\nmodel attention\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel layer_20\nmodel attention\nmodel 
output\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_20\nmodel attention\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_20\nmodel attention\nmodel self\nmodel key\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_20\nmodel attention\nmodel self\nmodel key\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_20\nmodel attention\nmodel self\nmodel query\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_20\nmodel attention\nmodel self\nmodel query\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_20\nmodel attention\nmodel self\nmodel value\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_20\nmodel attention\nmodel self\nmodel value\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_20\nmodel intermediate\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_20\nmodel intermediate\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_20\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_20\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel layer_20\nmodel output\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_20\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_21\nmodel attention\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_21\nmodel attention\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel layer_21\nmodel attention\nmodel output\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_21\nmodel attention\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_21\nmodel attention\nmodel self\nmodel key\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_21\nmodel attention\nmodel self\nmodel key\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel 
layer_21\nmodel attention\nmodel self\nmodel query\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_21\nmodel attention\nmodel self\nmodel query\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_21\nmodel attention\nmodel self\nmodel value\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_21\nmodel attention\nmodel self\nmodel value\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_21\nmodel intermediate\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_21\nmodel intermediate\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_21\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_21\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel layer_21\nmodel output\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_21\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_22\nmodel attention\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_22\nmodel attention\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel layer_22\nmodel attention\nmodel output\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_22\nmodel attention\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_22\nmodel attention\nmodel self\nmodel key\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_22\nmodel attention\nmodel self\nmodel key\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_22\nmodel attention\nmodel self\nmodel query\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_22\nmodel attention\nmodel self\nmodel query\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_22\nmodel attention\nmodel self\nmodel value\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_22\nmodel attention\nmodel self\nmodel value\nmodel kernel\n['kernel']\nmodel 
bert\nmodel encoder\nmodel layer_22\nmodel intermediate\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_22\nmodel intermediate\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_22\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_22\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel layer_22\nmodel output\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_22\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_23\nmodel attention\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_23\nmodel attention\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel layer_23\nmodel attention\nmodel output\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_23\nmodel attention\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_23\nmodel attention\nmodel self\nmodel key\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_23\nmodel attention\nmodel self\nmodel key\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_23\nmodel attention\nmodel self\nmodel query\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_23\nmodel attention\nmodel self\nmodel query\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_23\nmodel attention\nmodel self\nmodel value\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_23\nmodel attention\nmodel self\nmodel value\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_23\nmodel intermediate\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_23\nmodel intermediate\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_23\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_23\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel 
encoder\nmodel layer_23\nmodel output\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_23\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_3\nmodel attention\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_3\nmodel attention\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel layer_3\nmodel attention\nmodel output\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_3\nmodel attention\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_3\nmodel attention\nmodel self\nmodel key\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_3\nmodel attention\nmodel self\nmodel key\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_3\nmodel attention\nmodel self\nmodel query\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_3\nmodel attention\nmodel self\nmodel query\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_3\nmodel attention\nmodel self\nmodel value\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_3\nmodel attention\nmodel self\nmodel value\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_3\nmodel intermediate\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_3\nmodel intermediate\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_3\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_3\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel layer_3\nmodel output\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_3\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_4\nmodel attention\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_4\nmodel attention\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel 
layer_4\nmodel attention\nmodel output\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_4\nmodel attention\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_4\nmodel attention\nmodel self\nmodel key\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_4\nmodel attention\nmodel self\nmodel key\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_4\nmodel attention\nmodel self\nmodel query\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_4\nmodel attention\nmodel self\nmodel query\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_4\nmodel attention\nmodel self\nmodel value\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_4\nmodel attention\nmodel self\nmodel value\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_4\nmodel intermediate\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_4\nmodel intermediate\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_4\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_4\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel layer_4\nmodel output\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_4\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_5\nmodel attention\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_5\nmodel attention\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel layer_5\nmodel attention\nmodel output\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_5\nmodel attention\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_5\nmodel attention\nmodel self\nmodel key\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_5\nmodel attention\nmodel self\nmodel key\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel 
layer_5\nmodel attention\nmodel self\nmodel query\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_5\nmodel attention\nmodel self\nmodel query\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_5\nmodel attention\nmodel self\nmodel value\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_5\nmodel attention\nmodel self\nmodel value\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_5\nmodel intermediate\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_5\nmodel intermediate\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_5\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_5\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel layer_5\nmodel output\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_5\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_6\nmodel attention\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_6\nmodel attention\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel layer_6\nmodel attention\nmodel output\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_6\nmodel attention\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_6\nmodel attention\nmodel self\nmodel key\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_6\nmodel attention\nmodel self\nmodel key\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_6\nmodel attention\nmodel self\nmodel query\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_6\nmodel attention\nmodel self\nmodel query\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_6\nmodel attention\nmodel self\nmodel value\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_6\nmodel attention\nmodel self\nmodel value\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel 
layer_6\nmodel intermediate\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_6\nmodel intermediate\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_6\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_6\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel layer_6\nmodel output\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_6\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_7\nmodel attention\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_7\nmodel attention\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel layer_7\nmodel attention\nmodel output\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_7\nmodel attention\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_7\nmodel attention\nmodel self\nmodel key\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_7\nmodel attention\nmodel self\nmodel key\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_7\nmodel attention\nmodel self\nmodel query\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_7\nmodel attention\nmodel self\nmodel query\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_7\nmodel attention\nmodel self\nmodel value\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_7\nmodel attention\nmodel self\nmodel value\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_7\nmodel intermediate\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_7\nmodel intermediate\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_7\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_7\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel layer_7\nmodel output\nmodel dense\nmodel 
bias\nmodel bert\nmodel encoder\nmodel layer_7\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_8\nmodel attention\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_8\nmodel attention\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel layer_8\nmodel attention\nmodel output\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_8\nmodel attention\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_8\nmodel attention\nmodel self\nmodel key\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_8\nmodel attention\nmodel self\nmodel key\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_8\nmodel attention\nmodel self\nmodel query\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_8\nmodel attention\nmodel self\nmodel query\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_8\nmodel attention\nmodel self\nmodel value\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_8\nmodel attention\nmodel self\nmodel value\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_8\nmodel intermediate\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_8\nmodel intermediate\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_8\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_8\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel layer_8\nmodel output\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_8\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_9\nmodel attention\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_9\nmodel attention\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel layer_9\nmodel attention\nmodel output\nmodel dense\nmodel 
bias\nmodel bert\nmodel encoder\nmodel layer_9\nmodel attention\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_9\nmodel attention\nmodel self\nmodel key\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_9\nmodel attention\nmodel self\nmodel key\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_9\nmodel attention\nmodel self\nmodel query\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_9\nmodel attention\nmodel self\nmodel query\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_9\nmodel attention\nmodel self\nmodel value\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_9\nmodel attention\nmodel self\nmodel value\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_9\nmodel intermediate\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_9\nmodel intermediate\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel encoder\nmodel layer_9\nmodel output\nmodel LayerNorm\nmodel beta\nmodel bert\nmodel encoder\nmodel layer_9\nmodel output\nmodel LayerNorm\nmodel gamma\n['gamma']\nmodel bert\nmodel encoder\nmodel layer_9\nmodel output\nmodel dense\nmodel bias\nmodel bert\nmodel encoder\nmodel layer_9\nmodel output\nmodel dense\nmodel kernel\n['kernel']\nmodel bert\nmodel pooler\nmodel dense\nmodel bias\nmodel bert\nmodel pooler\nmodel dense\nmodel kernel\n['kernel']\nmodel classifier\nmodel output_bias\nmodel classifier\nmodel output_weights\n['output_weights']\nSave PyTorch model to pytorch_output_temp\n"
]
],
[
[
"### Upload converted checkpoint and test inference\nIf everything goes smoothly we should be able to upload weights and use the converted model.",
"_____no_output_____"
]
],
[
[
"from transformers import BertTokenizer\ntokenizer = BertTokenizer.from_pretrained('biobert_large/vocab_cased_pubmed_pmc_30k.txt')\nmodel2.eval()\ninput_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\", add_special_tokens=True)).unsqueeze(0) # Batch size 1\nlabels = torch.tensor([1]).unsqueeze(0) # Batch size 1\noutputs = model2(input_ids)",
"Calling BertTokenizer.from_pretrained() with the path to a single file or url is deprecated\n"
],
[
"outputs = model2(input_ids)\noutputs",
"_____no_output_____"
],
[
"input_ids = torch.tensor(tokenizer.encode(\"All our results indicate that the presence of the @GENE$ genotype (++) in patients with structural @DISEASE$, severe left ventricular dysfunction and malignant ventricular arrhythmias increases the risk for these patients of hemodynamic collapse during these arrhythmias\"))",
"_____no_output_____"
],
[
"outputs = model2(input_ids.unsqueeze(0))\noutputs",
"_____no_output_____"
],
[
"values, indices = torch.max(outputs[0], 1, keepdim=False)\nindices\n",
"_____no_output_____"
]
],
[
[
"**Lets refactor this into something nicer**",
"_____no_output_____"
]
],
[
[
"from transformers import BertConfig, BertForSequenceClassification, BertForPreTraining\nfrom transformers import BertTokenizer\nclass InferSequenceClassifier(object):\n def __init__(self, pytorch_model_path, token_path, add_special_tokens=False):\n self.tokenizer = BertTokenizer.from_pretrained(token_path)\n self.model = BertForSequenceClassification.from_pretrained(pytorch_model_path)\n self.add_special_tokens = add_special_tokens\n\n def make_prediction(self, text):\n input_ids = torch.tensor(self.tokenizer.encode(text, add_special_tokens=self.add_special_tokens))\n outputs = self.model(input_ids.unsqueeze(0))\n print(outputs)\n values, indices = torch.max(outputs[0], 1, keepdim=False)\n return indices",
"_____no_output_____"
],
[
"!cp biobert_large/vocab_cased_pubmed_pmc_30k.txt pytorch_output_temp/vocab.txt\n!cp biobert_large/bert_config_bio_58k_large.json pytorch_output_temp/config.json",
"_____no_output_____"
],
[
"seq_infer = InferSequenceClassifier(\"pytorch_output_temp\", \"pytorch_output_temp\", True)",
"_____no_output_____"
],
[
"seq_infer.make_prediction(\"@GENE$ influences brain beta-@DISEASE$ load, cerebrospinal fluid levels of beta-amyloid peptides and phosphorylated tau, and the genetic risk of late-onset sporadic AD.\")",
"(tensor([[-0.3223, 0.5159]], grad_fn=<AddmmBackward>),)\n"
],
[
"seq_infer.make_prediction(\"All our results indicate that the presence of the @GENE$ genotype (++) in patients with structural @DISEASE$, severe left ventricular dysfunction and malignant ventricular arrhythmias increases the risk for these patients of hemodynamic collapse during these arrhythmias\")",
"(tensor([[-0.4003, 0.4932]], grad_fn=<AddmmBackward>),)\n"
],
[
"seq_infer.make_prediction(\"Functional studies to unravel the biological significance of this region in regulating @GENE$ production is clearly indicated, which may lead to new strategies to modify the disease course of severe @DISEASE$.\")",
"(tensor([[-0.3648, 0.4784]], grad_fn=<AddmmBackward>),)\n"
],
[
"!gsutil cp -r pytorch_output_temp gs://coronavirusqa/re_convert",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4abe887121af051aa47cc62fd83bb28fc86e3e42
| 3,252 |
ipynb
|
Jupyter Notebook
|
examples/user-guide/content/4-saving.ipynb
|
vlcekl/ngboost
|
a446e3ac84f196de29d734e43858c70ea04f4ea3
|
[
"Apache-2.0"
] | 1,255 |
2019-10-10T18:04:40.000Z
|
2022-03-27T17:43:13.000Z
|
examples/user-guide/content/4-saving.ipynb
|
vlcekl/ngboost
|
a446e3ac84f196de29d734e43858c70ea04f4ea3
|
[
"Apache-2.0"
] | 227 |
2019-10-12T02:04:25.000Z
|
2022-03-01T15:26:41.000Z
|
examples/user-guide/content/4-saving.ipynb
|
vlcekl/ngboost
|
a446e3ac84f196de29d734e43858c70ea04f4ea3
|
[
"Apache-2.0"
] | 225 |
2019-10-11T21:27:44.000Z
|
2022-03-29T07:51:14.000Z
| 21.68 | 94 | 0.537823 |
[
[
[
"# Saving Models",
"_____no_output_____"
]
],
[
[
"import sys\nsys.path.append('/Users/c242587/Desktop/projects/git/ngboost')",
"_____no_output_____"
],
[
"from ngboost import NGBRegressor\n\nfrom sklearn.datasets import load_boston\nfrom sklearn.model_selection import train_test_split\n\nX, Y = load_boston(True)\nX_reg_train, X_reg_test, Y_reg_train, Y_reg_test = train_test_split(X, Y, test_size=0.2)",
"_____no_output_____"
]
],
[
[
"Saving ngboost models is easy with the `pickle` package:",
"_____no_output_____"
]
],
[
[
"ngb = NGBRegressor().fit(X_reg_train, Y_reg_train)",
"[iter 0] loss=3.6313 val_loss=0.0000 scale=0.5000 norm=3.3049\n[iter 100] loss=3.0325 val_loss=0.0000 scale=1.0000 norm=3.5726\n[iter 200] loss=2.3759 val_loss=0.0000 scale=2.0000 norm=3.8621\n[iter 300] loss=2.0014 val_loss=0.0000 scale=2.0000 norm=3.1113\n[iter 400] loss=1.8321 val_loss=0.0000 scale=1.0000 norm=1.4179\n"
],
[
"import pickle\nfrom pathlib import Path\n\nfile_path = Path.home()/'Desktop'/'ngbtest.p'\n\nwith file_path.open(\"wb\") as f:\n pickle.dump(ngb, f)",
"_____no_output_____"
],
[
"with file_path.open(\"rb\") as f:\n ngb_unpickled = pickle.load(f)",
"_____no_output_____"
],
[
"Y_preds = ngb_unpickled.predict(X_reg_test)\nY_dists = ngb_unpickled.pred_dist(X_reg_test)\n\nY_dists[0:5].params",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
4abe8f5939e6aed9db84f9211f96807a25f226f1
| 43,718 |
ipynb
|
Jupyter Notebook
|
Experiments/Grade3Spring/BF3正比计数管/bf3.ipynb
|
ustcpetergu/USTCPhysExpData
|
8615e5673002e1ddf6bc76fd1566c5df3f39c497
|
[
"MIT"
] | 7 |
2019-08-11T04:23:35.000Z
|
2021-03-27T06:51:33.000Z
|
Experiments/Grade3Spring/BF3正比计数管/.ipynb_checkpoints/bf3-checkpoint.ipynb
|
ustcpetergu/USTCPhysExpData
|
8615e5673002e1ddf6bc76fd1566c5df3f39c497
|
[
"MIT"
] | null | null | null |
Experiments/Grade3Spring/BF3正比计数管/.ipynb_checkpoints/bf3-checkpoint.ipynb
|
ustcpetergu/USTCPhysExpData
|
8615e5673002e1ddf6bc76fd1566c5df3f39c497
|
[
"MIT"
] | null | null | null | 181.40249 | 19,976 | 0.903632 |
[
[
[
"# -*- coding: utf-8 -*-\nfrom physicsexp.mainfunc import *\nfrom physicsexp.gendocx import *",
"_____no_output_____"
],
[
"# voltage plateau curve 1 (V), valve voltage 0.5V\nvolt = np.array([30*i+1290 for i in range(31)])\nvolt",
"_____no_output_____"
],
[
"count = np.array([\n 227 , 1853, 7711, 30929, 35579, 38687, 40657, 40244, 43141, 44248, 45497, \n 46315, 46412, 46135, 46383, 46495, 46166, 46375, 46362, 46599, 46436, 46173, \n 46440, 46787, 46326, 46577, 46795, 48648, 50061, 54933, 62629\n])",
"_____no_output_____"
],
[
"# voltage plateau curve 2(optimal) (V), valve voltage 0.2V\nvolt2 = np.array([30*i+960 for i in range(37)])\nvolt2",
"_____no_output_____"
],
[
"count2 = np.array([\n 121, 134, 210, 796, 1589, 8043, 25790, 21294, 36225, 35963, 40003, 41969, 43146,\n 43673, 44782, 45562, 46464, 46177, 46465, 46355, 46592, 46220, 46002, 46323, 46226, \n 46666, 46419, 46185, 46110, 46782, 46436, 46851, 46852, 47721, 51118, 56529, 60497\n])",
"_____no_output_____"
],
[
"simple_plot(volt, count, show=0, dot='+', lab='阈值0.5V')\nsimple_plot(volt2, count2, show=1, save='1.png', xlab='电压', ylab='计数', title='高压坪曲线', dot='.', lab='阈值0.2V')",
"_____no_output_____"
],
[
"# plateau length & plateau slope 1\nprint(volt[-5] - volt[11])\nprint(count[-5] - count[11])\nprint( (count[-5] - count[11])/(sum(count[11:-5+1])/len(count[11:-5+1])) / ((volt[-5] - volt[11])/100))",
"450\n480\n0.002297686875540764\n"
],
[
"# plateau length & plateau slope 2\nprint(volt2[-5] - volt2[16])\nprint(count2[-5] - count2[16])\nprint( (count2[-5] - count2[16])/(sum(count2[16:-5+1])/len(count2[16:-5+1])) / ((volt2[-5] - volt2[16])/100), '%/100V')\n# this is slightly better",
"480\n388\n0.0017413802207085908 %/100V\n"
],
[
"# screening threshold plateau(V)\n# voltage 1710V\nth = np.array([.0, .02, .04, .06, .08, .1, .2, .3, .4, .5, .6, .7, .8, .9, 1., 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.])",
"_____no_output_____"
],
[
"thcount = np.array([\n 52281, 48644, 46780, 46453, 46314, 46414, 46463, 46513, 46652, 46521, 46243, 46488, 46083, 45549,\n 45045, 44477, 43684, 43350, 42611, 41718, 40864, 40302, 38847, 37756, 36366\n])",
"_____no_output_____"
],
[
"simple_plot(th, thcount, save='2.png', xlab='甄别阈(V)', ylab='计数', title='甄别阈坪曲线')",
"_____no_output_____"
],
[
"gendocx('gen.docx', '1.png', '2.png')",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4abe913dcf3486f6f049c07cc0eadb3794b11a39
| 5,123 |
ipynb
|
Jupyter Notebook
|
other_social/instagram_scrape.ipynb
|
tubademir23/twint
|
c2bb89700b2587fa5752cf45d047ea64041862a7
|
[
"MIT"
] | null | null | null |
other_social/instagram_scrape.ipynb
|
tubademir23/twint
|
c2bb89700b2587fa5752cf45d047ea64041862a7
|
[
"MIT"
] | null | null | null |
other_social/instagram_scrape.ipynb
|
tubademir23/twint
|
c2bb89700b2587fa5752cf45d047ea64041862a7
|
[
"MIT"
] | null | null | null | 27.842391 | 77 | 0.426313 |
[
[
[
"\r\n# çalışıyor.. instagram-scraper DiyanetTV",
"_____no_output_____"
],
[
"#https://pypi.org/project/instagramy/\r\nfrom instagramy import InstagramUser \r\nfrom instagramy import InstagramPost\r\n# Connecting the profile \r\nuser = InstagramUser(\"DiyanetTV\") \r\n\r\n# printing the basic details like \r\n# followers, following, bio \r\nprint(user.is_verified) \r\nprint(user) \r\nprint(user.biography) \r\nsession_id=\"10822774760%3A9qEyIdlDtoozLv%3A2\"\r\n",
"True\nDiyanet TV (diyanettv) -> Diyanet TV Resmi Instagram Sayfası\nDiyanet TV YouTube Kanalı: ⤵️\nDiyanet TV Resmi Instagram Sayfası\nDiyanet TV YouTube Kanalı: ⤵️\n"
],
[
"from instagramy.plugins.analysis import analyze_users_popularity\r\nimport pandas as pd\r\nteams = [\"DiyanetTV\", \"diyanetradyo\",\r\n \"diyanetduyurular\", \"diyanethbr\",\r\n \"diyanetduyurular\", \"diyanetesoralim\",\r\n \"diyanetcocuk\"]\r\ndata = analyze_users_popularity(teams, session_id)\r\npd.DataFrame.from_dict(data)\r\n\r\n",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code"
]
] |
4abe99963570b9521332c188a542d18fc902152f
| 758,852 |
ipynb
|
Jupyter Notebook
|
Build Week Project/Untitled.ipynb
|
VPDeb/DS-Unit-2-Applied-Modeling
|
d9d75ac3ce4f7ad630215da92de745641c18662f
|
[
"MIT"
] | null | null | null |
Build Week Project/Untitled.ipynb
|
VPDeb/DS-Unit-2-Applied-Modeling
|
d9d75ac3ce4f7ad630215da92de745641c18662f
|
[
"MIT"
] | null | null | null |
Build Week Project/Untitled.ipynb
|
VPDeb/DS-Unit-2-Applied-Modeling
|
d9d75ac3ce4f7ad630215da92de745641c18662f
|
[
"MIT"
] | null | null | null | 269.096454 | 57,228 | 0.752276 |
[
[
[
"!pip install eli5\n!pip install xgboost",
"Requirement already satisfied: eli5 in /Users/victoriadebebe/.local/share/virtualenvs/DS-Unit-2-Applied-Modeling-vWvUDltt/lib/python3.8/site-packages (0.10.1)\nRequirement already satisfied: attrs>16.0.0 in /Users/victoriadebebe/.local/share/virtualenvs/DS-Unit-2-Applied-Modeling-vWvUDltt/lib/python3.8/site-packages (from eli5) (20.2.0)\nRequirement already satisfied: six in /Users/victoriadebebe/.local/share/virtualenvs/DS-Unit-2-Applied-Modeling-vWvUDltt/lib/python3.8/site-packages (from eli5) (1.15.0)\nRequirement already satisfied: scikit-learn>=0.18 in /Users/victoriadebebe/.local/share/virtualenvs/DS-Unit-2-Applied-Modeling-vWvUDltt/lib/python3.8/site-packages (from eli5) (0.23.2)\nRequirement already satisfied: jinja2 in /Users/victoriadebebe/.local/share/virtualenvs/DS-Unit-2-Applied-Modeling-vWvUDltt/lib/python3.8/site-packages (from eli5) (2.11.2)\nRequirement already satisfied: tabulate>=0.7.7 in /Users/victoriadebebe/.local/share/virtualenvs/DS-Unit-2-Applied-Modeling-vWvUDltt/lib/python3.8/site-packages (from eli5) (0.8.7)\nRequirement already satisfied: numpy>=1.9.0 in /Users/victoriadebebe/.local/share/virtualenvs/DS-Unit-2-Applied-Modeling-vWvUDltt/lib/python3.8/site-packages (from eli5) (1.19.2)\nRequirement already satisfied: graphviz in /Users/victoriadebebe/.local/share/virtualenvs/DS-Unit-2-Applied-Modeling-vWvUDltt/lib/python3.8/site-packages (from eli5) (0.14.2)\nRequirement already satisfied: scipy in /Users/victoriadebebe/.local/share/virtualenvs/DS-Unit-2-Applied-Modeling-vWvUDltt/lib/python3.8/site-packages (from eli5) (1.5.3)\nRequirement already satisfied: joblib>=0.11 in /Users/victoriadebebe/.local/share/virtualenvs/DS-Unit-2-Applied-Modeling-vWvUDltt/lib/python3.8/site-packages (from scikit-learn>=0.18->eli5) (0.17.0)\nRequirement already satisfied: threadpoolctl>=2.0.0 in /Users/victoriadebebe/.local/share/virtualenvs/DS-Unit-2-Applied-Modeling-vWvUDltt/lib/python3.8/site-packages (from scikit-learn>=0.18->eli5) 
(2.1.0)\nRequirement already satisfied: MarkupSafe>=0.23 in /Users/victoriadebebe/.local/share/virtualenvs/DS-Unit-2-Applied-Modeling-vWvUDltt/lib/python3.8/site-packages (from jinja2->eli5) (1.1.1)\nRequirement already satisfied: xgboost in /Users/victoriadebebe/.local/share/virtualenvs/DS-Unit-2-Applied-Modeling-vWvUDltt/lib/python3.8/site-packages (1.2.1)\nRequirement already satisfied: numpy in /Users/victoriadebebe/.local/share/virtualenvs/DS-Unit-2-Applied-Modeling-vWvUDltt/lib/python3.8/site-packages (from xgboost) (1.19.2)\nRequirement already satisfied: scipy in /Users/victoriadebebe/.local/share/virtualenvs/DS-Unit-2-Applied-Modeling-vWvUDltt/lib/python3.8/site-packages (from xgboost) (1.5.3)\n"
]
],
[
[
"## Import of Libraries needed",
"_____no_output_____"
]
],
[
[
"\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.impute import SimpleImputer\nfrom category_encoders import OrdinalEncoder\nfrom xgboost import XGBClassifier\nfrom sklearn.inspection import permutation_importance\nfrom sklearn.model_selection import RandomizedSearchCV, GridSearchCV\nfrom sklearn.metrics import classification_report, plot_confusion_matrix, plot_roc_curve\nimport matplotlib.pyplot as plt\nfrom skopt import BayesSearchCV\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import cross_val_score",
"_____no_output_____"
]
],
[
[
"## Import Datasets",
"_____no_output_____"
]
],
[
[
"train = pd.read_csv('train.csv')\ntest = pd.read_csv('test.csv')\ncensus = pd.read_csv('census.csv')\nprint(train.shape, test.shape, census.shape)",
"(32561, 15) (16281, 15) (48842, 15)\n"
]
],
[
[
"## Begin EDA",
"_____no_output_____"
]
],
[
[
"#checking for null values and column types, interesting to see no 'missing' values I'll dive a little further.\ncensus.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 48842 entries, 0 to 48841\nData columns (total 15 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 age 48842 non-null int64 \n 1 workclass 48842 non-null object\n 2 fnlwgt 48842 non-null int64 \n 3 education 48842 non-null object\n 4 education-num 48842 non-null int64 \n 5 marital-status 48842 non-null object\n 6 occupation 48842 non-null object\n 7 relationship 48842 non-null object\n 8 race 48842 non-null object\n 9 sex 48842 non-null object\n 10 capital-gain 48842 non-null int64 \n 11 capital-loss 48842 non-null int64 \n 12 hours-per-week 48842 non-null int64 \n 13 native-country 48842 non-null object\n 14 income 48842 non-null object\ndtypes: int64(6), object(9)\nmemory usage: 5.6+ MB\n"
],
[
"#Aha missing values are disguised as '?'. Lets fix that.\ncensus['workclass'].value_counts()",
"_____no_output_____"
],
[
"#Found 3 Object Columns with '?' for missing values. We will fill these with the top value of each row.\ncensus.isin(['?']).sum()",
"_____no_output_____"
],
[
"#Time to make the 'missing' values into NaN so we can work with them\ncensus.replace({'?': np.NaN}, inplace=True)",
"_____no_output_____"
],
[
"#No more '?'\ncensus.workclass.value_counts()",
"_____no_output_____"
],
[
"# They are now registered as NaN. These will be replaced with the top value_counts in each column\ncensus.isnull().sum()",
"_____no_output_____"
],
[
"census.head()",
"_____no_output_____"
],
[
"#Printing Top Values to Fill NaNs\nprint('Top Value:',census['native-country'].describe())\nprint('Top Value:',census['occupation'].describe())\nprint('Top Value:',census['workclass'].describe())",
"Top Value: count 47985\nunique 41\ntop United-States\nfreq 43832\nName: native-country, dtype: object\nTop Value: count 46033\nunique 14\ntop Prof-specialty\nfreq 6172\nName: occupation, dtype: object\nTop Value: count 46043\nunique 8\ntop Private\nfreq 33906\nName: workclass, dtype: object\n"
],
[
"#filling NaN values\ncensus['workclass'].replace({np.NaN : 'Private'},inplace=True)\ncensus['occupation'].replace({np.NaN : 'Prof-specialty'}, inplace=True)\ncensus['native-country'].replace({np.NaN : 'United-States'},inplace=True)",
"_____no_output_____"
],
[
"#Sanity check to assure NaNs have been fixed with working values.\ncensus.isnull().sum()",
"_____no_output_____"
],
[
"#checking for high cardinality in the dataset as well as seeing what to do with the features. Looks like 'fnlwgt' has a very high cardinality and isnt useful for the model\ncensus.astype(object).nunique()",
"_____no_output_____"
]
],
[
[
"#Working on the wrangle function. Not sure how to get these three def/if/else functions wrapped into one working or multi working function inside of a wranglefunction🤔",
"_____no_output_____"
]
],
[
[
"#Create a New Feature that changes the income column into a 1 if they make more than 50K a year and 0 if they make 50K or less. New Feature called 'makes-50K+'.\ndef over50K(census):\n if census['income'] == '>50K':\n val = 1\n else:\n val = 0\n return val\ncensus['makes-50K+'] = census.apply(over50K, axis=1)",
"_____no_output_____"
],
[
"#Create a New Feature that changes the hours worked per week column into a 1 if they worked more than 40 hrs a week and 0 if they worked 40 or less. New Feature called 'over40hrs'.\ndef over40(census):\n if census['hours-per-week'] >40:\n val = 1\n else:\n val = 0\n return val\ncensus['over40hrs+'] = census.apply(over40, axis=1)",
"_____no_output_____"
],
[
"#Create a New Feature that changes the sex column into a 1 if they were Female and 0 if they were Male. New Feature called 'gender-F/1-M/0'. This is new Target column.\ndef gender(census):\n if census['sex'] == 'Female':\n val = 1\n else:\n val = 0\n return val\ncensus['gender-F/1-M/0'] = census.apply(gender, axis=1)",
"_____no_output_____"
],
[
"#checking to see new features were successful. They are all there.\ncensus.head()",
"_____no_output_____"
],
[
"# Time to drop columns we don't need anylonger. Feature'fnlwgt' is high card and Unnecessary while 'sex' would now become a leaky feature and income and hours per week are now redundant\ncensus = census.drop(columns=['fnlwgt','income','hours-per-week','sex','capital-gain','capital-loss'])",
"_____no_output_____"
],
[
"census",
"_____no_output_____"
]
],
[
[
"# Splitting the Data",
"_____no_output_____"
]
],
[
[
"#Split data randomly with a 60/20/20 split\n\ntrain, val, test = np.split(census.sample(frac=1), [int(.6*len(census)), int(.8*len(census))])\nprint('Training Set:',train.head(1))\nprint('Validation Set:',val.head(1))\nprint('Test Set',test.head(1))",
"Training Set: age workclass education education-num marital-status \\\n25295 62 Private HS-grad 9 Married-civ-spouse \n\n occupation relationship race native-country makes-50K+ \\\n25295 Transport-moving Husband White United-States 1 \n\n over40hrs+ gender-F/1-M/0 \n25295 0 0 \nValidation Set: age workclass education education-num marital-status \\\n28118 67 Self-emp-not-inc 1st-4th 2 Widowed \n\n occupation relationship race native-country makes-50K+ \\\n28118 Other-service Not-in-family Black United-States 0 \n\n over40hrs+ gender-F/1-M/0 \n28118 0 1 \nTest Set age workclass education education-num marital-status occupation \\\n23096 45 Private HS-grad 9 Separated Tech-support \n\n relationship race native-country makes-50K+ over40hrs+ \\\n23096 Not-in-family White United-States 0 0 \n\n gender-F/1-M/0 \n23096 1 \n"
],
[
"#Split the data into X and y for training the model and making predictions\ny_train = train[target]\nX_train = train.drop(target,axis=1)",
"_____no_output_____"
],
[
"y_val = val[target]\nX_val = val.drop(target,axis=1)",
"_____no_output_____"
],
[
"y_test = test[target]\nX_test = test.drop(target,axis=1)",
"_____no_output_____"
]
],
[
[
"# Establishing the Baseline",
"_____no_output_____"
]
],
[
[
"#First I will check that the target feature is between 50-70%. Its almost to far off but still within the parameters to continue.\ny_train.value_counts(normalize=True)",
"_____no_output_____"
],
[
"y_train.value_counts()",
"_____no_output_____"
],
[
"print('Baseline Accuracy:', y_train.value_counts(normalize=True).max())",
"Baseline Accuracy: 0.668213615423989\n"
]
],
[
[
"# Building the Model",
"_____no_output_____"
]
],
[
[
"#Starting with a pipeline. Using OrdinalEncoder for the object columns, we do not need and Imputer since they were all filled with top values and I am working with XGBClassifier.\nmodelxgb = make_pipeline(\n OrdinalEncoder(),\n XGBClassifier(n_jobs=-1)\n)\nmodelxgb.fit(X_train,y_train)",
"_____no_output_____"
],
[
"print('Training accuracy:', modelxgb.score(X_train, y_train))\nprint('Validation accuracy:', modelxgb.score(X_val, y_val))",
"Training accuracy: 0.882852755502474\nValidation accuracy: 0.8425470925470926\n"
],
[
"scores = cross_val_score(modelxgb, X_train, y_train, cv=20)\nscores",
"/Users/victoriadebebe/.local/share/virtualenvs/DS-Unit-2-Applied-Modeling-vWvUDltt/lib/python3.8/site-packages/category_encoders/utils.py:21: FutureWarning: is_categorical is deprecated and will be removed in a future version. Use is_categorical_dtype instead\n elif pd.api.types.is_categorical(cols):\n"
],
[
"pipeline = make_pipeline(\n OrdinalEncoder(),\n RandomForestClassifier(random_state=42)\n)\n\nparams = {\n 'randomforestclassifier__n_estimators': range(50,500,50),\n 'randomforestclassifier__max_depth': range(5,101,5),\n 'randomforestclassifier__max_samples': np.arange(0.2, 0.7, 0.2)\n}\n\nmodel = RandomizedSearchCV(\n pipeline,\n param_distributions=params,\n cv=5,\n verbose=1,\n n_iter=5\n)",
"_____no_output_____"
],
[
"model.fit(X_train,y_train)",
"Fitting 5 folds for each of 5 candidates, totalling 25 fits\n"
],
[
"scores = cross_val_score(model, X_train, y_train, cv=10)\nscores",
"Fitting 5 folds for each of 5 candidates, totalling 25 fits\n"
],
[
"print('Training accuracy:', model.score(X_train, y_train))\nprint('Validation accuracy:', model.score(X_val, y_val))",
"Training accuracy: 0.9034294488995052\nValidation accuracy: 0.8354832104832105\n"
],
[
"\n# make predictions for test data\ny_pred = model.predict(X_test)\n\n# evaluate predictions\naccuracy = accuracy_score(y_test, y_pred)\nprint(\"Accuracy: %.2f%%\" % (accuracy * 100.0))",
"Accuracy: 83.70%\n"
],
[
"\n# k-fold cross validation evaluation of xgboost model\nfrom numpy import loadtxt\nimport xgboost\nfrom sklearn.model_selection import KFold\nfrom sklearn.model_selection import cross_val_score\n# load data\n# split data into X and y\n# CV model\nmodel_w_kf = modelxgb\nkfold = KFold(n_splits=3, random_state=7)\nresults = cross_val_score(modelxgb, X_train, y_train, cv=kfold)\nprint(\"Accuracy: %.2f%% (%.2f%%)\" % (results.mean()*100, results.std()*100))",
"/Users/victoriadebebe/.local/share/virtualenvs/DS-Unit-2-Applied-Modeling-vWvUDltt/lib/python3.8/site-packages/sklearn/model_selection/_split.py:293: FutureWarning: Setting a random_state has no effect since shuffle is False. This will raise an error in 0.24. You should leave random_state to its default (None), or set shuffle=True.\n warnings.warn(\n/Users/victoriadebebe/.local/share/virtualenvs/DS-Unit-2-Applied-Modeling-vWvUDltt/lib/python3.8/site-packages/category_encoders/utils.py:21: FutureWarning: is_categorical is deprecated and will be removed in a future version. Use is_categorical_dtype instead\n elif pd.api.types.is_categorical(cols):\n"
],
[
"from sklearn.linear_model import Ridge, LinearRegression, LogisticRegression\nlog_model = make_pipeline(\n OrdinalEncoder(),\n LogisticRegression(max_iter=5)\n )\n\nlog_model.fit(X_train, y_train)\n",
"/Users/victoriadebebe/.local/share/virtualenvs/DS-Unit-2-Applied-Modeling-vWvUDltt/lib/python3.8/site-packages/sklearn/linear_model/_logistic.py:762: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n https://scikit-learn.org/stable/modules/preprocessing.html\nPlease also refer to the documentation for alternative solver options:\n https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n n_iter_i = _check_optimize_result(\n"
],
[
"print('Training accuracy:', log_model.score(X_train, y_train))\nprint('Validation accuracy:', log_model.score(X_val, y_val))",
"Training accuracy: 0.6682477392936359\nValidation accuracy: 0.672911547911548\n"
],
[
"from sklearn.svm import SVC\nsvc_model = make_pipeline(\n OrdinalEncoder(),\n SVC()\n )\n\nsvc_model.fit(X_train, y_train)",
"_____no_output_____"
],
[
"print('Training accuracy:', svc_model.score(X_train, y_train))\nprint('Validation accuracy:', svc_model.score(X_val, y_val))",
"Training accuracy: 0.7907183074560655\nValidation accuracy: 0.7856265356265356\n"
],
[
"lin_model = make_pipeline(\n OrdinalEncoder(),\n LinearRegression()\n )\n\nlin_model.fit(X_train, y_train)",
"_____no_output_____"
],
[
"print('Training accuracy:', lin_model.score(X_train, y_train))\nprint('Validation accuracy:', lin_model.score(X_val, y_val))",
"Training accuracy: 0.36342428028378027\nValidation accuracy: 0.3611681263770725\n"
],
[
"modelxgb.fit(X_train, y_train)\n# make predictions for test data\ny_pred = modelxgb.predict(X_test)\n# evaluate predictions\naccuracy = accuracy_score(y_test, y_pred)\nprint(\"Accuracy: %.2f%%\" % (accuracy * 100.0))",
"/Users/victoriadebebe/.local/share/virtualenvs/DS-Unit-2-Applied-Modeling-vWvUDltt/lib/python3.8/site-packages/category_encoders/utils.py:21: FutureWarning: is_categorical is deprecated and will be removed in a future version. Use is_categorical_dtype instead\n elif pd.api.types.is_categorical(cols):\n"
],
[
"from sklearn.ensemble import GradientBoostingClassifier\n\nmodel_skgb = make_pipeline(\n OrdinalEncoder(),\n GradientBoostingClassifier(random_state=42)\n)\n\nmodel_skgb.fit(X_train, y_train);",
"_____no_output_____"
],
[
"print('Training accuracy:', model_skgb.score(X_train, y_train))\nprint('Validation accuracy:', model_skgb.score(X_val, y_val))",
"Training accuracy: 0.846681453676847\nValidation accuracy: 0.8384520884520884\n"
],
[
"\n# make predictions for test data\ny_pred = model_skgb.predict(X_test)\n# evaluate predictions\naccuracy = accuracy_score(y_test, y_pred)\nprint(\"Accuracy: %.2f%%\" % (accuracy * 100.0))",
"Accuracy: 84.17%\n"
],
[
"X.relationship.value_counts()",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt\n\nimportances = modelxgb.named_steps['xgbclassifier'].feature_importances_\n\nfeat_imp = pd.Series(importances, index=X_train.columns).sort_values()\nfeat_imp.tail(10).plot(kind='barh')\nplt.xlabel('Gini importance')\nplt.ylabel('Feature')\nplt.title('Feature importance for model_skgb');",
"_____no_output_____"
],
[
"# Using sklearn\nfrom sklearn.inspection import permutation_importance\n\nperm_imp = permutation_importance(modelxgb, X_val, y_val, n_jobs=10, random_state=42)",
"_____no_output_____"
],
[
"# Put results into DataFrame\ndata = {'importances_mean' : perm_imp['importances_mean'],\n 'importances_std' : perm_imp['importances_std']}\n\ndf = pd.DataFrame(data, index=X_val.columns)\ndf.sort_values('importances_mean', ascending=True, inplace=True)\n\n# Make plot\ndf['importances_mean'].tail(10).plot(kind='barh')\nplt.xlabel('Importance (change in accuracy)')\nplt.ylabel('Feature')\nplt.title('Permutation importance for model_xgb');",
"_____no_output_____"
],
[
"perm_imp = permutation_importance(modelxgb, X_test, y_test, n_jobs=10, random_state=42)\n\ndata = {'importances_mean' : perm_imp['importances_mean'],\n 'importances_std' : perm_imp['importances_std']}\n\npermutation_importances = pd.DataFrame(data, index=X_test.columns)\npermutation_importances.sort_values('importances_mean', ascending=True, inplace=True)\npermutation_importances",
"_____no_output_____"
],
[
"fig, (ax1, ax2) = plt.subplots(ncols=2, nrows=1, figsize=(12,5))\n\nplot_roc_curve(model, X_test, y_test, ax=ax1)\nplot_roc_curve(modelxgb, X_test, y_test, ax=ax2)\nax1.plot([(0,0), (1,1)], color='grey', linestyle='--')\nax2.plot([(0,0), (1,1)], color='grey', linestyle='--')\nax1.set_title('Random Forest')\nax2.set_title('XG Boost')\n\nplt.show()",
"_____no_output_____"
],
[
"%matplotlib inline\nimport seaborn as sns\nsns.distplot(y_train);",
"/Users/victoriadebebe/.local/share/virtualenvs/DS-Unit-2-Applied-Modeling-vWvUDltt/lib/python3.8/site-packages/seaborn/distributions.py:2551: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms).\n warnings.warn(msg, FutureWarning)\n"
],
[
"#XGBoost model made without pipeline so shap graphing would not be an issue.\n\nimport category_encoders as ce\nore = ce.OrdinalEncoder()\nXTO_train = ore.fit_transform(X_train)\nXTO_val = ore.transform(X_val)\nmodelxgb2 = XGBClassifier()\n\nmodelxgb2.fit(XTO_train,y_train)",
"_____no_output_____"
],
[
"print('Training accuracy:', modelxgb2.score(XTO_train, y_train))\nprint('Validation accuracy:', modelxgb2.score(XTO_val, y_val))",
"Training accuracy: 0.882852755502474\nValidation accuracy: 0.8425470925470926\n"
],
[
"import shap\nrow2 = X_test\nshap_values = shap.TreeExplainer(modelxgb2).shap_values(XTO_train)\nshap.summary_plot(shap_values, XTO_train, plot_type=\"bar\")",
"_____no_output_____"
],
[
"import shap\nrow = XTO_val.iloc[[795]]\nexplainer = shap.TreeExplainer(modelxgb2)\nshap_values = explainer.shap_values(row)\nshap.initjs()\nshap.force_plot(\n base_value=explainer.expected_value,\n shap_values=shap_values,\n features=row)",
"_____no_output_____"
],
[
"row",
"_____no_output_____"
],
[
"model.predict(row)",
"_____no_output_____"
],
[
"row_check = y_val.iloc[[795]]\nrow_check",
"_____no_output_____"
],
[
"import pdpbox.pdp as pdp\nfrom pdpbox.pdp import pdp_isolate, pdp_plot",
"_____no_output_____"
],
[
"feature = 'makes-50K+'\n\nisolate = pdp_isolate(\n model=model,\n dataset=XTO_test, # <-- use validation data\n model_features=XTO_test.columns,\n feature=feature\n)\n\npdp_plot(isolate, feature_name=feature);",
"_____no_output_____"
],
[
"from pdpbox.pdp import pdp_interact, pdp_interact_plot\n\nfeatures = ['makes-50K+', 'over40hrs+']\n\ninteract = pdp_interact(\n model=model,\n dataset=XTO_test, # <-- use validation data\n model_features=XTO_test.columns,\n features=features\n)\n\npdp_interact_plot(interact, plot_type='contour', feature_names=features);",
"_____no_output_____"
],
[
"X_enc = ore.fit_transform(X.drop(columns=['capital-gain','capital-loss']))\nmodel.fit(X_enc,y)\n%matplotlib inline\nimport matplotlib.pyplot as plt\nfrom pdpbox import pdp\nfeature = 'race'\npdp_dist = pdp.pdp_isolate(model=modelxgb2, dataset=X_enc, model_features=X_enc.columns, feature=feature)\npdp.pdp_plot(pdp_dist, feature);",
"_____no_output_____"
],
[
"features = ['occupation', 'makes-50K+']\n\ninteraction = pdp_interact(\n model=modelxgb2, \n dataset=X_enc, \n model_features=X_enc.columns, \n features=features\n)\n\npdp_interact_plot(interaction, plot_type='grid', feature_names=features);",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4abe9e005897731144343cd74bcf48490b865057
| 155,078 |
ipynb
|
Jupyter Notebook
|
climate_starter.ipynb
|
juliakind/sqlalchemy-challenge
|
39d1ed333a7f8aad5b5b6423870c8609c10aa38b
|
[
"ADSL"
] | 1 |
2020-02-22T17:30:28.000Z
|
2020-02-22T17:30:28.000Z
|
climate_starter.ipynb
|
juliakind/sqlalchemy-challenge
|
39d1ed333a7f8aad5b5b6423870c8609c10aa38b
|
[
"ADSL"
] | null | null | null |
climate_starter.ipynb
|
juliakind/sqlalchemy-challenge
|
39d1ed333a7f8aad5b5b6423870c8609c10aa38b
|
[
"ADSL"
] | null | null | null | 62.080865 | 32,516 | 0.677285 |
[
[
[
"%matplotlib inline\nfrom matplotlib import style\nstyle.use('fivethirtyeight')\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"import numpy as np\nimport pandas as pd\nimport datetime as dt\nfrom sqlalchemy import inspect",
"_____no_output_____"
]
],
[
[
"# Reflect Tables into SQLAlchemy ORM",
"_____no_output_____"
]
],
[
[
"import sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\nfrom pandas.plotting import register_matplotlib_converters\nregister_matplotlib_converters()",
"_____no_output_____"
],
[
"engine = create_engine(\"sqlite:///Resources/hawaii.sqlite\")",
"_____no_output_____"
],
[
"# reflecting an existing database into a new model\nBase = automap_base()\n# reflecting the tables\nBase.prepare(engine, reflect=True)",
"_____no_output_____"
],
[
"# Displaying classes\nBase.classes.keys()",
"_____no_output_____"
],
[
"# Saving data bases to variables\nMeasurement = Base.classes.measurement\nStation = Base.classes.station",
"_____no_output_____"
],
[
"# Starting session from Python to the DB\nsession = Session(engine)",
"_____no_output_____"
]
],
[
[
"# Exploratory Climate Analysis",
"_____no_output_____"
]
],
[
[
"#Getting the last date in Measurment DB\nmax_date = session.query(func.max(Measurement.date)).first()\nmax_date",
"_____no_output_____"
],
[
"# Calculating the date 1 year ago from the last data point in the database\nbegin_date = dt.date(2017, 8, 23) - dt.timedelta(days=365)\nbegin_date",
"_____no_output_____"
],
[
"# Querying the Base tables returns results in a list\ndata = session.query(Measurement.date, Measurement.prcp).filter(Measurement.date >= begin_date).order_by(Measurement.date).all()\ndata",
"_____no_output_____"
],
[
"# Getting names and types of columns in \"measurement\" data set\ninspector = inspect(engine)\ncolumns = inspector.get_columns(\"measurement\")\nfor column in columns:\n print(column[\"name\"], column[\"type\"])",
"id INTEGER\nstation TEXT\ndate TEXT\nprcp FLOAT\ntobs FLOAT\n"
],
[
"# Getting names and types of columns in \"station\" data set\ninspector = inspect(engine)\ncolumns = inspector.get_columns(\"station\")\nfor column in columns:\n print(column[\"name\"], column[\"type\"])",
"id INTEGER\nstation TEXT\nname TEXT\nlatitude FLOAT\nlongitude FLOAT\nelevation FLOAT\n"
],
[
"# Save the query results as a Pandas DataFrame and setting the index to the date column\nprecip_df = pd.DataFrame(data, columns=[\"Date\", \"Precipitation\"])\nprecip_df[\"Date\"] = pd.to_datetime(precip_df[\"Date\"])\n#Resettinng index to Date column\nprecip_df = precip_df.set_index(\"Date\")\n#Dropping all N/As\nprecip_df = precip_df.dropna(how = \"any\")\n#Sorting by Date colummn - ascending\nprecip_df = precip_df.sort_values(by=\"Date\", ascending=True)\nprecip_df",
"_____no_output_____"
],
[
"# Use Pandas Plotting with Matplotlib to plot the data\nplt.figure(figsize=(10,5))\nplt.plot(precip_df, label=\"Precipitation by Date\")\nplt.xlabel(\"Date\")\nplt.ylabel(\"Precipitation(in)\")\nplt.xticks(rotation=\"45\")\nplt.legend(loc=\"upper center\")\nplt.savefig(\"Output/Precipitation_plot.png\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
]
],
[
[
"#calcualting the summary statistics for the precipitation data\nprecip_df.describe()",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
]
],
[
[
"# Query to count the number of stations in \"Stations\" data\nsession.query(func.count(Station.id)).all()",
"_____no_output_____"
],
[
"# What are the most active stations? (i.e. what stations have the most rows)?\n# List the stations and the counts in descending order.\n\nstations = session.query(Measurement.station, func.count(Measurement.station)).group_by(Measurement.station).order_by(func.count(Measurement.station).desc()).all()\nstations",
"_____no_output_____"
],
[
"# Using the station id from the previous query, calculate the lowest temperature recorded, \n# highest temperature recorded, and average temperature of the most active station?\n\nsession.query(Measurement.station, func.min(Measurement.tobs), \n func.max(Measurement.tobs), func.avg(Measurement.tobs)). filter(Measurement.station == \"USC00519281\").\\\n group_by(Measurement.station).all()\n",
"_____no_output_____"
],
[
"# Choose the station with the highest number of temperature observations.\n# Query the last 12 months of temperature observation data for this station and plot the results as a histogram\n#Filtering data by date and by station\ndata_2 = session.query(Measurement.date, Measurement.tobs).filter(Measurement.station == \"USC00519281\").\\\n filter(func.strftime( Measurement.date) >= begin_date).all()\ndata_2",
"_____no_output_____"
],
[
"# Cleaning temp.data and setting index to date\ntemp_df = pd.DataFrame(data_2, columns=[\"Date\", \"Temperature\"])\ntemp_df = temp_df.sort_values(by=\"Date\", ascending=True)\ntemp_df.set_index(\"Date\", inplace=True)\ntemp_df.head()",
"_____no_output_____"
],
[
"plt.figure(figsize=[8,5])\n#Ploting the results as a histogram with 12 bins\n\nplt.hist(x=temp_df[\"Temperature\"], bins=12, label=\"tobs\")\n# Labeling figure\nplt.grid\nplt.xlabel(\"Temperature (F)\")\nplt.ylabel(\"Frequency\")\nplt.title(\"Temperature Frequency Histogram\")\nplt.legend()\n# Saving Plot\nplt.savefig(\"Output/Temp Frequency Histogram\");\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
]
],
[
[
"# This function called `calc_temps` will accept start date and end date in the format '%Y-%m-%d' \n# and return the minimum, average, and maximum temperatures for that range of dates\ndef calc_temps(start_date, end_date):\n \"\"\"TMIN, TAVG, and TMAX for a list of dates.\n \n Args:\n start_date (string): A date string in the format %Y-%m-%d\n end_date (string): A date string in the format %Y-%m-%d\n \n Returns:\n TMIN, TAVE, and TMAX\n \"\"\"\n \n return session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()\n\n# function usage example\nprint(calc_temps('2011-02-28', '2011-03-05'))",
"[(61.0, 69.75510204081633, 75.0)]\n"
],
[
"# using the example to calculate min, max and average tempreture for my vacation date\n# Vacation Dates\nstart_date = \"2020-04-01\"\nend_date = \"2020-04-11\"\n\n# Previous Year Dates\nhst_start_date = \"2017-04-01\"\nhst_end_date = \"2017-04-11\"\n\n# Min,average and max temp calculation\ntemp_min = calc_temps(hst_start_date, hst_end_date)[0][0]\ntemp_avg = calc_temps(hst_start_date, hst_end_date)[0][1]\ntemp_max = calc_temps(hst_start_date, hst_end_date)[0][2]\n\nprint(temp_min, temp_avg, temp_max)",
"67.0 74.54285714285714 82.0\n"
],
[
"# Ploting the results from your previous query as a bar chart. \n# Use \"Trip Avg Temp\" as your Title\n# Use the average temperature for the y value\n# Use the peak-to-peak (tmax-tmin) value as the y error bar (yerr)\nx_axis = 1\ny_axis = temp_avg\nerror = temp_max-temp_min\n\n# Defining Bar and Error paramaters\nplt.bar(x_axis, y_axis, yerr=error, align='center', color = \"r\")\nplt.tick_params(bottom=False,labelbottom=False)\n\n\n# Labeling, tickers and grids\nplt.ylabel(\"Temperature (F)\")\n\nplt.title(\"Trip Avg Temperature\")\nplt.grid(b=None, which=\"major\", axis=\"x\")\nplt.margins(1.5, 1.5)\nplt.ylim(0, 90)\nplt.savefig(\"Output/Trip Average Temperature\")\n\n#Show the Plot\nplt.show();",
"_____no_output_____"
]
],
[
[
"## Optional Challenge Assignment",
"_____no_output_____"
]
],
[
[
"# Create a query that will calculate the daily normals \n# (i.e. the averages for tmin, tmax, and tavg for all historic data matching a specific month and day)\n\ndef daily_normals(date):\n \"\"\"Daily Normals.\n \n Args:\n date (str): A date string in the format '%m-%d'\n \n Returns:\n A list of tuples containing the daily normals, tmin, tavg, and tmax\n \n \"\"\"\n \n sel = [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]\n return session.query(*sel).filter(func.strftime(\"%m-%d\", Measurement.date) == date).all()\n \ndaily_normals(\"04-01\")",
"_____no_output_____"
],
[
"# calculate the daily normals for your trip\n# push each tuple of calculations into a list called `normals`\n\n# Seting the start and end date of the trip from historic dates\nhst_start_date # defined above \nhst_end_date\n# Useing the start and end date to create a range of dates\n\ndates = session.query(Measurement.date).filter(Measurement.date >= hst_start_date).filter(Measurement.date <= hst_end_date).group_by(Measurement.date).all()\n#saving trip dates into array \narr_dates = [x[0] for x in dates]\n# Reformating dates to mm-dd format and getting data ion a list\n\narr_dates_mm_dd= [x[5:] for x in arr_dates]\n\nstart_mmdd = arr_dates_mm_dd[0]\nend_mmdd = arr_dates_mm_dd[10]\n# Looping through the list of mm-dd and getting max,ave, min temp averages\ntemps_by_dates = [session.query(func.min(Measurement.tobs),\n func.avg(Measurement.tobs),\n func.max(Measurement.tobs)).filter(func.strftime(\"%m-%d\", Measurement.date) >= start_mmdd).filter(func.strftime(\"%m-%d\", Measurement.date) <= end_mmdd).group_by(func.strftime(\"%m-%d\", Measurement.date)).all()]\n\ntemps_by_dates = temps_by_dates[0]\n#displaying averages for each date of the trip\n\ntemps_by_dates",
"_____no_output_____"
],
[
"# reformating list of temp into Pandas DataFrame\ntemps_by_dates_df= pd.DataFrame(temps_by_dates,columns=[\"min_t\",\"avg_t\",\"max_t\"])\n\n#Adding date column\ntemps_by_dates_df[\"date\"]= arr_dates_mm_dd\n\n# Seting index to date\ntemps_by_dates_df.set_index(\"date\",inplace=True)\n\ntemps_by_dates_df",
"_____no_output_____"
],
[
"# Ploting the daily normals as an area plot with `stacked=False`\ntemps_by_dates_df.plot(kind='area', stacked=False, x_compat=True, title=\"Daily Normals for Trip Dates\")\nplt.xticks(rotation=\"45\")\nplt.savefig((\"Output/Temp Frequency\"))\nplt.show()\n",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
4abe9fa196278948f2656b7825d66b7bdf967020
| 4,501 |
ipynb
|
Jupyter Notebook
|
PythonProgramming.net/DeepLearningBasics/02-load_data_tf/load_data_tf.ipynb
|
dloperab/TensorFlow
|
5e13ceaf793501eb01c2b22859211c75529c054b
|
[
"MIT"
] | 1 |
2019-04-12T23:59:54.000Z
|
2019-04-12T23:59:54.000Z
|
PythonProgramming.net/DeepLearningBasics/02-load_data_tf/load_data_tf.ipynb
|
dloperab/TensorFlow
|
5e13ceaf793501eb01c2b22859211c75529c054b
|
[
"MIT"
] | null | null | null |
PythonProgramming.net/DeepLearningBasics/02-load_data_tf/load_data_tf.ipynb
|
dloperab/TensorFlow
|
5e13ceaf793501eb01c2b22859211c75529c054b
|
[
"MIT"
] | null | null | null | 24.730769 | 178 | 0.469451 |
[
[
[
"# import necessary packages\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport cv2\nfrom tqdm import tqdm\nimport random\nimport pickle",
"_____no_output_____"
],
[
"# declare constants\nDATA_DIR = \"D:\\Dev\\Repos\\AI\\TensorFlow\\datasets\\PetImages\"\nCATEGORIES = [\"Dog\", \"Cat\"]\nIMG_SIZE = 50",
"_____no_output_____"
],
[
"# create training data\ntraining_data = []\n\ndef create_training_data():\n for category in CATEGORIES:\n path = os.path.join(DATA_DIR, category) # create path to dogs and cats dataset \n class_num = CATEGORIES.index(category) # get the classification (0 or 1). 0=dog 1=cat\n \n # iterate over each image per dogs and cats\n for img in tqdm(os.listdir(path)):\n try:\n img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)\n new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\n training_data.append([new_array, class_num])\n except Exception as e: # in the interest in keeping the output clean...\n pass #print(\"[ERROR] Exception = {}\".format(e))\n\ncreate_training_data()\n\nprint(len(training_data))",
"100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 12501/12501 [00:21<00:00, 593.96it/s]\n100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 12501/12501 [00:17<00:00, 709.90it/s]\n"
],
[
"# shuffle data\nrandom.shuffle(training_data)\n\nfor sample in training_data[:10]:\n print(sample[1])",
"0\n1\n0\n1\n0\n0\n1\n1\n1\n0\n"
],
[
"# get features and label arrays\ntrainX = []\ntrainY = []\n\nfor features, label in training_data:\n trainX.append(features)\n trainY.append(label)\n\n# reshape data\ntrainX = np.array(trainX).reshape(-1, IMG_SIZE, IMG_SIZE, 1)",
"_____no_output_____"
],
[
"# save the data with pickle, so don't need to keep calculating it every time\npickle_out = open(\"trainX.pickle\", \"wb\")\npickle.dump(trainX, pickle_out)\npickle_out.close()\n\npickle_out = open(\"trainY.pickle\", \"wb\")\npickle.dump(trainY, pickle_out)\npickle_out.close()",
"_____no_output_____"
],
[
"# load data saved with pickle\npickle_in = open(\"trainY.pickle\", \"rb\")\ny = pickle.load(pickle_in)\nprint(y[:10])",
"[0, 1, 0, 1, 0, 0, 1, 1, 1, 0]\n"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4abea4324cb68d3233bc795cbd4bc1e5b4e4e63f
| 104,226 |
ipynb
|
Jupyter Notebook
|
21_Dropout/21_Dropout.ipynb
|
zhengyul9/lecture
|
905b93ba713f8467887fe8de5a44a3d8a7cae45c
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
21_Dropout/21_Dropout.ipynb
|
zhengyul9/lecture
|
905b93ba713f8467887fe8de5a44a3d8a7cae45c
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
21_Dropout/21_Dropout.ipynb
|
zhengyul9/lecture
|
905b93ba713f8467887fe8de5a44a3d8a7cae45c
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null | 304.754386 | 53,432 | 0.913947 |
[
[
[
"# Ensemble Learning\n\n* The basic idea of ensemble learning is to have multiple learning algorithms for the same problem and combine their results to make a final prediction\n\n* There are multiple types on ensemble learning. Common approaches include:\n * Boosting \n * Bagging/Bootstrapping\n * Random Forests\n * Mixture of Experts\n \n## Boosting and Bagging\n\n* When you have one data set, usually you may train an algorithm and learn a single set of parameters. However, when we do this, we have no idea how stable/variable those parameters that we estimated are. \n* Bootstrapping can show us the variation in estimated parameter values given a particular data set. Sometimes, it can also help to improve our predictions. \n* Essentially, to perform bootstrapping, you sample from your data set *with replacement* and train your algorithm to estimate the parameters with each sampled subset. You can then look at how much the parameters vary with each sampled subset and you can also combine your estimates from each trained method by averaging over all of the results for regression:\n\\begin{equation}\ny_{com}(\\mathbf{x}) = \\frac{1}{M} \\sum_{m=1}^M y_m(\\mathbf{x})\n\\end{equation}\n* You can aggregate results over all your bootstrap samples using majority vote for classification.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport math \nimport textwrap\n%matplotlib inline\n\n\ndef generateRandData(N, l, u, gVar):\n\t'''generateRandData(N, l, u, gVar): Generate N uniformly random data points in the range [l,u) with zero-mean Gaussian random noise with variance gVar'''\n\tx = np.random.uniform(l,u,N)\n\te = np.random.normal(0,gVar,N)\n\tt = np.sin(2*math.pi*x) + e\n\treturn x,t\n\ndef fitdataReg(x,t,M,la):\n\t'''fitdata(x,t,M): Fit a polynomial of order M to the data (x,t)'''\t\n\tX = np.array([x**m for m in range(M+1)]).T\n\tw = np.linalg.inv(X.T@X+(la*np.identity(M+1)))@X.T@t\n\treturn w\n\ndef plotPoly(x,t,xrange, y, esty, subplotloc,la=0):\n\t#plot everything\n\tplt.subplot(*subplotloc) #identify the subplot to use\n\t# plt.tight_layout()\n\tplt.ylim([-2,2])\n\tp1 = plt.plot(xrange, y, 'g') #plot true value\n\tp2 = plt.plot(x, t, 'bo') #plot training data\n\tp3 = plt.plot(xrange, esty, 'r') #plot estimated value\n\n\t#add title, legend and axes labels\n\tplt.ylabel('t') #label x and y axes\n\tplt.xlabel('x')\n\ndef bootstrapRegression(M, numData,percentSample,numSamples):\n \n #generate data\n\tx,t = generateRandData(numData,0,1,1) \n\tnumDataSamples = round(percentSample*numData)\n\tsubplotloc = [2, round(numSamples/2), 1]\n\tfig = plt.figure()\n\txrange = np.arange(0.05,.95,0.001) #get equally spaced points in the xrange\n\testy = np.empty([numSamples, xrange.shape[0]])\n \n\tfor iter in range(numSamples):\n #select a random subset of the data\n\t\trp = np.random.permutation(numData)\n\t\tx_sub = x[rp[0:numDataSamples-1]]\n\t\tt_sub = t[rp[0:numDataSamples-1]]\n \n #fit the random subset\n\t\tw = fitdataReg(x_sub,t_sub,M,0)\n \n #plot results\n\t\tsubplotloc[2] = iter+1\n\t\ty = np.sin(2*math.pi*xrange) #compute the true function value\n\t\tX = np.array([xrange**m for m in range(w.shape[0])]).T\n\t\testy[iter,:] = X@w #compute the predicted value\n\t\tplotPoly(x_sub,t_sub,xrange,y,esty[iter,:],subplotloc)\n \n \n #combine the 
bootstrapped results\n\tcomy = esty.mean(0)\n\tyerr = esty.var(0)\n\n # compare to full data set\n\tfig = plt.figure()\n\tplotPoly(x,t,xrange,y,comy,[1, 1, 1])\n\tplt.errorbar(xrange, comy, yerr=yerr, fmt='r.',ms=10,errorevery=10)\n\n\tfig = plt.figure()\n\tw = fitdataReg(x,t,M,0)\n\ty = np.sin(2*math.pi*xrange) #compute the true function value\n\tX = np.array([xrange**m for m in range(w.shape[0])]).T\n\tyy = X@w #compute the predicted value\n\tplotPoly(x,t,xrange,y,yy, [1, 1, 1])\n \n\n\n#Figure 1.7 from text\nbootstrapRegression(5, 50,.75,20)\n ",
"_____no_output_____"
]
],
[
[
"# Boosting: AdaBoost\n\n* Goal: Combine base (``weak'') classifiers to form a committee whose performance is better than any of the single base classifiers. \n* The base classifiers are trained in sequence (not in parallel like in bootstrapping)\n* Each base classifier is trained using a weighted data set (different weights for each base classifier)\n* Points that are misclassified by a base classifier are weighted more heavily while training the next base classifier\n\n* Consider a two class classification problem with $\\mathbf{X} = \\left\\{ \\mathbf{x}_1, \\mathbf{x}_2, \\ldots, \\mathbf{x}_n\\right\\}$ with corresponding labels $y_i \\in \\left\\{ -1,1\\right\\}$.\n* The goal is to construct a classifier of the form: \n\\begin{equation}\nf(\\mathbf{x}) = sign(F(\\mathbf{x}))\n\\end{equation}\nwhere\n\\begin{equation}\nF(\\mathbf{x}) = \\sum_{k=1}^K \\frac{1}{2}\\alpha_k \\phi(\\mathbf{x}; \\theta_k)\n\\end{equation}\nwhere $\\phi(\\mathbf{x}; \\theta_k)$ is the base classifier. \n",
"_____no_output_____"
],
[
"* We need to determine the parameter values for each base classifier:\n\\begin{eqnarray}\n\\arg \\min_{\\alpha_k, \\theta_k} \\sum_{i=1}^N \\exp\\left(-y_i F(\\mathbf{x}_i) \\right)\n\\end{eqnarray}\n* This cost function penalizes the samples that are incorrectly classified ($y_iF(\\mathbf{x}_i) < 0$) heavily \n* Direct optimization of all $\\alpha$s and $\\theta$s is difficult. So, we iteratively optimize (which is sub-optimal). At each stage, we train one base classifier holding fixed all those that have already been trained. \n",
"_____no_output_____"
],
[
"* Let:\n\\begin{eqnarray}\nF_m(\\mathbf{x}) &=& \\sum_{k=1}^m \\frac{1}{2}\\alpha_k \\phi(\\mathbf{x}; \\theta_k)\\\\\n&=& F_{m-1}(\\mathbf{x}) + \\frac{1}{2}\\alpha_m \\phi(\\mathbf{x}; \\theta_m)\n\\end{eqnarray}\n* At step $m$, we optimize for $\\alpha_m$ and $\\theta_m$ where $F_{m-1}(\\mathbf{x})$ is fixed:\n\\begin{eqnarray}\n(\\alpha_m, \\theta_m) &=& \\arg \\min_{\\alpha, \\theta} J(\\alpha, \\theta)\\\\\n&=& \\arg \\min_{\\alpha, \\theta} \\sum_{i=1}^N \\exp\\left( -y_i\\left( F_{m-1}(\\mathbf{x}_i) +\\frac{1}{2} \\alpha\\phi(\\mathbf{x}_i; \\theta)\\right)\\right)\n\\end{eqnarray}\n* So, let's optimize this in two steps: first $\\theta_m$ and then $\\alpha_m$\n\\begin{eqnarray}\n\\theta_m &=& \\arg \\min_{\\theta} \\sum_{i=1}^N \\exp\\left( -y_i\\left( F_{m-1}(\\mathbf{x}_i) + \\frac{1}{2}\\alpha\\phi(\\mathbf{x}_i; \\theta)\\right)\\right)\\\\\n&=& \\arg \\min_{\\theta} \\sum_{i=1}^N w_i^{(m)} \\exp\\left( -\\frac{1}{2}y_i\\alpha\\phi(\\mathbf{x}_i; \\theta)\\right)\n\\end{eqnarray}\nwhere\n\\begin{equation}\nw_i^{(m)} = \\exp\\left(-y_iF_{m-1}(\\mathbf{x}_i)\\right)\n\\end{equation}\n\n",
"_____no_output_____"
],
[
"* This can be re-written as: \n\n\\begin{eqnarray}\n\\theta_m &=& \\arg \\min_{\\theta} \\exp\\left(-\\alpha_m/2\\right)\\sum_{n \\in T_m}w_n^{(m)} + \\exp\\left(\\alpha_m/2\\right)\\sum_{n \\in M_m}w_n^{(m)} \\nonumber \\\\\n&=& \\left( \\exp\\left(\\alpha_m/2\\right) - \\exp\\left(-\\alpha_m/2\\right)\\right)\\sum_{i=1}^Nw_i^{(m)} I(\\phi_m(\\mathbf{x}_i;\\theta) \\ne y_i) + \\exp\\left(-\\alpha_m/2\\right)\\sum_{i=1}^Nw_i^{(m)}\n\\end{eqnarray}\n\n* This is equivalent to minimizing\n\n\\begin{equation}\n\\arg \\min_{\\theta} \\sum_{i=1}^N w_i^{(m)} I(\\phi_m(\\mathbf{x}_i;\\theta) \\ne y_i)\n\\end{equation}\n\n",
"_____no_output_____"
],
[
"\n* Once we have the optimal classifier at step $m$ (i.e., $\\theta_m$), then we determine the $\\alpha_m$ values\n\\begin{eqnarray}\n\\sum_{y_i\\phi(\\mathbf{x}_i;\\theta_m)<0}w_i^{(m)} = P_m\\\\\n\\sum_{y_i\\phi(\\mathbf{x}_i;\\theta_m)>0}w_i^{(m)} = 1 - P_m\n\\end{eqnarray}\n* Plugging this into J, we get:\n\\begin{eqnarray}\n\\alpha_m = \\arg\\min_{\\alpha} \\left\\{ \\exp(-\\alpha)(1-P_m) + \\exp(\\alpha)P_m\\right\\}\n\\end{eqnarray}\n* Take the derivative with respect to $\\alpha$, set to zero, we get: \n\\begin{equation}\n\\alpha_m = \\frac{1}{2}\\ln\\frac{1-P_m}{P_m}\n\\end{equation}\n* Once you get $\\theta_m$ and $\\alpha_m$, you compute the weights for the next step:\n\\begin{equation}\nw_i^{(m+1)} = \\frac{\\exp(-y_iF_m(\\mathbf{x}_i))}{Z_m} = \\frac{\\exp(-y_i\\alpha_m\\phi(\\mathbf{x}_i;\\theta_m))}{Z_m} \n\\end{equation}\nwhere\n\\begin{equation}\nZ_m = \\sum_{i=1}^N w_i^{(m)}\\exp\\left(-y_i\\alpha_m\\phi(\\mathbf{x}_i;\\phi_m)\\right)\n\\end{equation}\n* Notice that the weight corresponding to a sample is increased (or decreased) with respect to its value in the previous iteration\n* Notice that the amount of increase or decrease depends on $\\alpha_m$ which controls the relative importance of the $m^{th}$ term in building up the final classifier\n\n\n",
"_____no_output_____"
],
[
"## Random Forests\n* A forest is made up of many trees...\n* For classification/regression, put an input vector down each of the trees in the forest. For classification, classify the data point using majority vote. For regression, average the values\n* Each tree is grown using:\n * Sample $N$ data points (with replacement, i.e., a bootstrap sample) from the full training data set\n * Specify a number $d << D$. $d$ variables are selected at random out of all $D$ features to determine the split on the node. Select the best of the $d$ features to split at that node\n * Grow each tree as much as possible (i.e., no pruning or stopping early)\n\n* Error relates to correlation between the trees. Greater correlation leads to greater error. *Does this make sense?*\n* Error also relates to the strength of each individual tree. Better individual trees lead to lower error\n* https://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm\n",
"_____no_output_____"
],
[
"# Dropout\n\n* This is a method to help prevent overfitting and regularize a network.\n* The approach attempts to minimize co-dependencies between neurons and enhance robustness of network\n* Dropout has one parameter $p$. In each iteration, you randomly exclude each neuron with probability $1-p$ during the training pass (in both forward and backward propagation). Each iteration, you resample which neurons to keep and which to dropout. \n* Dropout is related to the concept of ensemble learning with the unique case that the various models in the ensemble share parameters and these models are \"combined\" into a single model/network at test as opposed to training a fusion model or doing a simple average between outputs. \n* During test, you use all neurons all the time. \n* Please see and read: https://www.cs.toronto.edu/~hinton/absps/JMLRdropout.pdf",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
4abea4a9c9810306f38afc984a2714d5b8e7068b
| 161,619 |
ipynb
|
Jupyter Notebook
|
tutorial.ipynb
|
marcsingleton/pymix
|
8270ebdb9269bc802dc22d6112006280147fce88
|
[
"MIT"
] | null | null | null |
tutorial.ipynb
|
marcsingleton/pymix
|
8270ebdb9269bc802dc22d6112006280147fce88
|
[
"MIT"
] | null | null | null |
tutorial.ipynb
|
marcsingleton/pymix
|
8270ebdb9269bc802dc22d6112006280147fce88
|
[
"MIT"
] | null | null | null | 197.57824 | 19,564 | 0.904361 |
[
[
[
"# MixMod Tutorial\n\nWelcome to the MixMod tutorial! Here we'll go over the basic functionality of MixMod. It's a small package, so the explanation of the MixtureModel class will be brief and will largely focus on formatting the inputs correctly. (Mixture models are relatively parameter-rich, so the syntax for properly specifying all the components can be a little verbose!) The first portion of this tutorial is a brief introduction to mixture models, their use cases, and why parameter inference is a hard problem, so feel free to skip to the MixMod Class section if you're already familiar with mixture model theory.\n\n## Mixture Model Theory\n\n### What are mixture models and what are they good for?\n\nUnlike in introductory statistics courses where the data are typically clean examples of a single distribution, real data are messy. They contain outliers, missing values, and may represent the result of multiple overlapping random processes. One common example is a \"bimodal\" distribution of exam scores where there are two groups of students, those who understood the material and those who didn't. As an instructor, we'll likely want to calculate the means within groups and give students different instruction depending on whether we think they understood the previous material. In other words, we want to 1) understand the statistical properties of each group and 2) assign observations to these groups. More formally, these two goals are parameter estimation and class inference, and they are the major applications of mixture models. If the observations were labeled with their classes, these calculations would be trivial. The challenge is class labels are typically hidden in real-world data, so the observations from different classes are jumbled together. In most cases, class labels don't even exist since the mixture model is a statistical formalism rather than an accurate representation of the underlying data generation process. 
(See also \"All models are wrong, but some are useful.\")\n\n### A formal definition\n\nLet's now give a more formal definition of mixture models (which is adapted from [Wikipedia](https://en.wikipedia.org/wiki/Mixture_model)). A mixture model consists of the following components:\n\n- A set of *K* mixture components, each of which is a probability distribution.\n- A set of *K* parameters, each specifying the parameters of its corresponding mixture component. In many cases, each \"parameter\" is actually a set of parameters. For example, if the mixture components are normal distributions, each component will have a mean and variance.\n- A set of *K* mixture weights, which are probabilities that sum to 1.\n\nThe probability density function for a mixture model evaluated at $x_i$ is given by:\n\n$$ f(x_i) = \\sum_{k=1}^K \\phi_k f_k(x_i; \\theta_k) $$\n\nwhere $K$ is number of components, $\\phi_k$ is the weight, $f_k$ is the pdf, and $\\theta_k$ is the parameter set of each component.\n\n\n\nThe above equation applies to a mixture model for an observation with an unknown class label. If the class label, $z_i$, is known, then the density function is given by:\n\n$$ f(x_i) = \\sum_{k=1}^K \\delta_{kz_i} \\phi_k f_k(x_i; \\theta_k) $$\n\nwhere $\\delta_{ij}$ is the Kronecker delta function.\n\n\nSince $\\delta_{ij} = 0$ when $i \\ne j$, this equation reduces to the distribution corresponding to the class of the observation.\n\n### Fitting mixture models\n\nIf the class labels are known, then some algebra using the above equation will show the overall likelihood for the data is maximized when the component likelihoods are maximized for the data corresponding to that component. This checks out intuitively. If we knew the class labels, then we could treat the components separately and choose the best parameters for each using only the observations from that component.\n\nWhen the class labels are not known, parameter inference is a different beast entirely. 
The problem is a little like a chicken or egg question. If we knew the class labels, then we could easily infer the component parameters. If we knew the component parameters, then we could infer the class labels (and in turn use those labels to infer the component parameters). This is very nearly estimation-maximization (EM), the algorithm that yields parameter estimates for statistical models with unobserved variables (like the class labels in mixture models). The basic idea is by alternating between assigning class labels to observations using the current parameter estimates and then using those class assignments to update the parameters, the parameters will eventually converge to a local maximum of the likelihood function. The actual procedure is a little more subtle than making hard class assignments for each observation, but the basic idea is very similar.\n\nThe EM algorithm is highly flexible, so it is possible to implement the procedure for a generic mixture model. However, such an implementation would necessarily rely on general purpose numerical optimization routines, which can be somewhat finicky to use in practice. Thus, for both efficiency and robustness, this package limits the distributions to those where the EM equations are explicitly solved. More details are available in the section \"Creating mixtures of other distributions.\"\n\n## The MixtureModel Class\n\n### Importing the package and generating data\n\nWith all that out of the way, let's introduce the MixMod package! First we need to import it and some supporting libraries.",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\nimport mixmod\nimport numpy as np\nimport scipy.stats as stats",
"_____no_output_____"
]
],
[
[
"Now let's generate some data. We'll start with a simple mixture of two normal distributions. In the SciPy stats implementation, the mean and standard deviation are specified with the `loc` and `scale` parameters, respectively. This is standard practice within this module as well as in statistics more broadly. Distributions are often characterized by different, but related, parameters depending on the context. However, most of these can be expressed in a standard form as either a location or scale parameter. Location parameters shift the position of the distribution whereas scale parameters control the spread. Both of these have formal mathematical definitions which define these ideas precisely. The practical take-away, however, is the SciPy implementations of distributions express all location and scale parameters in their standard forms. These forms may differ from the conventional parametrizations, so be sure to read the documentation for each distribution thoroughly.",
"_____no_output_____"
]
],
[
[
"rvs0 = stats.norm.rvs(loc=1, scale=1.25, size=400)\nrvs1 = stats.norm.rvs(loc=5, scale=0.75, size=100)\nrvs = np.concatenate([rvs0, rvs1])",
"_____no_output_____"
]
],
[
[
"We can visualize these distributions separately. We'll manually set the bins, so the two histograms are drawn on the same intervals.",
"_____no_output_____"
]
],
[
[
"bins = np.linspace(rvs.min(), rvs.max(), num=50)\nplt.hist(rvs0, bins=bins, color='C0')\nplt.hist(rvs1, bins=bins, color='C1');",
"_____no_output_____"
]
],
[
[
"Usually, however, the observations from the two components are mixed together.",
"_____no_output_____"
]
],
[
[
"plt.hist(rvs, bins=bins, facecolor='white', edgecolor='black');",
"_____no_output_____"
]
],
[
[
"Clearly the overall distribution is bimodal, but the division between the two components isn't immediately obvious, even in a simple case like this. Let's now use a MixtureModel to try to extract the parameters.\n\n### Instantiating a MixtureModel and plotting its pdf\n\nThe easiest way of instantiating a MixtureModel is by simply passing a list of SciPy stats distributions.",
"_____no_output_____"
]
],
[
[
"mixture = mixmod.MixtureModel([stats.norm, stats.norm])\nmixture",
"_____no_output_____"
]
],
[
[
"This is the minimal amount of information needed, so most of the attributes of the instance are currently empty. Notice, however, the weights were set uniformly across components by default. Let's now make this mixture model more interesting by giving it some better initial parameters. It's not necessary to specify all the parameters for each component. Any parameters not defined in the `params` or `params_fix` dicts will use the default values specified by the distribution.",
"_____no_output_____"
]
],
[
[
"mixture = mixmod.MixtureModel([stats.norm, stats.norm],\n params=[{'loc': 1}, {'loc': 5}],\n weights=[0.6, 0.4])\nmixture",
"_____no_output_____"
]
],
[
[
"Let's look at how well the density function matches the histogram.",
"_____no_output_____"
]
],
[
[
"x = np.linspace(rvs.min(), rvs.max(), 100)\ny = mixture.pdf(x)\n\nplt.hist(rvs, bins=bins, density=True, facecolor='white', edgecolor='black')\nplt.plot(x, y, color='black');",
"_____no_output_____"
]
],
[
[
"We can also extract the pdfs of the individual components and plot them separately.",
"_____no_output_____"
]
],
[
[
"x = np.linspace(rvs.min(), rvs.max(), 100)\ny = mixture.pdf(x, component='all')\n\nplt.hist(rvs, bins=bins, density=True, facecolor='white', edgecolor='black')\nplt.plot(x, y[0], label='component 0', color='C0')\nplt.plot(x, y[1], label='component 1', color='C1')\nplt.legend(frameon=False);",
"_____no_output_____"
]
],
[
[
"### Fitting a MixtureModel\n\nOur initial parameters aren't bad, but let's see if we can do a little better. Let's call `fit` on our data to optimize the parameters.",
"_____no_output_____"
]
],
[
[
"mixture.fit(rvs)\nmixture",
"_____no_output_____"
]
],
[
[
"These new parameters look closer to their true values. You can also see each component has a `scale` parameter in its `params` dict now since they are now estimated from the data and not using the default values. Let's see if the pdfs match the histograms better.",
"_____no_output_____"
]
],
[
[
"x = np.linspace(rvs.min(), rvs.max(), 100)\ny = mixture.pdf(x, component='all')\n\nplt.hist(rvs, bins=bins, density=True, facecolor='white', edgecolor='black')\nplt.plot(x, y[0], label='component 0', color='C0')\nplt.plot(x, y[1], label='component 1', color='C1')\nplt.legend(frameon=False);",
"_____no_output_____"
]
],
[
[
"### Fitting a MixtureModel with fixed parameters\n\nOne downside of this approach is all the parameters associated with each component are fit to the data. In some cases, we might have existing estimates for certain parameters that we want to stay constant. We can communicate this information to a `MixtureModel` by passing these parameters in the `params_fix` dicts. For example, let's say we're confident the `loc` parameter of the second component is 5, but we're unsure about the remaining parameters.",
"_____no_output_____"
]
],
[
[
"mixture = mixmod.MixtureModel([stats.norm, stats.norm],\n params_fix=[{}, {'loc': 5}])\nmixture",
"_____no_output_____"
]
],
[
[
"Notice that an empty dict is supplied for the first component, so the correspondence between components and dicts is unambiguous.\n\nWhen we plot the pdfs of the components, we can see they use their default parameters (`loc=1`, `scale=1`) for any parameters not given in `params` or `params_fix`.",
"_____no_output_____"
]
],
[
[
"x = np.linspace(rvs.min(), rvs.max(), 100)\ny = mixture.pdf(x, component='all')\n\nplt.hist(rvs, bins=bins, density=True, facecolor='white', edgecolor='black')\nplt.plot(x, y[0], label='component 0', color='C0')\nplt.plot(x, y[1], label='component 1', color='C1')\nplt.legend(frameon=False);",
"_____no_output_____"
]
],
[
[
"Now let's fit the free parameters.",
"_____no_output_____"
]
],
[
[
"mixture.fit(rvs)\nmixture",
"_____no_output_____"
]
],
[
[
"As expected, the `loc` parameter of the second component has remained fixed at 5.",
"_____no_output_____"
],
[
"### Predicting class labels\n\nLet's now address the second major task of mixture models: inference of class labels. The `posterior` method returns a distribution across components for each observation.",
"_____no_output_____"
]
],
[
[
"posterior = mixture.posterior(rvs)\nposterior.shape",
"_____no_output_____"
]
],
[
[
"Let's look at an individual observation and its posterior distribution.",
"_____no_output_____"
]
],
[
[
"print(rvs[0])\nprint(posterior[:, 0])",
"0.7289686797878553\n[9.99999856e-01 1.43778851e-07]\n"
]
],
[
[
"This isn't the most intuitive way of visualizing the output, so let's try to plot it a few different ways.\n\nWe can first plot the posterior probability of a class by its position along the x-axis as a line graph.",
"_____no_output_____"
]
],
[
[
"x = np.linspace(rvs.min(), rvs.max(), 100)\ny = mixture.posterior(x)\n\nfig, ax1 = plt.subplots()\nax2 = ax1.twinx()\n\nax1.hist(rvs, bins=bins, density=True, facecolor='white', edgecolor='black')\nax2.plot(x, y[0], color='C0', label='component 0')\nax2.plot(x, y[1], color='C1', label='component 1')\nax1.set_ylabel('Density')\nax2.set_ylabel('Posterior probability')\nax2.legend(ncol=2, loc='upper center', bbox_to_anchor=(0.5, -0.1), frameon=False);",
"_____no_output_____"
]
],
[
[
"We can plot the same information as a heatmap.",
"_____no_output_____"
]
],
[
[
"aspect = 0.2 # Ratio of y-axis to x-axis in display units\n\nplt.imshow(y, vmin=0, vmax=1,\n aspect=aspect*(x.max() - x.min()) / y.shape[0],\n extent=[x.min(), x.max(), 0, y.shape[0]])\nplt.yticks([y + 0.5 for y in range(y.shape[0])], [f'component {y}' for y in range(y.shape[0])])\nplt.colorbar(location='bottom', orientation='horizontal');",
"_____no_output_____"
]
],
[
[
"### Creating mixtures of other distributions\n\nObviously this package wouldn't be very useful if it was limited to fitting mixture models with only two normal components. Fortunately, it can fit an arbitrary number of components. Unfortunately, these components are limited to a relatively small subset of the distributions defined in SciPy stats, as the EM equations are explicitly solved for these distributions. This makes fitting the parameters more efficient and robust than if general purpose numerical optimization algorithms were used. The cost, however, is the types of distributions available are somewhat limited.\n\nWe can view the supported distributions by examining the `mles` variable in `mixmod.estimators`. It stores the maximum-likelihood estimators for each distribution in a dictionary.",
"_____no_output_____"
]
],
[
[
"mixmod.estimators.mles.keys()",
"_____no_output_____"
]
],
[
[
"Let's now simulate a mixture of exponential, gamma, and normal components and fit a mixture model!",
"_____no_output_____"
]
],
[
[
"rvs0 = stats.expon.rvs(scale=0.5, size=100)\nrvs1 = stats.gamma.rvs(a=4, scale=2, size=300)\nrvs2 = stats.norm.rvs(loc=15, scale=0.75, size=200)\nrvs = np.concatenate([rvs0, rvs1, rvs2])\n\nbins = np.linspace(rvs.min(), rvs.max(), num=50)\nplt.hist(rvs, bins=bins, density=True, facecolor='white', edgecolor='black');",
"_____no_output_____"
],
[
"mixture = mixmod.MixtureModel([stats.expon, stats.gamma, stats.norm])\nmixture",
"_____no_output_____"
],
[
"mixture.fit(rvs)\nmixture",
"_____no_output_____"
],
[
"x = np.linspace(rvs.min(), rvs.max(), 100)\ny = mixture.pdf(x, component='all')\n\nplt.hist(rvs, bins=bins, density=True, facecolor='white', edgecolor='black')\nplt.plot(x, y[0], label='component 0', color='C0')\nplt.plot(x, y[1], label='component 1', color='C1')\nplt.plot(x, y[2], label='component 2', color='C2')\nplt.legend(frameon=False);",
"_____no_output_____"
]
],
[
[
"## Conclusion\n\nThis brings us to the conclusion of the tutorial. We've covered the major parts of the MixtureModel class. There are a few optional arguments and methods we haven't touched on here, but they are straightforward and explained fully in the formal documentation. If you have any questions, please don't hesitate to reach out!",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
4abea8da7589277fc7a95565390ad118b8138fac
| 464,737 |
ipynb
|
Jupyter Notebook
|
Compare VGG Layers.ipynb
|
pschaldenbrand/HumanoidPainter
|
75fec4ad70d079338532d7b26d8256e6ee07ca98
|
[
"MIT"
] | 7 |
2021-04-21T11:37:48.000Z
|
2022-02-25T03:42:48.000Z
|
Compare VGG Layers.ipynb
|
pschaldenbrand/HumanoidPainter
|
75fec4ad70d079338532d7b26d8256e6ee07ca98
|
[
"MIT"
] | null | null | null |
Compare VGG Layers.ipynb
|
pschaldenbrand/HumanoidPainter
|
75fec4ad70d079338532d7b26d8256e6ee07ca98
|
[
"MIT"
] | 1 |
2021-12-15T04:53:07.000Z
|
2021-12-15T04:53:07.000Z
| 4,556.245098 | 461,848 | 0.962155 |
[
[
[
"from paint import *",
"_____no_output_____"
],
[
"output_stroke_inds = [5, 20, 50, 100, 200]\n\nrenderer_fn = 'renderer_constrained.pkl'\n\nactor_fns = [\n 'pretrained_models/vgg_layers/2/actor.pkl',\n 'pretrained_models/vgg_layers/8/actor.pkl',\n 'pretrained_models/vgg_layers/12/actor.pkl',\n 'pretrained_models/vgg_layers/17/actor.pkl',\n 'pretrained_models/vgg_layers/22/actor.pkl',\n 'pretrained_models/vgg_layers/31/actor.pkl'\n ]\nlabels = ['CM L=2', 'CM L=8', 'CM L=12', 'CM L=17', 'CM L=22', 'CM L=31']\n\nfn = 'data/img_align_celeba/171447.jpg'\n\ntarget = cv2.imread(fn, cv2.IMREAD_COLOR)\n\nall_canvas_progress = []\nfor actor_fn in actor_fns:\n all_canvas_progress.append(paint(actor_fn=actor_fn, renderer_fn=renderer_fn, \\\n max_step=int(output_stroke_inds[-1]/5)+1, img=fn, \\\n div=1, discrete_colors=False)[2])\n \nn_rows = len(all_canvas_progress)\nn_cols = len(output_stroke_inds)\n\nfig, ax = plt.subplots(n_rows, n_cols+1, figsize=(2*(n_cols+1), 2*n_rows))\n\nfor i in range(n_rows):\n for j in range(n_cols):\n if j==0: ax[i, j].set_ylabel(labels[i])\n ax[i, j].imshow(all_canvas_progress[i][output_stroke_inds[j]])\n ax[i, j].set_xticks([])\n ax[i, j].set_yticks([])\n if i == 0: ax[i, j].set_title(str(output_stroke_inds[j]) + ' Strokes')\n \n ax[i, n_cols].imshow(target[...,::-1])\n ax[i, n_cols].set_xticks([])\n ax[i, n_cols].set_yticks([])\n if i == 0: ax[i, n_cols].set_title('Target')",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code"
]
] |
4abec215fee9bb567ef6e202f05aa50919f8c53d
| 19,784 |
ipynb
|
Jupyter Notebook
|
uci-pharmsci/lectures/SMIRNOFF_simulations/mixture_simulations.ipynb
|
matthagy/drug-computing
|
da98dc4526269ef2557e607467a8d4c58cc24a49
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
uci-pharmsci/lectures/SMIRNOFF_simulations/mixture_simulations.ipynb
|
matthagy/drug-computing
|
da98dc4526269ef2557e607467a8d4c58cc24a49
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
uci-pharmsci/lectures/SMIRNOFF_simulations/mixture_simulations.ipynb
|
matthagy/drug-computing
|
da98dc4526269ef2557e607467a8d4c58cc24a49
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null | 34.466899 | 413 | 0.605742 |
[
[
[
"# Automated setup of mixtures\n\nWe've been working on streamlining setup of simulations of arbitrary mixtures in AMBER/GROMACS/OpenMM and others for some of our own research. I thought I'd demo this really quick so you can get a feel for it and see if you're interested in contributing. It also allows quick setup and analysis of nontrivial liquid simulations, which can be a good opportunity to try out MDTraj and other analysis tools.\n\n*Before running the below*, you will need to have followed the [getting started instructions](https://github.com/MobleyLab/drug-computing/blob/master/uci-pharmsci/getting-started.md) for this course.",
"_____no_output_____"
]
],
[
[
"from solvationtoolkit.solvated_mixtures import *\n\n#In this particular instance I'll just look at six solutes/solvent mixtures (not an all-by-all combination) which are pre-specified\n#solute names\nsolutes = ['phenol', 'toluene', 'benzene', 'methane', 'ethanol', 'naphthalene']\n#Solvent names\nsolvents = ['cyclohexane', 'cyclohexane', 'cyclohexane', 'octanol', 'octanol', 'octanol']\n\n#Number of solute/solvent molecules\nNsolu = 3\nNsolv = 100\n\n#Construct systems\nfor idx in range( len( solutes) ):\n # Define new mixture\n mixture = MixtureSystem()\n # Add solute and solvent\n mixture.addComponent(name=solutes[idx], number=Nsolu)\n mixture.addComponent(name=solvents[idx], number=Nsolv)\n # Note you can optionally specify mole fraction instead, or a mix of numbers/mole fractions, etc.\n \n # Build system, including AMBER input files (but not GROMACS)\n mixture.build(amber=True, gromacs=True)",
"/Users/dmobley/anaconda3/lib/python3.6/site-packages/pandas/io/parsers.py:2138: FutureWarning: split() requires a non-empty pattern match.\n yield pat.split(line.strip())\n/Users/dmobley/anaconda3/lib/python3.6/site-packages/pandas/io/parsers.py:2140: FutureWarning: split() requires a non-empty pattern match.\n yield pat.split(line.strip())\n"
]
],
[
[
"## Let's try and see if we can do a quick visualization of one of the systems via mdtraj just to make sure it looks right",
"_____no_output_____"
]
],
[
[
"#Import MDTraj\nimport mdtraj as md\n#Load \"trajectory\" (structures)\n#You can load from either format (SolvationToolkit generates both)\n#traj = md.load( 'data/amber/phenol_cyclohexane_3_100.inpcrd', top = 'data/amber/phenol_cyclohexane_3_100.prmtop' )\ntraj = md.load( 'data/gromacs/phenol_cyclohexane_3_100.gro')\n\n#Input viewer\nimport nglview\n\n#Set up view of structure\nview = nglview.show_mdtraj(traj)\n\n#Try some of the following to modify representations\nview.clear_representations()\nview.add_licorice('all')\nview.add_licorice('1-3', color = \"blue\") #For selection info, see http://arose.github.io/ngl/doc/#User_manual/Usage/Selection_language\nview.add_surface('1', opacity=0.3)\nview.add_surface('2, 3', color = 'red', opacity=0.3)\n\n#Show the view. Note that this needs to be the last command used to manipulate the view, i.e. if you modify the\n#representation after this, your view will be empty.\nview\n\n#VIEWER USAGE:\n# - Use your typical zoom command/gesture (i.e. pinch) to zoom in and out \n# - Click and drag to reorient\n# - Click on specific atoms/residues to find out details of what they are (and how they could be selected)",
"_____no_output_____"
]
],
[
[
"## Other possibly interesting things to try:\n* Find the average distance from phenol to phenol\n* Calculate the density or volume of the system\n* etc.\n\n(Drawing on MDTraj - see docs online)",
"_____no_output_____"
]
],
[
[
"# Use this box to try additional things",
"_____no_output_____"
]
],
[
[
"# Let's use a SMIRNOFF forcefield to parameterize the system, minimize, and run dynamics\n\n(This requires `openforcefield`, which you will have conda-installed if you've followed the getting started info.)\n\nFirst we handle imports",
"_____no_output_____"
]
],
[
[
"# Import the SMIRNOFF forcefield engine and some useful tools\nfrom openforcefield.typing.engines import smirnoff\nfrom openforcefield.typing.engines.smirnoff import ForceField\nfrom openforcefield.utils import get_data_filename, extractPositionsFromOEMol, generateTopologyFromOEMol\n\n# At this point SMIRNOFF requires oechem, though an RDKit version is in the works\nfrom openeye import oechem\n\n# We use PDBFile to get OpenMM topologies from PDB files\nfrom simtk.openmm.app import PDBFile\n\n# We'll use OpenMM for simulations/minimization\nfrom simtk import openmm, unit\nfrom simtk.openmm import app\n# MDTraj for working with trajectories; time for timing\nimport time\nimport mdtraj",
"_____no_output_____"
]
],
[
[
"## Now we handle assignment of force field parameters and generation of an OpenMM System",
"_____no_output_____"
]
],
[
[
"# Specify names of molecules that are components of the system\nmol_filenames = ['phenol', 'cyclohexane']\n\n# Load OEMols of components of system - SMIRNOFF requires OEMols of the components\n# and an OpenMM topology as input\noemols = []\nflavor = oechem.OEIFlavor_Generic_Default | oechem.OEIFlavor_MOL2_Default | oechem.OEIFlavor_MOL2_Forcefield\n #input flavor to use for reading mol2 files (so that it can understand GAFF atom names)\n# Loop over molecule files and load oemols\nfor name in mol_filenames:\n mol = oechem.OEGraphMol()\n filename = 'data/monomers/'+name+'.mol2'\n ifs = oechem.oemolistream(filename)\n ifs.SetFlavor( oechem.OEFormat_MOL2, flavor)\n oechem.OEReadMolecule(ifs, mol )\n oechem.OETriposAtomNames(mol) #Right now we have GAFF atom names, which OE doesn't like; reassign\n oemols.append(mol)\n ifs.close()\n \n# Load SMIRNOFF99Frosst force field (AMBER-family force field created by Christopher Bayly)\nforcefield = ForceField(get_data_filename('forcefield/smirnoff99Frosst.ffxml'))\n\n# Get OpenMM topology for mixture of phenol and cyclohexane from where SolvationToolkit created\n# it on disk\npdbfile = PDBFile('data/packmol_boxes/phenol_cyclohexane_3_100.pdb')\n\n# Assign SMIRNOFF parameters and create system; here we'll use PME with a 1.1 nm LJ cutoff.\nsystem = forcefield.createSystem( pdbfile.topology, oemols, nonbondedMethod = smirnoff.PME, nonbondedCutoff=1.1*unit.nanometer )\n",
"_____no_output_____"
]
],
[
[
"## Finally we energy minimize and run dynamics",
"_____no_output_____"
]
],
[
[
"# Set how many steps we'll run and other run parameters\nnum_steps=10000\ntrj_freq = 100 #Trajectory output frequency\ndata_freq = 100 #Energy/data output frequency\ntemperature = 300*unit.kelvin #Temperature\ntime_step = 2.*unit.femtoseconds\nfriction = 1./unit.picosecond #Langevin friction constant\n\n# Bookkeeping -- if you run this more than once and perhaps encountered an exception, we need to make sure the reporter is closed\ntry: \n reporter.close()\nexcept: pass\n \n# Set up integrator, platform for running simulation \nintegrator = openmm.LangevinIntegrator(temperature, friction, time_step)\nplatform = openmm.Platform.getPlatformByName('Reference')\nsimulation = app.Simulation(pdbfile.topology, system, integrator)\n# Set positions, velocities\nsimulation.context.setPositions(pdbfile.positions)\nsimulation.context.setVelocitiesToTemperature(temperature)\n\n# Before doing dynamics, energy minimize (initial geometry will be strained)\nsimulation.minimizeEnergy()\n\n# Set up reporter for output\nreporter = mdtraj.reporters.HDF5Reporter('mixture.h5', trj_freq)\nsimulation.reporters=[]\nsimulation.reporters.append(reporter)\nsimulation.reporters.append(app.StateDataReporter('data.csv', data_freq, step=True, potentialEnergy=True, temperature=True, density=True))\n\n# Run the dynamics\nprint(\"Starting simulation\")\nstart = time.clock()\nsimulation.step(num_steps)\nend = time.clock()\nprint(\"Elapsed time %.2f seconds\" % (end-start))\n#netcdf_reporter.close()\nreporter.close()\nprint(\"Done!\")\n",
"Starting simulation\nElapsed time 15.69 seconds\nDone!\n"
]
],
[
[
"## Let's make a movie of our simulation",
"_____no_output_____"
]
],
[
[
"import nglview\ntraj=mdtraj.load('mixture.h5')\nview = nglview.show_mdtraj(traj)\n\n#Try some of the following to modify representations\nview.clear_representations()\nview.add_licorice('all')\nview.add_licorice('1-3', color = \"blue\") #For selection info, see http://arose.github.io/ngl/doc/#User_manual/Usage/Selection_language\nview.add_surface('1', opacity=0.3)\nview.add_surface('2, 3', color = 'red', opacity=0.3)\n\nview #Note that if you view a movie and keep it playing, your notebook will run a hair slow...",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4abec33d6f3f2f029aa3bf0802b08f63d1e310f3
| 119,960 |
ipynb
|
Jupyter Notebook
|
cifar10_resnet/influence_cifar10_resnet_mislabel_all_layers.ipynb
|
giladcohen/darkon-examples
|
29b0a880cc2ed7ac1b463e9e1e633ab521e04dc0
|
[
"Apache-2.0"
] | 16 |
2017-11-26T12:31:11.000Z
|
2019-11-15T05:51:33.000Z
|
cifar10_resnet/influence_cifar10_resnet_mislabel_all_layers.ipynb
|
giladcohen/darkon-examples
|
29b0a880cc2ed7ac1b463e9e1e633ab521e04dc0
|
[
"Apache-2.0"
] | 6 |
2018-01-10T06:52:23.000Z
|
2018-03-02T09:34:56.000Z
|
cifar10_resnet/influence_cifar10_resnet_mislabel_all_layers.ipynb
|
giladcohen/darkon-examples
|
29b0a880cc2ed7ac1b463e9e1e633ab521e04dc0
|
[
"Apache-2.0"
] | 5 |
2018-01-12T03:54:48.000Z
|
2019-03-05T00:20:44.000Z
| 237.075099 | 70,444 | 0.904527 |
[
[
[
"# Mislabel detection using influence function with all of layers on Cifar-10, ResNet\n\n### Author\n[Neosapience, Inc.](http://www.neosapience.com)\n\n### Pre-train model conditions\n---\n- made mis-label from 1 percentage dog class to horse class\n- augumentation: on\n- iteration: 80000\n- batch size: 128\n\n#### cifar-10 train dataset\n| | horse | dog | airplane | automobile | bird | cat | deer | frog | ship | truck |\n|----------:|:-----:|:----:|:--------:|:----------:|:----:|:----:|:----:|:----:|:----:|:-----:|\n| label | 5000 | **4950** | 5000 | 5000 | 5000 | 5000 | 5000 | 5000 | 5000 | 5000 |\n| mis-label | **50** | | | | | | | | | |\n| total | **5050** | 4950 | 5000 | 5000 | 5000 | 5000 | 5000 | 5000 | 5000 | 5000 |\n\n\n### License\n---\nApache License 2.0\n\n### References\n---\n- Darkon Documentation: <http://darkon.io>\n- Darkon Github: <https://github.com/darkonhub/darkon>\n- Resnet code: <https://github.com/wenxinxu/resnet-in-tensorflow>\n- More examples: <https://github.com/darkonhub/darkon-examples>\n\n### Index\n- [Load results and analysis](#Load-results-and-analysis)\n- [How to use upweight influence function for mis-label](#How-to-use-upweight-influence-function-for-mis-label)",
"_____no_output_____"
],
[
"## Load results and analysis",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nscores = np.load('mislabel-result-all.npy')\nprint('num tests: {}'.format(len(scores)))\n\nbegin_mislabel_idx = 5000\nsorted_indices = np.argsort(scores)\n\nprint('dogs in helpful: {} / 100'.format(np.sum(sorted_indices[-100:] >= begin_mislabel_idx)))\nprint('mean for all: {}'.format(np.mean(scores)))\nprint('mean for horse: {}'.format(np.mean(scores[:begin_mislabel_idx])))\nprint('mean for dogs: {}'.format(np.mean(scores[begin_mislabel_idx:])))\n\nmis_label_ranking = np.where(sorted_indices >= begin_mislabel_idx)[0]\nprint('all of mis-labels: {}'.format(mis_label_ranking))\n\ntotal = scores.size\ntotal_pos = mis_label_ranking.size\ntotal_neg = total - total_pos\n\ntpr = np.zeros([total_pos])\nfpr = np.zeros([total_pos])\nfor idx in range(total_pos):\n tpr[idx] = float(total_pos - idx)\n fpr[idx] = float(total - mis_label_ranking[idx] - tpr[idx])\n\ntpr /= total_pos\nfpr /= total_neg\n\nhistogram = sorted_indices >= begin_mislabel_idx\nhistogram = histogram.reshape([10, -1])\nhistogram = np.sum(histogram, axis=1)\nacc = np.cumsum(histogram[::-1])\n\nfig, ax = plt.subplots(1, 2, figsize=(20, 10))\nax[0].set_ylabel('true positive rate')\nax[0].set_xlabel('false positive rate')\nax[0].set_ylim(0.0, 1.0)\nax[0].set_xlim(0.0, 1.0)\nax[0].grid(True)\nax[0].plot(fpr, tpr)\n\nax[1].set_ylabel('num of mis-label')\nax[1].set_xlabel('threshold')\nax[1].grid(True)\nax[1].bar(range(10), acc)\n\nplt.sca(ax[1])\nplt.xticks(range(10), ['{}~{}%'.format(p, p + 10) for p in range(0, 100, 10)])\n",
"num tests: 5050\ndogs in helpful: 6 / 100\nmean for all: 2.45772849226e-06\nmean for horse: 2.44922575777e-06\nmean for dogs: 3.30800194102e-06\nall of mis-labels: [ 951 2130 2761 3050 3084 3170 3198 3210 3231 3315 3449 3489 3547 3644 3676\n 3765 3818 3876 3896 3912 4079 4276 4300 4320 4338 4372 4380 4429 4487 4519\n 4572 4584 4695 4753 4754 4762 4802 4819 4828 4831 4876 4879 4881 4939 4955\n 4978 5001 5011 5034 5048]\n"
],
[
"fig, ax = plt.subplots(figsize=(20, 5))\nax.grid(True)\nax.plot(scores)",
"_____no_output_____"
]
],
[
[
"<br><br><br><br>\n\n## How to use upweight influence function for mis-label \n\n### Import packages",
"_____no_output_____"
]
],
[
[
"# resnet: implemented by wenxinxu\nfrom cifar10_input import *\nfrom cifar10_train import Train\n\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport darkon\n\n# to enable specific GPU\n%set_env CUDA_VISIBLE_DEVICES=0\n\n# cifar-10 classes\n_classes = (\n 'airplane',\n 'automobile',\n 'bird',\n 'cat',\n 'deer',\n 'dog',\n 'frog',\n 'horse',\n 'ship',\n 'truck'\n)",
"env: CUDA_VISIBLE_DEVICES=0\n"
]
],
[
[
"### Download/Extract cifar10 dataset",
"_____no_output_____"
]
],
[
[
"maybe_download_and_extract()",
"_____no_output_____"
]
],
[
[
"### Implement dataset feeder",
"_____no_output_____"
]
],
[
[
"class MyFeeder(darkon.InfluenceFeeder):\n def __init__(self):\n # load train data\n # for ihvp\n data, label = prepare_train_data(padding_size=0)\n # update some label\n label = self.make_mislabel(label)\n\n self.train_origin_data = data / 256.\n self.train_label = label\n self.train_data = whitening_image(data)\n \n self.train_batch_offset = 0\n\n def make_mislabel(self, label):\n target_class_idx = 7\n correct_indices = np.where(label == target_class_idx)[0] \n self.correct_indices = correct_indices[:]\n \n # 1% dogs to horses.\n # In the mis-label model training, I used this script to choose random dogs.\n labeled_dogs = np.where(label == 5)[0]\n np.random.shuffle(labeled_dogs)\n mislabel_indices = labeled_dogs[:int(labeled_dogs.shape[0] * 0.01)]\n label[mislabel_indices] = 7.0\n self.mislabel_indices = mislabel_indices\n\n print('target class: {}'.format(_classes[target_class_idx]))\n print(self.mislabel_indices)\n return label\n\n def test_indices(self, indices):\n return self.train_data[indices], self.train_label[indices]\n\n def train_batch(self, batch_size):\n # for recursion part\n # calculate offset\n start = self.train_batch_offset\n end = start + batch_size\n self.train_batch_offset += batch_size\n\n return self.train_data[start:end, ...], self.train_label[start:end, ...]\n\n def train_one(self, idx):\n return self.train_data[idx, ...], self.train_label[idx, ...]\n\n def reset(self):\n self.train_batch_offset = 0\n\n# to fix shuffled data\nnp.random.seed(75)\nfeeder = MyFeeder()",
"Reading images from cifar10_data/cifar-10-batches-py/data_batch_1\nReading images from cifar10_data/cifar-10-batches-py/data_batch_2\nReading images from cifar10_data/cifar-10-batches-py/data_batch_3\nReading images from cifar10_data/cifar-10-batches-py/data_batch_4\nReading images from cifar10_data/cifar-10-batches-py/data_batch_5\ntarget class: horse\n[21961 21986 27093 41046 11712 16494 24378 42274 24006 43962 35684 36899\n 28777 37099 14932 6202 18096 5135 33765 44823 6358 42089 19335 11610\n 10737 38555 43315 19835 9665 25727 13960 13911 42538 29577 42578 324\n 42384 27401 1647 34188 17670 32919 45007 29459 4203 25826 22079 31240\n 13067 17121]\n"
]
],
[
[
"### Restore pre-trained model",
"_____no_output_____"
]
],
[
[
"# tf model checkpoint\ncheck_point = 'pre-trained-mislabel/model.ckpt-79999'\n\nnet = Train()\nnet.build_train_validation_graph()\n\nsaver = tf.train.Saver(tf.global_variables())\nsess = tf.InteractiveSession()\nsaver.restore(sess, check_point)",
"INFO:tensorflow:Restoring parameters from pre-trained-mislabel/model.ckpt-79999\n"
]
],
[
[
"### Upweight influence options",
"_____no_output_____"
]
],
[
[
"approx_params = {\n 'scale': 200,\n 'num_repeats': 3,\n 'recursion_depth': 50,\n 'recursion_batch_size': 100\n}\n\n# targets\ntest_indices = list(feeder.correct_indices) + list(feeder.mislabel_indices)\nprint('num test targets: {}'.format(len(test_indices)))",
"num test targets: 5050\n"
]
],
[
[
"### Run upweight influence function",
"_____no_output_____"
]
],
[
[
"# choose all of trainable layers\ntrainable_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n\n# initialize Influence function\ninspector = darkon.Influence(\n workspace='./influence-workspace',\n feeder=feeder,\n loss_op_train=net.full_loss,\n loss_op_test=net.loss_op,\n x_placeholder=net.image_placeholder,\n y_placeholder=net.label_placeholder,\n trainable_variables=trainable_variables)\n\n\nscores = list()\nfor i, target in enumerate(test_indices):\n score = inspector.upweighting_influence(\n sess,\n [target],\n 1,\n approx_params,\n [target],\n 10000000,\n force_refresh=True\n )\n scores += list(score)\n print('done: [{}] - {}'.format(i, score))\n\nprint(scores)\nnp.save('mislabel-result-all.npy', scores)",
"_____no_output_____"
]
],
[
[
"### License\n\n---\n<pre>\nCopyright 2017 Neosapience, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n</pre>\n\n---",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4abed60550aa5cd74a1cc877cef93a6df7b0e302
| 1,004,788 |
ipynb
|
Jupyter Notebook
|
rsnapsim/interactive_notebooks/Covariances.ipynb
|
MunskyGroup/rSNAPsim
|
af3e496d5252e1d2e1da061277123233a5d609b4
|
[
"MIT"
] | 1 |
2022-01-28T18:17:37.000Z
|
2022-01-28T18:17:37.000Z
|
rsnapsim/interactive_notebooks/Covariances.ipynb
|
MunskyGroup/rSNAPsim
|
af3e496d5252e1d2e1da061277123233a5d609b4
|
[
"MIT"
] | null | null | null |
rsnapsim/interactive_notebooks/Covariances.ipynb
|
MunskyGroup/rSNAPsim
|
af3e496d5252e1d2e1da061277123233a5d609b4
|
[
"MIT"
] | 1 |
2020-12-02T06:36:17.000Z
|
2020-12-02T06:36:17.000Z
| 1,947.263566 | 299,120 | 0.962319 |
[
[
[
"\nimport os\nos.chdir('..')\nos.chdir('..')\nprint(os.getcwd())\nimport rsnapsim as rss\nimport numpy as np\nos.chdir('rsnapsim')\nos.chdir('interactive_notebooks')\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\n\n\n\npoi_strs, poi_objs, tagged_pois,raw_seq = rss.seqmanip.open_seq_file('../gene_files/H2B_withTags.txt')\npoi = tagged_pois['1'][0] #protein object\n\npoi.tag_epitopes['T_Flag'] = [10,20,30,40,50,60,70]\npoi.tag_epitopes['T_Hemagglutinin'] = [300,330,340,350]\nplt.style.use('dark_background')\nplt.rcParams['figure.dpi'] = 120\nplt.rcParams['lines.linewidth'] = 1\nplt.rcParams['axes.linewidth'] = 1.5\nplt.rcParams['font.size'] = 15\nplt.rcParams['axes.grid'] = False\n\ncolors = ['#00ff51', '#00f7ff']\n\n\nrss.solver.protein=poi\nt = np.linspace(0,500,501)",
"C:\\Users\\willi\\Documents\\GitHub\\rSNAPsim\nimporting C++ models\nc++ models loaded successfully\nimporting C++ tRNA models\nimporting C++ models\nc++ models loaded successfully\nimporting C++ tRNA models\nc++ models loaded successfully\n"
],
[
"poi.visualize_probe(colors=['#00ff51', '#00f7ff'])\n",
"C:\\Users\\willi\\Documents\\GitHub\\rSNAPsim\\rsnapsim\\poi.py:315: UserWarning: Matplotlib is currently using module://ipykernel.pylab.backend_inline, which is a non-GUI backend, so cannot show the figure.\n fig.show()\n"
],
[
"sttime = time.time()\nssa_soln = rss.solver.solve_ssa(poi.kelong,t,ki=.033,n_traj=20)\nsolvetime = time.time()-sttime\nprint(ssa_soln.intensity_vec.shape)\nplt.plot(np.mean(ssa_soln.intensity_vec[0],axis=1),color='#00ff51',alpha=.8)\nplt.plot(np.mean(ssa_soln.intensity_vec[1],axis=1),color='#00f7ff',alpha=.8)\nplt.xlabel('time')\nplt.ylabel('intensity')\nprint(\"Low memory, no recording: solved in %f seconds\" % solvetime)\n",
"(2, 501, 20)\nLow memory, no recording: solved in 0.066023 seconds\n"
]
],
[
[
"## Autocovariances with individual means",
"_____no_output_____"
]
],
[
[
"acov,err_acov = rss.inta.get_autocov(ssa_soln.intensity_vec,norm='ind')\nplt.plot(np.mean(acov[0],axis=1),color=colors[0]);plt.plot(np.mean(acov[1],axis=1),color=colors[1])\nplt.plot(np.mean(acov[0],axis=1) - err_acov[0],'--',color=colors[0]);plt.plot(np.mean(acov[1],axis=1)- err_acov[1],'--',color=colors[1])\nplt.plot(np.mean(acov[0],axis=1)+ err_acov[0],'--',color=colors[0]);plt.plot(np.mean(acov[1],axis=1)+ err_acov[1],'--',color=colors[1])\nplt.plot([0,500],[0,0],'r--')\nplt.xlim([0,100])\nplt.xlabel('tau')\nplt.ylabel('G(tau)')",
"_____no_output_____"
],
[
"#normalized by G0\n\nacc,acc_err = rss.inta.get_autocorr(acov)\nn_traj = acc.shape[-1]\n\nerr_acov = 1.0/np.sqrt(n_traj)*np.std(acc,ddof=1,axis=2)\n\n\nplt.plot(np.mean(acc[0],axis=1),color=colors[0]);plt.plot(np.mean(acc[1],axis=1),color=colors[1])\n\nplt.plot(np.mean(acc[0],axis=1) - err_acov[0],'--',color=colors[0]);plt.plot(np.mean(acc[1],axis=1)- err_acov[1],'--',color=colors[1])\nplt.plot(np.mean(acc[0],axis=1)+ err_acov[0],'--',color=colors[0]);plt.plot(np.mean(acc[1],axis=1)+ err_acov[1],'--',color=colors[1])\n\nplt.plot([0,500],[0,0],'r--')\nplt.xlim([0,100])\n\nplt.xlabel('tau')\nplt.ylabel('G(tau)')",
"_____no_output_____"
]
],
[
[
"## Global means",
"_____no_output_____"
]
],
[
[
"acov,err_acov = rss.inta.get_autocov(ssa_soln.intensity_vec,norm='global')\nplt.plot(np.mean(acov[0],axis=1),color='seagreen');plt.plot(np.mean(acov[1],axis=1),color='violet')\nplt.plot(np.mean(acov[0],axis=1) - err_acov[0],'--',color='seagreen');plt.plot(np.mean(acov[1],axis=1)- err_acov[1],'--',color='violet')\nplt.plot(np.mean(acov[0],axis=1)+ err_acov[0],'--',color='seagreen');plt.plot(np.mean(acov[1],axis=1)+ err_acov[1],'--',color='violet')\nplt.plot([0,500],[0,0],'r--')\nplt.xlim([0,100])",
"_____no_output_____"
],
[
"#normalized by G0\n\nacc,acc_error = rss.inta.get_autocorr(acov,g0='G1')\nmean_acc = np.mean(acc,axis=2)\n\nplt.plot(mean_acc[0],color='seagreen');plt.plot(mean_acc[1],color='violet')\nplt.plot(np.mean(acc[0],axis=1) - acc_error[0],'--',color='seagreen');plt.plot(np.mean(acc[1],axis=1)- acc_error[1],'--',color='violet')\nplt.plot(np.mean(acc[0],axis=1)+ acc_error[0],'--',color='seagreen');plt.plot(np.mean(acc[1],axis=1)+ acc_error[1],'--',color='violet')\nplt.plot([0,500],[0,0],'r--')\nplt.xlim([0,100])",
"_____no_output_____"
]
],
[
[
"## Cross correlations",
"_____no_output_____"
]
],
[
[
"cross_corr,err_cc,inds = rss.inta.get_crosscorr(ssa_soln.intensity_vec,norm='indiv')\nplt.figure()\n\ns11_cc = np.mean(cross_corr[0],axis=1)\ns12_cc = np.mean(cross_corr[1],axis=1)\ns21_cc = np.mean(cross_corr[2],axis=1)\ns22_cc = np.mean(cross_corr[3],axis=1)\n\nplt.plot(s11_cc/s11_cc[500],color=colors[0] ); \nplt.plot(s21_cc/s21_cc[500],color='#ff00ee');\nplt.plot(s22_cc/s22_cc[500],color=colors[1]);\n\nplt.plot(s11_cc/s11_cc[500] - err_cc[0]/s11_cc[500],'--',color=colors[0] ); \nplt.plot(s11_cc/s11_cc[500] + err_cc[0]/s11_cc[500],'--',color=colors[0] ); \n\n\nplt.plot(s21_cc/s21_cc[500] - err_cc[2]/s21_cc[500] ,'--',color='#ff00ee' ); \nplt.plot(s21_cc/s21_cc[500] + err_cc[2]/s21_cc[500] ,'--',color='#ff00ee'); \n\nplt.plot(s22_cc/s22_cc[500] - s22_cc[3]/s22_cc[500],'--',color=colors[1] ); \nplt.plot(s22_cc/s22_cc[500] + s22_cc[3]/s22_cc[500],'--',color=colors[1] ); \n\nplt.plot([500,500],[0,1.1],'r--')\nplt.plot([400,600],[0,0],'r--')\n\nplt.legend(['00','10','11' ])\nplt.xlim([400,600])\nplt.xlabel('tau')\nplt.ylabel('G(tau)')",
"_____no_output_____"
]
],
[
[
"## normalization modes\n\n| norm | effect |\n| :- | :-: |\n| global | subtract all intensities by the global mean intensity before correlation |\n| individual | subtract all intensities by the trajectory mean intensity before correlation |\n| raw | do nothing, correlate the intensities as they are |\n\n\n## G0 \n\n| norm | effect |\n| :- | :-: |\n| global_max | divide correlations by the global maximum point |\n| individual_max | divide correlations by the individual trajectory maximum point |\n| global_center | divide correlations by the global average of the center point of the correlation |\n| individual_center | divide all correlations by the trajectory center point value |\n| None | do nothing, do not normalize the correlations by anything|\n",
"_____no_output_____"
]
],
[
[
"cross_corr,err_cc,inds = rss.inta.get_crosscorr(ssa_soln.intensity_vec,norm='indiv',g0='indiv_max')\nplt.figure()\n\nplt.plot(cross_corr[0], color = colors[0],alpha=.5)\nplt.plot(cross_corr[2],color = '#ff00ee',alpha=.5)\nplt.plot(cross_corr[3], color = colors[1],alpha=.5)\n\n\ns11_cc = np.mean(cross_corr[0],axis=1)\ns12_cc = np.mean(cross_corr[1],axis=1)\ns21_cc = np.mean(cross_corr[2],axis=1)\ns22_cc = np.mean(cross_corr[3],axis=1)\n\n\nplt.plot([500,500],[0,1.1],'r--')\nplt.plot([400,600],[0,0],'r--')\n\nplt.legend(['00','10','11' ])\nplt.xlim([400,600])\nplt.xlabel('tau')\nplt.ylabel('G(tau)')",
"_____no_output_____"
],
[
"cross_corr,err_cc,inds = rss.inta.get_crosscorr(ssa_soln.intensity_vec,norm='global',g0='indiv_max')\nplt.figure()\n\nplt.plot(cross_corr[0], color = colors[0],alpha=.5)\nplt.plot(cross_corr[2],color = '#ff00ee',alpha=.5)\nplt.plot(cross_corr[3], color = colors[1],alpha=.5)\n\n\ns11_cc = np.mean(cross_corr[0],axis=1)\ns12_cc = np.mean(cross_corr[1],axis=1)\ns21_cc = np.mean(cross_corr[2],axis=1)\ns22_cc = np.mean(cross_corr[3],axis=1)\n\n\nplt.plot([500,500],[0,1.1],'r--')\nplt.plot([400,600],[0,0],'r--')\n\nplt.legend(['00','10','11' ])\nplt.xlim([400,600])\nplt.xlabel('tau')\nplt.ylabel('G(tau)')",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
4abee967a09a86da38c9ab08372ce3abe9b26856
| 46,295 |
ipynb
|
Jupyter Notebook
|
tensorflow/lite/g3doc/tutorials/model_maker_question_answer.ipynb
|
vilmar-hillow/tensorflow
|
7034d6b3bc3fa5a8194b75f37960378f2873f6e2
|
[
"Apache-2.0"
] | 27 |
2019-01-02T09:36:57.000Z
|
2022-02-21T06:41:51.000Z
|
tensorflow/lite/g3doc/tutorials/model_maker_question_answer.ipynb
|
vilmar-hillow/tensorflow
|
7034d6b3bc3fa5a8194b75f37960378f2873f6e2
|
[
"Apache-2.0"
] | 3 |
2019-01-23T11:01:22.000Z
|
2022-02-24T02:53:31.000Z
|
tensorflow/lite/g3doc/tutorials/model_maker_question_answer.ipynb
|
vilmar-hillow/tensorflow
|
7034d6b3bc3fa5a8194b75f37960378f2873f6e2
|
[
"Apache-2.0"
] | 11 |
2019-03-02T12:42:23.000Z
|
2021-02-04T12:20:10.000Z
| 54.982185 | 823 | 0.610282 |
[
[
[
"##### Copyright 2020 The TensorFlow Authors.",
"_____no_output_____"
]
],
[
[
"#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"_____no_output_____"
]
],
[
[
"# Question Answer with TensorFlow Lite Model Maker",
"_____no_output_____"
],
[
"<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/lite/tutorials/model_maker_question_answer\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/tutorials/model_maker_question_answer.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/tutorials/model_maker_question_answer.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/tensorflow/tensorflow/lite/g3doc/tutorials/model_maker_question_answer.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n</table>",
"_____no_output_____"
],
[
"The TensorFlow Lite Model Maker library simplifies the process of adapting and converting a TensorFlow model to particular input data when deploying this model for on-device ML applications.\n\nThis notebook shows an end-to-end example that utilizes the Model Maker library to illustrate the adaptation and conversion of a commonly-used question answer model for question answer task.",
"_____no_output_____"
],
[
"# Introduction to Question Answer Task",
"_____no_output_____"
],
[
"The supported task in this library is extractive question answer task, which means given a passage and a question, the answer is the span in the passage. The image below shows an example for question answer.\n\n\n<p align=\"center\"><img src=\"https://storage.googleapis.com/download.tensorflow.org/models/tflite/screenshots/model_maker_squad_showcase.png\" width=\"500\"></p>\n\n<p align=\"center\">\n <em>Answers are spans in the passage (image credit: <a href=\"https://rajpurkar.github.io/mlx/qa-and-squad/\">SQuAD blog</a>) </em>\n</p>\n\nAs for the model of question answer task, the inputs should be the passage and question pair that are already preprocessed, the outputs should be the start logits and end logits for each token in the passage.\nThe size of input could be set and adjusted according to the length of passage and question.",
"_____no_output_____"
],
[
"## End-to-End Overview\n",
"_____no_output_____"
],
[
"The following code snippet demonstrates how to get the model within a few lines of code. The overall process includes 5 steps: (1) choose a model, (2) load data, (3) retrain the model, (4) evaluate, and (5) export it to TensorFlow Lite format.",
"_____no_output_____"
],
[
"```python\n# Chooses a model specification that represents the model.\nspec = model_spec.get('mobilebert_qa')\n\n# Gets the training data and validation data.\ntrain_data = QuestionAnswerDataLoader.from_squad(train_data_path, spec, is_training=True)\nvalidation_data = QuestionAnswerDataLoader.from_squad(validation_data_path, spec, is_training=False)\n\n# Fine-tunes the model.\nmodel = question_answer.create(train_data, model_spec=spec)\n\n# Gets the evaluation result.\nmetric = model.evaluate(validation_data)\n\n# Exports the model to the TensorFlow Lite format in the export directory.\nmodel.export(export_dir)\n```",
"_____no_output_____"
],
[
"The following sections explain the code in more detail.",
"_____no_output_____"
],
[
"## Prerequisites\n\nTo run this example, install the required packages, including the Model Maker package from the [GitHub repo](https://github.com/tensorflow/examples/tree/master/tensorflow_examples/lite/model_maker).",
"_____no_output_____"
]
],
[
[
"!pip install tflite-model-maker",
"Collecting tflite-model-maker\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/13/bc/4c23b9cb9ef612a1f48bac5543bd531665de5eab8f8231111aac067f8c30/tflite_model_maker-0.1.2-py3-none-any.whl (104kB)\n\r\u001b[K |███▏ | 10kB 28.4MB/s eta 0:00:01\r\u001b[K |██████▎ | 20kB 1.8MB/s eta 0:00:01\r\u001b[K |█████████▍ | 30kB 2.4MB/s eta 0:00:01\r\u001b[K |████████████▋ | 40kB 2.7MB/s eta 0:00:01\r\u001b[K |███████████████▊ | 51kB 2.1MB/s eta 0:00:01\r\u001b[K |██████████████████▉ | 61kB 2.4MB/s eta 0:00:01\r\u001b[K |██████████████████████ | 71kB 2.7MB/s eta 0:00:01\r\u001b[K |█████████████████████████▏ | 81kB 2.9MB/s eta 0:00:01\r\u001b[K |████████████████████████████▎ | 92kB 3.1MB/s eta 0:00:01\r\u001b[K |███████████████████████████████▌| 102kB 3.0MB/s eta 0:00:01\r\u001b[K |████████████████████████████████| 112kB 3.0MB/s \n\u001b[?25hRequirement already satisfied: absl-py in /usr/local/lib/python3.6/dist-packages (from tflite-model-maker) (0.10.0)\nCollecting tf-nightly\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/33/d4/61c47ae889b490b9c5f07f4f61bdc057c158a1a1979c375fa019d647a19e/tf_nightly-2.4.0.dev20200914-cp36-cp36m-manylinux2010_x86_64.whl (390.1MB)\n\u001b[K |████████████████████████████████| 390.2MB 43kB/s \n\u001b[?25hRequirement already satisfied: numpy>=1.17.3 in /usr/local/lib/python3.6/dist-packages (from tflite-model-maker) (1.18.5)\nRequirement already satisfied: pillow in /usr/local/lib/python3.6/dist-packages (from tflite-model-maker) (7.0.0)\nCollecting tf-models-nightly\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/d3/e9/c4e5a451c268a5a75a27949562364f6086f6bb33b226a065a8beceefa9ba/tf_models_nightly-2.3.0.dev20200914-py2.py3-none-any.whl (993kB)\n\u001b[K |████████████████████████████████| 1.0MB 57.6MB/s \n\u001b[?25hCollecting flatbuffers==1.12\n Downloading 
https://files.pythonhosted.org/packages/eb/26/712e578c5f14e26ae3314c39a1bdc4eb2ec2f4ddc89b708cf8e0a0d20423/flatbuffers-1.12-py2.py3-none-any.whl\nRequirement already satisfied: tensorflow-hub>=0.8.0 in /usr/local/lib/python3.6/dist-packages (from tflite-model-maker) (0.9.0)\nCollecting fire\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/34/a7/0e22e70778aca01a52b9c899d9c145c6396d7b613719cd63db97ffa13f2f/fire-0.3.1.tar.gz (81kB)\n\u001b[K |████████████████████████████████| 81kB 11.5MB/s \n\u001b[?25hCollecting sentencepiece\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/d4/a4/d0a884c4300004a78cca907a6ff9a5e9fe4f090f5d95ab341c53d28cbc58/sentencepiece-0.1.91-cp36-cp36m-manylinux1_x86_64.whl (1.1MB)\n\u001b[K |████████████████████████████████| 1.1MB 50.9MB/s \n\u001b[?25hCollecting tflite-support==0.1.0rc3.dev2\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/fa/c5/5e9ee3abd5b4ef8294432cd714407f49a66befa864905b66ee8bdc612795/tflite_support-0.1.0rc3.dev2-cp36-cp36m-manylinux2010_x86_64.whl (1.0MB)\n\u001b[K |████████████████████████████████| 1.0MB 50.9MB/s \n\u001b[?25hRequirement already satisfied: tensorflow-datasets>=2.1.0 in /usr/local/lib/python3.6/dist-packages (from tflite-model-maker) (2.1.0)\nRequirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from absl-py->tflite-model-maker) (1.15.0)\nRequirement already satisfied: termcolor>=1.1.0 in /usr/local/lib/python3.6/dist-packages (from tf-nightly->tflite-model-maker) (1.1.0)\nRequirement already satisfied: opt-einsum>=2.3.2 in /usr/local/lib/python3.6/dist-packages (from tf-nightly->tflite-model-maker) (3.3.0)\nCollecting tb-nightly<3.0.0a0,>=2.4.0a0\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/fc/cb/4dfe0d65bffb5e9663261ff664e6f5a2d37672b31dae27a0f14721ac00d3/tb_nightly-2.4.0a20200914-py3-none-any.whl (10.1MB)\n\u001b[K |████████████████████████████████| 10.1MB 51.4MB/s \n\u001b[?25hRequirement already satisfied: 
typing-extensions>=3.7.4.2 in /usr/local/lib/python3.6/dist-packages (from tf-nightly->tflite-model-maker) (3.7.4.3)\nRequirement already satisfied: wheel>=0.26 in /usr/local/lib/python3.6/dist-packages (from tf-nightly->tflite-model-maker) (0.35.1)\nCollecting tf-estimator-nightly\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/bd/9a/3bfb9994eda11e426c809ebdf434e2ac5824a0784d980018bb53fd1620ec/tf_estimator_nightly-2.4.0.dev2020091401-py2.py3-none-any.whl (460kB)\n\u001b[K |████████████████████████████████| 460kB 36.0MB/s \n\u001b[?25hRequirement already satisfied: google-pasta>=0.1.8 in /usr/local/lib/python3.6/dist-packages (from tf-nightly->tflite-model-maker) (0.2.0)\nRequirement already satisfied: h5py<2.11.0,>=2.10.0 in /usr/local/lib/python3.6/dist-packages (from tf-nightly->tflite-model-maker) (2.10.0)\nRequirement already satisfied: keras-preprocessing<1.2,>=1.1.1 in /usr/local/lib/python3.6/dist-packages (from tf-nightly->tflite-model-maker) (1.1.2)\nRequirement already satisfied: wrapt>=1.11.1 in /usr/local/lib/python3.6/dist-packages (from tf-nightly->tflite-model-maker) (1.12.1)\nRequirement already satisfied: grpcio>=1.8.6 in /usr/local/lib/python3.6/dist-packages (from tf-nightly->tflite-model-maker) (1.32.0)\nRequirement already satisfied: protobuf>=3.9.2 in /usr/local/lib/python3.6/dist-packages (from tf-nightly->tflite-model-maker) (3.12.4)\nRequirement already satisfied: gast==0.3.3 in /usr/local/lib/python3.6/dist-packages (from tf-nightly->tflite-model-maker) (0.3.3)\nRequirement already satisfied: astunparse==1.6.3 in /usr/local/lib/python3.6/dist-packages (from tf-nightly->tflite-model-maker) (1.6.3)\nRequirement already satisfied: scipy>=0.19.1 in /usr/local/lib/python3.6/dist-packages (from tf-models-nightly->tflite-model-maker) (1.4.1)\nCollecting pyyaml>=5.1\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/64/c2/b80047c7ac2478f9501676c988a5411ed5572f35d1beff9cae07d321512c/PyYAML-5.3.1.tar.gz 
(269kB)\n\u001b[K |████████████████████████████████| 276kB 59.8MB/s \n\u001b[?25hCollecting tensorflow-model-optimization>=0.4.1\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/55/38/4fd48ea1bfcb0b6e36d949025200426fe9c3a8bfae029f0973d85518fa5a/tensorflow_model_optimization-0.5.0-py2.py3-none-any.whl (172kB)\n\u001b[K |████████████████████████████████| 174kB 51.0MB/s \n\u001b[?25hRequirement already satisfied: pandas>=0.22.0 in /usr/local/lib/python3.6/dist-packages (from tf-models-nightly->tflite-model-maker) (1.0.5)\nRequirement already satisfied: dataclasses in /usr/local/lib/python3.6/dist-packages (from tf-models-nightly->tflite-model-maker) (0.7)\nRequirement already satisfied: Cython in /usr/local/lib/python3.6/dist-packages (from tf-models-nightly->tflite-model-maker) (0.29.21)\nCollecting opencv-python-headless\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/b6/2a/496e06fd289c01dc21b11970be1261c87ce1cc22d5340c14b516160822a7/opencv_python_headless-4.4.0.42-cp36-cp36m-manylinux2014_x86_64.whl (36.6MB)\n\u001b[K |████████████████████████████████| 36.6MB 83kB/s \n\u001b[?25hRequirement already satisfied: kaggle>=1.3.9 in /usr/local/lib/python3.6/dist-packages (from tf-models-nightly->tflite-model-maker) (1.5.8)\nRequirement already satisfied: pycocotools in /usr/local/lib/python3.6/dist-packages (from tf-models-nightly->tflite-model-maker) (2.0.2)\nRequirement already satisfied: oauth2client in /usr/local/lib/python3.6/dist-packages (from tf-models-nightly->tflite-model-maker) (4.1.3)\nRequirement already satisfied: matplotlib in /usr/local/lib/python3.6/dist-packages (from tf-models-nightly->tflite-model-maker) (3.2.2)\nCollecting tf-slim>=1.1.0\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/02/97/b0f4a64df018ca018cc035d44f2ef08f91e2e8aa67271f6f19633a015ff7/tf_slim-1.1.0-py2.py3-none-any.whl (352kB)\n\u001b[K |████████████████████████████████| 358kB 55.9MB/s \n\u001b[?25hCollecting seqeval\n Downloading 
https://files.pythonhosted.org/packages/34/91/068aca8d60ce56dd9ba4506850e876aba5e66a6f2f29aa223224b50df0de/seqeval-0.0.12.tar.gz\nRequirement already satisfied: psutil>=5.4.3 in /usr/local/lib/python3.6/dist-packages (from tf-models-nightly->tflite-model-maker) (5.4.8)\nCollecting py-cpuinfo>=3.3.0\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/f6/f5/8e6e85ce2e9f6e05040cf0d4e26f43a4718bcc4bce988b433276d4b1a5c1/py-cpuinfo-7.0.0.tar.gz (95kB)\n\u001b[K |████████████████████████████████| 102kB 13.5MB/s \n\u001b[?25hRequirement already satisfied: google-api-python-client>=1.6.7 in /usr/local/lib/python3.6/dist-packages (from tf-models-nightly->tflite-model-maker) (1.7.12)\nRequirement already satisfied: gin-config in /usr/local/lib/python3.6/dist-packages (from tf-models-nightly->tflite-model-maker) (0.3.0)\nRequirement already satisfied: tensorflow-addons in /usr/local/lib/python3.6/dist-packages (from tf-models-nightly->tflite-model-maker) (0.8.3)\nRequirement already satisfied: google-cloud-bigquery>=0.31.0 in /usr/local/lib/python3.6/dist-packages (from tf-models-nightly->tflite-model-maker) (1.21.0)\nCollecting pybind11>=2.4\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/89/e3/d576f6f02bc75bacbc3d42494e8f1d063c95617d86648dba243c2cb3963e/pybind11-2.5.0-py2.py3-none-any.whl (296kB)\n\u001b[K |████████████████████████████████| 296kB 47.9MB/s \n\u001b[?25hRequirement already satisfied: promise in /usr/local/lib/python3.6/dist-packages (from tensorflow-datasets>=2.1.0->tflite-model-maker) (2.3)\nRequirement already satisfied: tensorflow-metadata in /usr/local/lib/python3.6/dist-packages (from tensorflow-datasets>=2.1.0->tflite-model-maker) (0.24.0)\nRequirement already satisfied: requests>=2.19.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow-datasets>=2.1.0->tflite-model-maker) (2.23.0)\nRequirement already satisfied: dill in /usr/local/lib/python3.6/dist-packages (from tensorflow-datasets>=2.1.0->tflite-model-maker) 
(0.3.2)\nRequirement already satisfied: attrs>=18.1.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow-datasets>=2.1.0->tflite-model-maker) (20.2.0)\nRequirement already satisfied: tqdm in /usr/local/lib/python3.6/dist-packages (from tensorflow-datasets>=2.1.0->tflite-model-maker) (4.41.1)\nRequirement already satisfied: future in /usr/local/lib/python3.6/dist-packages (from tensorflow-datasets>=2.1.0->tflite-model-maker) (0.16.0)\nRequirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.6/dist-packages (from tb-nightly<3.0.0a0,>=2.4.0a0->tf-nightly->tflite-model-maker) (1.0.1)\nRequirement already satisfied: setuptools>=41.0.0 in /usr/local/lib/python3.6/dist-packages (from tb-nightly<3.0.0a0,>=2.4.0a0->tf-nightly->tflite-model-maker) (50.3.0)\nRequirement already satisfied: tensorboard-plugin-wit>=1.6.0 in /usr/local/lib/python3.6/dist-packages (from tb-nightly<3.0.0a0,>=2.4.0a0->tf-nightly->tflite-model-maker) (1.7.0)\nRequirement already satisfied: google-auth-oauthlib<0.5,>=0.4.1 in /usr/local/lib/python3.6/dist-packages (from tb-nightly<3.0.0a0,>=2.4.0a0->tf-nightly->tflite-model-maker) (0.4.1)\nRequirement already satisfied: google-auth<2,>=1.6.3 in /usr/local/lib/python3.6/dist-packages (from tb-nightly<3.0.0a0,>=2.4.0a0->tf-nightly->tflite-model-maker) (1.17.2)\nRequirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.6/dist-packages (from tb-nightly<3.0.0a0,>=2.4.0a0->tf-nightly->tflite-model-maker) (3.2.2)\nRequirement already satisfied: dm-tree~=0.1.1 in /usr/local/lib/python3.6/dist-packages (from tensorflow-model-optimization>=0.4.1->tf-models-nightly->tflite-model-maker) (0.1.5)\nRequirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.6/dist-packages (from pandas>=0.22.0->tf-models-nightly->tflite-model-maker) (2018.9)\nRequirement already satisfied: python-dateutil>=2.6.1 in /usr/local/lib/python3.6/dist-packages (from pandas>=0.22.0->tf-models-nightly->tflite-model-maker) 
(2.8.1)\nRequirement already satisfied: certifi in /usr/local/lib/python3.6/dist-packages (from kaggle>=1.3.9->tf-models-nightly->tflite-model-maker) (2020.6.20)\nRequirement already satisfied: python-slugify in /usr/local/lib/python3.6/dist-packages (from kaggle>=1.3.9->tf-models-nightly->tflite-model-maker) (4.0.1)\nRequirement already satisfied: slugify in /usr/local/lib/python3.6/dist-packages (from kaggle>=1.3.9->tf-models-nightly->tflite-model-maker) (0.0.1)\nRequirement already satisfied: urllib3<1.25,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from kaggle>=1.3.9->tf-models-nightly->tflite-model-maker) (1.24.3)\nRequirement already satisfied: pyasn1>=0.1.7 in /usr/local/lib/python3.6/dist-packages (from oauth2client->tf-models-nightly->tflite-model-maker) (0.4.8)\nRequirement already satisfied: rsa>=3.1.4 in /usr/local/lib/python3.6/dist-packages (from oauth2client->tf-models-nightly->tflite-model-maker) (4.6)\nRequirement already satisfied: httplib2>=0.9.1 in /usr/local/lib/python3.6/dist-packages (from oauth2client->tf-models-nightly->tflite-model-maker) (0.17.4)\nRequirement already satisfied: pyasn1-modules>=0.0.5 in /usr/local/lib/python3.6/dist-packages (from oauth2client->tf-models-nightly->tflite-model-maker) (0.2.8)\nRequirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->tf-models-nightly->tflite-model-maker) (1.2.0)\nRequirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->tf-models-nightly->tflite-model-maker) (2.4.7)\nRequirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.6/dist-packages (from matplotlib->tf-models-nightly->tflite-model-maker) (0.10.0)\nRequirement already satisfied: Keras>=2.2.4 in /usr/local/lib/python3.6/dist-packages (from seqeval->tf-models-nightly->tflite-model-maker) (2.4.3)\nRequirement already satisfied: google-auth-httplib2>=0.0.3 in 
/usr/local/lib/python3.6/dist-packages (from google-api-python-client>=1.6.7->tf-models-nightly->tflite-model-maker) (0.0.4)\nRequirement already satisfied: uritemplate<4dev,>=3.0.0 in /usr/local/lib/python3.6/dist-packages (from google-api-python-client>=1.6.7->tf-models-nightly->tflite-model-maker) (3.0.1)\nRequirement already satisfied: typeguard in /usr/local/lib/python3.6/dist-packages (from tensorflow-addons->tf-models-nightly->tflite-model-maker) (2.7.1)\nRequirement already satisfied: google-cloud-core<2.0dev,>=1.0.3 in /usr/local/lib/python3.6/dist-packages (from google-cloud-bigquery>=0.31.0->tf-models-nightly->tflite-model-maker) (1.0.3)\nRequirement already satisfied: google-resumable-media!=0.4.0,<0.5.0dev,>=0.3.1 in /usr/local/lib/python3.6/dist-packages (from google-cloud-bigquery>=0.31.0->tf-models-nightly->tflite-model-maker) (0.4.1)\nRequirement already satisfied: googleapis-common-protos<2,>=1.52.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow-metadata->tensorflow-datasets>=2.1.0->tflite-model-maker) (1.52.0)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests>=2.19.0->tensorflow-datasets>=2.1.0->tflite-model-maker) (3.0.4)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests>=2.19.0->tensorflow-datasets>=2.1.0->tflite-model-maker) (2.10)\nRequirement already satisfied: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.6/dist-packages (from google-auth-oauthlib<0.5,>=0.4.1->tb-nightly<3.0.0a0,>=2.4.0a0->tf-nightly->tflite-model-maker) (1.3.0)\nRequirement already satisfied: cachetools<5.0,>=2.0.0 in /usr/local/lib/python3.6/dist-packages (from google-auth<2,>=1.6.3->tb-nightly<3.0.0a0,>=2.4.0a0->tf-nightly->tflite-model-maker) (4.1.1)\nRequirement already satisfied: importlib-metadata; python_version < \"3.8\" in /usr/local/lib/python3.6/dist-packages (from 
markdown>=2.6.8->tb-nightly<3.0.0a0,>=2.4.0a0->tf-nightly->tflite-model-maker) (1.7.0)\nRequirement already satisfied: text-unidecode>=1.3 in /usr/local/lib/python3.6/dist-packages (from python-slugify->kaggle>=1.3.9->tf-models-nightly->tflite-model-maker) (1.3)\nRequirement already satisfied: google-api-core<2.0.0dev,>=1.14.0 in /usr/local/lib/python3.6/dist-packages (from google-cloud-core<2.0dev,>=1.0.3->google-cloud-bigquery>=0.31.0->tf-models-nightly->tflite-model-maker) (1.16.0)\nRequirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.6/dist-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<0.5,>=0.4.1->tb-nightly<3.0.0a0,>=2.4.0a0->tf-nightly->tflite-model-maker) (3.1.0)\nRequirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.6/dist-packages (from importlib-metadata; python_version < \"3.8\"->markdown>=2.6.8->tb-nightly<3.0.0a0,>=2.4.0a0->tf-nightly->tflite-model-maker) (3.1.0)\nBuilding wheels for collected packages: fire, pyyaml, seqeval, py-cpuinfo\n Building wheel for fire (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for fire: filename=fire-0.3.1-py2.py3-none-any.whl size=111005 sha256=f0b82e6b31e21d6db3591478a37188c727533acefe415b16b456c85ef9bef47c\n Stored in directory: /root/.cache/pip/wheels/c1/61/df/768b03527bf006b546dce284eb4249b185669e65afc5fbb2ac\n Building wheel for pyyaml (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for pyyaml: filename=PyYAML-5.3.1-cp36-cp36m-linux_x86_64.whl size=44619 sha256=cdbc63ead8369d7403f47b1adff163ebde2636c9f0c2a5ebd6413d156b2b7a9f\n Stored in directory: /root/.cache/pip/wheels/a7/c1/ea/cf5bd31012e735dc1dfea3131a2d5eae7978b251083d6247bd\n Building wheel for seqeval (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n Created wheel for seqeval: filename=seqeval-0.0.12-cp36-none-any.whl size=7423 sha256=3ac4a1cc3b88a9b1a1ed8217f2b8d3abb7f936e853383025888b94019d98a856\n Stored in directory: /root/.cache/pip/wheels/4f/32/0a/df3b340a82583566975377d65e724895b3fad101a3fb729f68\n Building wheel for py-cpuinfo (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for py-cpuinfo: filename=py_cpuinfo-7.0.0-cp36-none-any.whl size=20071 sha256=b5491e6fcabbf9ae464c0def53ec6ec27bbf01230ff96f4e34c6a7c44d55d5c9\n Stored in directory: /root/.cache/pip/wheels/f1/93/7b/127daf0c3a5a49feb2fecd468d508067c733fba5192f726ad1\nSuccessfully built fire pyyaml seqeval py-cpuinfo\nInstalling collected packages: tb-nightly, flatbuffers, tf-estimator-nightly, tf-nightly, pyyaml, tensorflow-model-optimization, opencv-python-headless, sentencepiece, tf-slim, seqeval, py-cpuinfo, tf-models-nightly, fire, pybind11, tflite-support, tflite-model-maker\n Found existing installation: PyYAML 3.13\n Uninstalling PyYAML-3.13:\n Successfully uninstalled PyYAML-3.13\nSuccessfully installed fire-0.3.1 flatbuffers-1.12 opencv-python-headless-4.4.0.42 py-cpuinfo-7.0.0 pybind11-2.5.0 pyyaml-5.3.1 sentencepiece-0.1.91 seqeval-0.0.12 tb-nightly-2.4.0a20200914 tensorflow-model-optimization-0.5.0 tf-estimator-nightly-2.4.0.dev2020091401 tf-models-nightly-2.3.0.dev20200914 tf-nightly-2.4.0.dev20200914 tf-slim-1.1.0 tflite-model-maker-0.1.2 tflite-support-0.1.0rc3.dev2\n"
]
],
[
[
"Import the required packages.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport os\n\nimport tensorflow as tf\nassert tf.__version__.startswith('2')\n\nfrom tflite_model_maker import configs\nfrom tflite_model_maker import model_spec\nfrom tflite_model_maker import question_answer\nfrom tflite_model_maker import QuestionAnswerDataLoader",
"_____no_output_____"
]
],
[
[
"The \"End-to-End Overview\" demonstrates a simple end-to-end example. The following sections walk through the example step by step to show more detail.",
"_____no_output_____"
],
[
"## Choose a model_spec that represents a model for question answer\n\nEach `model_spec` object represents a specific model for question answer. The Model Maker currently supports MobileBERT and BERT-Base models.\n\nSupported Model | Name of model_spec | Model Description\n--- | --- | ---\n[MobileBERT](https://arxiv.org/pdf/2004.02984.pdf) | 'mobilebert_qa' | 4.3x smaller and 5.5x faster than BERT-Base while achieving competitive results, suitable for on-device scenario.\n[MobileBERT-SQuAD](https://arxiv.org/pdf/2004.02984.pdf) | 'mobilebert_qa_squad' | Same model architecture as MobileBERT model and the initial model is already retrained on [SQuAD1.1](https://rajpurkar.github.io/SQuAD-explorer/).\n[BERT-Base](https://arxiv.org/pdf/1810.04805.pdf) | 'bert_qa' | Standard BERT model that is widely used in NLP tasks.\n\nIn this tutorial, [MobileBERT-SQuAD](https://arxiv.org/pdf/2004.02984.pdf) is used as an example. Since the model is already retrained on [SQuAD1.1](https://rajpurkar.github.io/SQuAD-explorer/), it could converge faster for the question answer task.\n",
"_____no_output_____"
]
],
[
[
"spec = model_spec.get('mobilebert_qa_squad')",
"_____no_output_____"
]
],
[
[
"## Load Input Data Specific to an On-device ML App and Preprocess the Data\n\nThe [TriviaQA](https://nlp.cs.washington.edu/triviaqa/) is a reading comprehension dataset containing over 650K question-answer-evidence triples. In this tutorial, you will use a subset of this dataset to learn how to use the Model Maker library.\n\nTo load the data, convert the TriviaQA dataset to the [SQuAD1.1](https://rajpurkar.github.io/SQuAD-explorer/) format by running the [converter Python script](https://github.com/mandarjoshi90/triviaqa#miscellaneous) with `--sample_size=8000` and a set of `web` data. Modify the conversion code a little bit by:\n* Skipping the samples that couldn't find any answer in the context document;\n* Getting the original answer in the context without uppercase or lowercase.\n\nDownload the archived version of the already converted dataset.",
"_____no_output_____"
]
],
[
[
"train_data_path = tf.keras.utils.get_file(\n fname='triviaqa-web-train-8000.json',\n origin='https://storage.googleapis.com/download.tensorflow.org/models/tflite/dataset/triviaqa-web-train-8000.json')\nvalidation_data_path = tf.keras.utils.get_file(\n fname='triviaqa-verified-web-dev.json',\n origin='https://storage.googleapis.com/download.tensorflow.org/models/tflite/dataset/triviaqa-verified-web-dev.json')",
"Downloading data from https://storage.googleapis.com/download.tensorflow.org/models/tflite/dataset/triviaqa-web-train-8000.json\n32571392/32570663 [==============================] - 1s 0us/step\nDownloading data from https://storage.googleapis.com/download.tensorflow.org/models/tflite/dataset/triviaqa-verified-web-dev.json\n1171456/1167744 [==============================] - 0s 0us/step\n"
]
],
[
[
"You can also train the MobileBERT model with your own dataset. If you are running this notebook on Colab, upload your data by using the left sidebar.\n\n<img src=\"https://storage.googleapis.com/download.tensorflow.org/models/tflite/screenshots/model_maker_question_answer.png\" alt=\"Upload File\" width=\"800\" hspace=\"100\">\n\nIf you prefer not to upload your data to the cloud, you can also run the library offline by following the [guide](https://github.com/tensorflow/examples/tree/master/tensorflow_examples/lite/model_maker).",
"_____no_output_____"
],
[
"Use the `QuestionAnswerDataLoader.from_squad` method to load and preprocess the [SQuAD format](https://rajpurkar.github.io/SQuAD-explorer/) data according to a specific `model_spec`. You can use either SQuAD2.0 or SQuAD1.1 formats. Setting parameter `version_2_with_negative` as `True` means the formats is SQuAD2.0. Otherwise, the format is SQuAD1.1. By default, `version_2_with_negative` is `False`.",
"_____no_output_____"
]
],
[
[
"train_data = QuestionAnswerDataLoader.from_squad(train_data_path, spec, is_training=True)\nvalidation_data = QuestionAnswerDataLoader.from_squad(validation_data_path, spec, is_training=False)",
"_____no_output_____"
]
],
[
[
"## Customize the TensorFlow Model\n\nCreate a custom question answer model based on the loaded data. The `create` function comprises the following steps:\n\n1. Creates the model for question answer according to `model_spec`.\n2. Train the question answer model. The default epochs and the default batch size are set according to two variables `default_training_epochs` and `default_batch_size` in the `model_spec` object.",
"_____no_output_____"
]
],
[
[
"model = question_answer.create(train_data, model_spec=spec)",
"INFO:tensorflow:Retraining the models...\n"
]
],
[
[
"Have a look at the detailed model structure.",
"_____no_output_____"
]
],
[
[
"model.summary()",
"_____no_output_____"
]
],
[
[
"## Evaluate the Customized Model\n\nEvaluate the model on the validation data and get a dict of metrics including `f1` score and `exact match` etc. Note that metrics are different for SQuAD1.1 and SQuAD2.0.",
"_____no_output_____"
]
],
[
[
"model.evaluate(validation_data)",
"_____no_output_____"
]
],
[
[
"## Export to TensorFlow Lite Model\n\nConvert the existing model to TensorFlow Lite model format that you can later use in an on-device ML application.",
"_____no_output_____"
],
[
"Since MobileBERT is too big for on-device applications, use dynamic range quantization on the model to compress MobileBERT by 4x with the minimal loss of performance. First, define the quantization configuration:",
"_____no_output_____"
]
],
[
[
"config = configs.QuantizationConfig.create_dynamic_range_quantization(optimizations=[tf.lite.Optimize.OPTIMIZE_FOR_LATENCY])\nconfig._experimental_new_quantizer = True",
"_____no_output_____"
]
],
[
[
"Export the quantized TFLite model according to the quantization config and save the vocabulary to a vocab file. The default TFLite model filename is `model.tflite`, and the default vocab filename is `vocab`.",
"_____no_output_____"
]
],
[
[
"model.export(export_dir='.', quantization_config=config)",
"_____no_output_____"
]
],
[
[
"You can use the TensorFlow Lite model file and vocab file in the [bert_qa](https://github.com/tensorflow/examples/tree/master/lite/examples/bert_qa/android) reference app by downloading it from the left sidebar on Colab.",
"_____no_output_____"
],
[
"You can also evaluate the tflite model with the `evaluate_tflite` method. This step is expected to take a long time.",
"_____no_output_____"
]
],
[
[
"model.evaluate_tflite('model.tflite', validation_data)",
"_____no_output_____"
]
],
[
[
"## Advanced Usage\n\nThe `create` function is the critical part of this library in which the `model_spec` parameter defines the model specification. The `BertQAModelSpec` class is currently supported. There are 2 models: MobileBERT model, BERT-Base model. The `create` function comprises the following steps:\n\n1. Creates the model for question answer according to `model_spec`.\n2. Train the question answer model.\n\nThis section describes several advanced topics, including adjusting the model, tuning the training hyperparameters etc.",
"_____no_output_____"
],
[
"### Adjust the model\n\nYou can adjust the model infrastructure like parameters `seq_len` and `query_len` in the `BertQAModelSpec` class.\n\nAdjustable parameters for model:\n\n* `seq_len`: Length of the passage to feed into the model.\n* `query_len`: Length of the question to feed into the model.\n* `doc_stride`: The stride when doing a sliding window approach to take chunks of the documents.\n* `initializer_range`: The stdev of the truncated_normal_initializer for initializing all weight matrices.\n* `trainable`: Boolean, whether pre-trained layer is trainable.\n\nAdjustable parameters for training pipeline:\n\n* `model_dir`: The location of the model checkpoint files. If not set, temporary directory will be used.\n* `dropout_rate`: The rate for dropout.\n* `learning_rate`: The initial learning rate for Adam.\n* `predict_batch_size`: Batch size for prediction.\n* `tpu`: TPU address to connect to. Only used if using tpu.\n",
"_____no_output_____"
],
[
"For example, you can train the model with a longer sequence length. If you change the model, you must first construct a new `model_spec`.",
"_____no_output_____"
]
],
[
[
"new_spec = model_spec.get('mobilebert_qa')\nnew_spec.seq_len = 512",
"_____no_output_____"
]
],
[
[
"The remaining steps are the same. Note that you must rerun both the `dataloader` and `create` parts as different model specs may have different preprocessing steps.\n",
"_____no_output_____"
],
[
"### Tune training hyperparameters\nYou can also tune the training hyperparameters like `epochs` and `batch_size` to impact the model performance. For instance,\n\n* `epochs`: more epochs could achieve better performance, but may lead to overfitting.\n* `batch_size`: number of samples to use in one training step.\n\nFor example, you can train with more epochs and with a bigger batch size like:\n\n```python\nmodel = question_answer.create(train_data, model_spec=spec, epochs=5, batch_size=64)\n```",
"_____no_output_____"
],
[
"### Change the Model Architecture\n\nYou can change the base model your data trains on by changing the `model_spec`. For example, to change to the BERT-Base model, run:\n\n```python\nspec = model_spec.get('bert_qa')\n```",
"_____no_output_____"
],
[
"The remaining steps are the same.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
4abf1ba36deb007f07a3febdcfd3d561acb326cd
| 18,294 |
ipynb
|
Jupyter Notebook
|
notebooks/neural_network_with_tfds_data.ipynb
|
proteneer/jax
|
52d73e43ef5cb9e4fe4cca715a35f190fee28c89
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
notebooks/neural_network_with_tfds_data.ipynb
|
proteneer/jax
|
52d73e43ef5cb9e4fe4cca715a35f190fee28c89
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
notebooks/neural_network_with_tfds_data.ipynb
|
proteneer/jax
|
52d73e43ef5cb9e4fe4cca715a35f190fee28c89
|
[
"ECL-2.0",
"Apache-2.0"
] | 1 |
2020-03-29T04:19:27.000Z
|
2020-03-29T04:19:27.000Z
| 34.003717 | 453 | 0.60938 |
[
[
[
"##### Copyright 2018 Google LLC.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");",
"_____no_output_____"
],
[
"Licensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\nhttps://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.",
"_____no_output_____"
],
[
"# Training a Simple Neural Network, with tensorflow/datasets Data Loading\n\n_Forked from_ `neural_network_and_data_loading.ipynb`\n\n_Dougal Maclaurin, Peter Hawkins, Matthew Johnson, Roy Frostig, Alex Wiltschko, Chris Leary_\n\n\n\nLet's combine everything we showed in the [quickstart notebook](https://colab.research.google.com/github/google/jax/blob/master/notebooks/quickstart.ipynb) to train a simple neural network. We will first specify and train a simple MLP on MNIST using JAX for the computation. We will use `tensorflow/datasets` data loading API to load images and labels (because it's pretty great, and the world doesn't need yet another data loading library :P).\n\nOf course, you can use JAX with any API that is compatible with NumPy to make specifying the model a bit more plug-and-play. Here, just for explanatory purposes, we won't use any neural network libraries or special APIs for building our model.",
"_____no_output_____"
]
],
[
[
"!pip install --upgrade -q https://storage.googleapis.com/jax-wheels/cuda$(echo $CUDA_VERSION | sed -e 's/\\.//' -e 's/\\..*//')/jaxlib-0.1.15-cp36-none-linux_x86_64.whl\n!pip install --upgrade -q jax",
"Collecting jaxlib\n Using cached https://files.pythonhosted.org/packages/06/af/c0d5f539820e97e8ec27f05a0ee50327fe34a35369e4e02ea45ce2a45c01/jaxlib-0.1.8-cp36-none-manylinux1_x86_64.whl\nCollecting scipy (from jaxlib)\n Using cached https://files.pythonhosted.org/packages/67/e6/6d4edaceee6a110ecf6f318482f5229792f143e468b34a631f5a0899f56d/scipy-1.2.0-cp36-cp36m-manylinux1_x86_64.whl\nRequirement already satisfied: protobuf>=3.6.0 in /usr/local/google/home/rsepassi/python/fresh/lib/python3.6/site-packages (from jaxlib) (3.6.1)\nRequirement already satisfied: six in /usr/local/google/home/rsepassi/python/fresh/lib/python3.6/site-packages (from jaxlib) (1.12.0)\nRequirement already satisfied: numpy>=1.12 in /usr/local/google/home/rsepassi/python/fresh/lib/python3.6/site-packages (from jaxlib) (1.16.1)\nRequirement already satisfied: absl-py in /usr/local/google/home/rsepassi/python/fresh/lib/python3.6/site-packages (from jaxlib) (0.7.0)\nRequirement already satisfied: setuptools in /usr/local/google/home/rsepassi/python/fresh/lib/python3.6/site-packages (from protobuf>=3.6.0->jaxlib) (40.8.0)\nInstalling collected packages: scipy, jaxlib\nSuccessfully installed jaxlib-0.1.6 scipy-1.2.0\nRequirement already up-to-date: jax in /usr/local/google/home/rsepassi/python/fresh/lib/python3.6/site-packages (0.1.16)\nRequirement already satisfied, skipping upgrade: six in /usr/local/google/home/rsepassi/python/fresh/lib/python3.6/site-packages (from jax) (1.12.0)\nRequirement already satisfied, skipping upgrade: opt-einsum in /usr/local/google/home/rsepassi/python/fresh/lib/python3.6/site-packages (from jax) (2.3.2)\nRequirement already satisfied, skipping upgrade: protobuf>=3.6.0 in /usr/local/google/home/rsepassi/python/fresh/lib/python3.6/site-packages (from jax) (3.6.1)\nRequirement already satisfied, skipping upgrade: numpy>=1.12 in /usr/local/google/home/rsepassi/python/fresh/lib/python3.6/site-packages (from jax) (1.16.1)\nRequirement already satisfied, skipping upgrade: 
absl-py in /usr/local/google/home/rsepassi/python/fresh/lib/python3.6/site-packages (from jax) (0.7.0)\nRequirement already satisfied, skipping upgrade: setuptools in /usr/local/google/home/rsepassi/python/fresh/lib/python3.6/site-packages (from protobuf>=3.6.0->jax) (40.8.0)\n"
],
[
"from __future__ import print_function, division, absolute_import\nimport jax.numpy as np\nfrom jax import grad, jit, vmap\nfrom jax import random",
"_____no_output_____"
]
],
[
[
"### Hyperparameters\nLet's get a few bookkeeping items out of the way.",
"_____no_output_____"
]
],
[
[
"# A helper function to randomly initialize weights and biases\n# for a dense neural network layer\ndef random_layer_params(m, n, key, scale=1e-2):\n w_key, b_key = random.split(key)\n return scale * random.normal(w_key, (n, m)), scale * random.normal(b_key, (n,))\n\n# Initialize all layers for a fully-connected neural network with sizes \"sizes\"\ndef init_network_params(sizes, key):\n keys = random.split(key, len(sizes))\n return [random_layer_params(m, n, k) for m, n, k in zip(sizes[:-1], sizes[1:], keys)]\n\nlayer_sizes = [784, 512, 512, 10]\nparam_scale = 0.1\nstep_size = 0.0001\nnum_epochs = 10\nbatch_size = 128\nn_targets = 10\nparams = init_network_params(layer_sizes, random.PRNGKey(0))",
"/usr/local/google/home/rsepassi/python/fresh/lib/python3.6/site-packages/jax/lib/xla_bridge.py:146: UserWarning: No GPU found, falling back to CPU.\n warnings.warn('No GPU found, falling back to CPU.')\n"
]
],
[
[
"### Auto-batching predictions\n\nLet us first define our prediction function. Note that we're defining this for a _single_ image example. We're going to use JAX's `vmap` function to automatically handle mini-batches, with no performance penalty.",
"_____no_output_____"
]
],
[
[
"from jax.scipy.misc import logsumexp\n\ndef relu(x):\n return np.maximum(0, x)\n\ndef predict(params, image):\n # per-example predictions\n activations = image\n for w, b in params[:-1]:\n outputs = np.dot(w, activations) + b\n activations = relu(outputs)\n \n final_w, final_b = params[-1]\n logits = np.dot(final_w, activations) + final_b\n return logits - logsumexp(logits)",
"_____no_output_____"
]
],
[
[
"Let's check that our prediction function only works on single images.",
"_____no_output_____"
]
],
[
[
"# This works on single examples\nrandom_flattened_image = random.normal(random.PRNGKey(1), (28 * 28,))\npreds = predict(params, random_flattened_image)\nprint(preds.shape)",
"(10,)\n"
],
[
"# Doesn't work with a batch\nrandom_flattened_images = random.normal(random.PRNGKey(1), (10, 28 * 28))\ntry:\n preds = predict(params, random_flattened_images)\nexcept TypeError:\n print('Invalid shapes!')",
"Invalid shapes!\n"
],
[
"# Let's upgrade it to handle batches using `vmap`\n\n# Make a batched version of the `predict` function\nbatched_predict = vmap(predict, in_axes=(None, 0))\n\n# `batched_predict` has the same call signature as `predict`\nbatched_preds = batched_predict(params, random_flattened_images)\nprint(batched_preds.shape)",
"(10, 10)\n"
]
],
[
[
"At this point, we have all the ingredients we need to define our neural network and train it. We've built an auto-batched version of `predict`, which we should be able to use in a loss function. We should be able to use `grad` to take the derivative of the loss with respect to the neural network parameters. Last, we should be able to use `jit` to speed up everything.",
"_____no_output_____"
],
[
"### Utility and loss functions",
"_____no_output_____"
]
],
[
[
"def one_hot(x, k, dtype=np.float32):\n \"\"\"Create a one-hot encoding of x of size k.\"\"\"\n return np.array(x[:, None] == np.arange(k), dtype)\n \ndef accuracy(params, images, targets):\n target_class = np.argmax(targets, axis=1)\n predicted_class = np.argmax(batched_predict(params, images), axis=1)\n return np.mean(predicted_class == target_class)\n\ndef loss(params, images, targets):\n preds = batched_predict(params, images)\n return -np.sum(preds * targets)\n\n@jit\ndef update(params, x, y):\n grads = grad(loss)(params, x, y)\n return [(w - step_size * dw, b - step_size * db)\n for (w, b), (dw, db) in zip(params, grads)]",
"_____no_output_____"
]
],
[
[
"### Data Loading with `tensorflow/datasets`\n\nJAX is laser-focused on program transformations and accelerator-backed NumPy, so we don't include data loading or munging in the JAX library. There are already a lot of great data loaders out there, so let's just use them instead of reinventing anything. We'll use the `tensorflow/datasets` data loader.",
"_____no_output_____"
]
],
[
[
"# Install tensorflow-datasets\n# TODO(rsepassi): Switch to stable version on release\n!pip install -q --upgrade tfds-nightly tf-nightly",
"_____no_output_____"
],
[
"import tensorflow_datasets as tfds\n\ndata_dir = '/tmp/tfds'\n\n# Fetch full datasets for evaluation\n# tfds.load returns tf.Tensors (or tf.data.Datasets if batch_size != -1)\n# You can convert them to NumPy arrays (or iterables of NumPy arrays) with tfds.dataset_as_numpy\nmnist_data, info = tfds.load(name=\"mnist\", batch_size=-1, data_dir=data_dir, with_info=True)\nmnist_data = tfds.as_numpy(mnist_data)\ntrain_data, test_data = mnist_data['train'], mnist_data['test']\nnum_labels = info.features['label'].num_classes\nh, w, c = info.features['image'].shape\nnum_pixels = h * w * c\n\n# Full train set\ntrain_images, train_labels = train_data['image'], train_data['label']\ntrain_images = np.reshape(train_images, (len(train_images), num_pixels))\ntrain_labels = one_hot(train_labels, num_labels)\n\n# Full test set\ntest_images, test_labels = test_data['image'], test_data['label']\ntest_images = np.reshape(test_images, (len(test_images), num_pixels))\ntest_labels = one_hot(test_labels, num_labels)",
"_____no_output_____"
],
[
"print('Train:', train_images.shape, train_labels.shape)\nprint('Test:', test_images.shape, test_labels.shape)",
"Train: (60000, 784) (60000, 10)\nTest: (10000, 784) (10000, 10)\n"
]
],
[
[
"### Training Loop",
"_____no_output_____"
]
],
[
[
"import time\n\ndef get_train_batches():\n # as_supervised=True gives us the (image, label) as a tuple instead of a dict\n ds = tfds.load(name='mnist', split='train', as_supervised=True, data_dir=data_dir)\n # You can build up an arbitrary tf.data input pipeline\n ds = ds.batch(128).prefetch(1)\n # tfds.dataset_as_numpy converts the tf.data.Dataset into an iterable of NumPy arrays\n return tfds.as_numpy(ds)\n\nfor epoch in range(num_epochs):\n start_time = time.time()\n for x, y in get_train_batches():\n x = np.reshape(x, (len(x), num_pixels))\n y = one_hot(y, num_labels)\n params = update(params, x, y)\n epoch_time = time.time() - start_time\n\n train_acc = accuracy(params, train_images, train_labels)\n test_acc = accuracy(params, test_images, test_labels)\n print(\"Epoch {} in {:0.2f} sec\".format(epoch, epoch_time))\n print(\"Training set accuracy {}\".format(train_acc))\n print(\"Test set accuracy {}\".format(test_acc))",
"Epoch 0 in 4.93 sec\nTraining set accuracy 0.9690666794776917\nTest set accuracy 0.9631999731063843\nEpoch 1 in 3.91 sec\nTraining set accuracy 0.9807999730110168\nTest set accuracy 0.97079998254776\nEpoch 2 in 4.02 sec\nTraining set accuracy 0.9878833293914795\nTest set accuracy 0.9763000011444092\nEpoch 3 in 4.03 sec\nTraining set accuracy 0.992733359336853\nTest set accuracy 0.9787999987602234\nEpoch 4 in 3.95 sec\nTraining set accuracy 0.9907500147819519\nTest set accuracy 0.9745000004768372\nEpoch 5 in 4.01 sec\nTraining set accuracy 0.9953666925430298\nTest set accuracy 0.9782000184059143\nEpoch 6 in 3.90 sec\nTraining set accuracy 0.9984833598136902\nTest set accuracy 0.9815000295639038\nEpoch 7 in 3.93 sec\nTraining set accuracy 0.9991166591644287\nTest set accuracy 0.9824000000953674\nEpoch 8 in 4.16 sec\nTraining set accuracy 0.999833345413208\nTest set accuracy 0.982200026512146\nEpoch 9 in 4.03 sec\nTraining set accuracy 0.999916672706604\nTest set accuracy 0.9829999804496765\n"
]
],
[
[
"We've now used the whole of the JAX API: `grad` for derivatives, `jit` for speedups and `vmap` for auto-vectorization.\nWe used NumPy to specify all of our computation, and borrowed the great data loaders from `tensorflow/datasets`, and ran the whole thing on the GPU.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4abf25af25a5900bbc445d056116c3092d34aa04
| 43,574 |
ipynb
|
Jupyter Notebook
|
starter_code/model_3.ipynb
|
abartczak/Repo_21_machine-learning-challenge_HW
|
0d868e47b2234f81f2c709295639687bbb1080b3
|
[
"ADSL"
] | null | null | null |
starter_code/model_3.ipynb
|
abartczak/Repo_21_machine-learning-challenge_HW
|
0d868e47b2234f81f2c709295639687bbb1080b3
|
[
"ADSL"
] | null | null | null |
starter_code/model_3.ipynb
|
abartczak/Repo_21_machine-learning-challenge_HW
|
0d868e47b2234f81f2c709295639687bbb1080b3
|
[
"ADSL"
] | null | null | null | 39.648772 | 258 | 0.445839 |
[
[
[
"# K Nearest Neighbor (KNN) Model",
"_____no_output_____"
]
],
[
[
"# Update sklearn to prevent version mismatches\n!pip install sklearn --upgrade",
"Requirement already up-to-date: sklearn in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (0.0)\nRequirement already satisfied, skipping upgrade: scikit-learn in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (from sklearn) (0.23.1)\nRequirement already satisfied, skipping upgrade: numpy>=1.13.3 in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (from scikit-learn->sklearn) (1.18.5)\nRequirement already satisfied, skipping upgrade: threadpoolctl>=2.0.0 in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (from scikit-learn->sklearn) (2.1.0)\nRequirement already satisfied, skipping upgrade: joblib>=0.11 in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (from scikit-learn->sklearn) (0.15.1)\nRequirement already satisfied, skipping upgrade: scipy>=0.19.1 in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (from scikit-learn->sklearn) (1.4.1)\n"
],
[
"# Update sklearn to prevent version mismatches\n!pip install tensorflow==2.2 --upgrade\n!pip install keras --upgrade",
"Requirement already up-to-date: tensorflow==2.2 in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (2.2.0)\nRequirement already satisfied, skipping upgrade: six>=1.12.0 in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (from tensorflow==2.2) (1.15.0)\nRequirement already satisfied, skipping upgrade: tensorflow-estimator<2.3.0,>=2.2.0 in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (from tensorflow==2.2) (2.2.0)\nRequirement already satisfied, skipping upgrade: tensorboard<2.3.0,>=2.2.0 in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (from tensorflow==2.2) (2.2.1)\nRequirement already satisfied, skipping upgrade: astunparse==1.6.3 in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (from tensorflow==2.2) (1.6.3)\nRequirement already satisfied, skipping upgrade: wheel>=0.26; python_version >= \"3\" in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (from tensorflow==2.2) (0.34.2)\nRequirement already satisfied, skipping upgrade: google-pasta>=0.1.8 in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (from tensorflow==2.2) (0.2.0)\nRequirement already satisfied, skipping upgrade: absl-py>=0.7.0 in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (from tensorflow==2.2) (0.9.0)\nRequirement already satisfied, skipping upgrade: wrapt>=1.11.1 in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (from tensorflow==2.2) (1.12.1)\nRequirement already satisfied, skipping upgrade: grpcio>=1.8.6 in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (from tensorflow==2.2) (1.27.2)\nRequirement already satisfied, skipping upgrade: keras-preprocessing>=1.1.0 in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (from tensorflow==2.2) (1.1.0)\nRequirement already satisfied, skipping upgrade: numpy<2.0,>=1.16.0 in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (from tensorflow==2.2) (1.18.5)\nRequirement already 
satisfied, skipping upgrade: opt-einsum>=2.3.2 in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (from tensorflow==2.2) (3.3.0)\nRequirement already satisfied, skipping upgrade: scipy==1.4.1; python_version >= \"3\" in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (from tensorflow==2.2) (1.4.1)\nRequirement already satisfied, skipping upgrade: gast==0.3.3 in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (from tensorflow==2.2) (0.3.3)\nRequirement already satisfied, skipping upgrade: termcolor>=1.1.0 in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (from tensorflow==2.2) (1.1.0)\nRequirement already satisfied, skipping upgrade: protobuf>=3.8.0 in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (from tensorflow==2.2) (3.12.3)\nRequirement already satisfied, skipping upgrade: h5py<2.11.0,>=2.10.0 in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (from tensorflow==2.2) (2.10.0)\nRequirement already satisfied, skipping upgrade: setuptools>=41.0.0 in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow==2.2) (47.3.1.post20200622)\nRequirement already satisfied, skipping upgrade: google-auth-oauthlib<0.5,>=0.4.1 in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow==2.2) (0.4.1)\nRequirement already satisfied, skipping upgrade: google-auth<2,>=1.6.3 in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow==2.2) (1.14.1)\nRequirement already satisfied, skipping upgrade: markdown>=2.6.8 in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow==2.2) (3.1.1)\nRequirement already satisfied, skipping upgrade: requests<3,>=2.21.0 in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow==2.2) (2.24.0)\nRequirement already 
satisfied, skipping upgrade: werkzeug>=0.11.15 in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow==2.2) (0.14.1)\nRequirement already satisfied, skipping upgrade: tensorboard-plugin-wit>=1.6.0 in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow==2.2) (1.6.0)\nRequirement already satisfied, skipping upgrade: requests-oauthlib>=0.7.0 in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (from google-auth-oauthlib<0.5,>=0.4.1->tensorboard<2.3.0,>=2.2.0->tensorflow==2.2) (1.3.0)\nRequirement already satisfied, skipping upgrade: cachetools<5.0,>=2.0.0 in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (from google-auth<2,>=1.6.3->tensorboard<2.3.0,>=2.2.0->tensorflow==2.2) (4.1.0)\nRequirement already satisfied, skipping upgrade: rsa<4.1,>=3.1.4 in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (from google-auth<2,>=1.6.3->tensorboard<2.3.0,>=2.2.0->tensorflow==2.2) (4.0)\nRequirement already satisfied, skipping upgrade: pyasn1-modules>=0.2.1 in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (from google-auth<2,>=1.6.3->tensorboard<2.3.0,>=2.2.0->tensorflow==2.2) (0.2.7)\nRequirement already satisfied, skipping upgrade: certifi>=2017.4.17 in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (from requests<3,>=2.21.0->tensorboard<2.3.0,>=2.2.0->tensorflow==2.2) (2020.6.20)\nRequirement already satisfied, skipping upgrade: chardet<4,>=3.0.2 in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (from requests<3,>=2.21.0->tensorboard<2.3.0,>=2.2.0->tensorflow==2.2) (3.0.4)\nRequirement already satisfied, skipping upgrade: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (from requests<3,>=2.21.0->tensorboard<2.3.0,>=2.2.0->tensorflow==2.2) (1.25.9)\nRequirement already satisfied, skipping upgrade: idna<3,>=2.5 in 
c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (from requests<3,>=2.21.0->tensorboard<2.3.0,>=2.2.0->tensorflow==2.2) (2.9)\nRequirement already satisfied, skipping upgrade: oauthlib>=3.0.0 in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<0.5,>=0.4.1->tensorboard<2.3.0,>=2.2.0->tensorflow==2.2) (3.1.0)\nRequirement already satisfied, skipping upgrade: pyasn1>=0.1.3 in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (from rsa<4.1,>=3.1.4->google-auth<2,>=1.6.3->tensorboard<2.3.0,>=2.2.0->tensorflow==2.2) (0.4.8)\nRequirement already up-to-date: keras in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (2.4.3)\nRequirement already satisfied, skipping upgrade: scipy>=0.14 in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (from keras) (1.4.1)\nRequirement already satisfied, skipping upgrade: h5py in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (from keras) (2.10.0)\nRequirement already satisfied, skipping upgrade: pyyaml in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (from keras) (5.3.1)\nRequirement already satisfied, skipping upgrade: numpy>=1.9.1 in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (from keras) (1.18.5)\nRequirement already satisfied, skipping upgrade: six in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (from h5py->keras) (1.15.0)\n"
],
[
"# Install joblib. This will be used to save your model. \n# Restart your kernel after installing \n!pip install joblib",
"Requirement already satisfied: joblib in c:\\users\\bartc\\.conda\\envs\\pythonadv\\lib\\site-packages (0.15.1)\n"
],
[
"import pandas as pd",
"_____no_output_____"
]
],
[
[
"# Read the CSV and Perform Basic Data Cleaning",
"_____no_output_____"
]
],
[
[
"df = pd.read_csv(\"exoplanet_data.csv\")\n# Drop the null columns where all values are null\ndf = df.dropna(axis='columns', how='all')\n# Drop the null rows\ndf = df.dropna()\ndf.head()",
"_____no_output_____"
]
],
[
[
"# Select your features (columns)",
"_____no_output_____"
]
],
[
[
"# Set features. This will also be used as your x values.\nselected_features = df[['koi_fpflag_nt','koi_fpflag_ss','koi_fpflag_co','koi_fpflag_ec',\n 'koi_period','koi_period_err1','koi_period_err2',\n 'koi_time0bk','koi_time0bk_err1','koi_time0bk_err2',\n 'koi_impact','koi_impact_err1','koi_impact_err2',\n 'koi_duration','koi_duration_err1','koi_duration_err2',\n 'koi_depth','koi_depth_err1','koi_depth_err2',\n 'koi_prad','koi_prad_err1','koi_prad_err2',\n 'koi_teq','koi_insol','koi_insol_err1','koi_insol_err2',\n 'koi_model_snr','koi_steff','koi_steff_err1','koi_steff_err2',\n 'koi_slogg','koi_slogg_err1','koi_slogg_err2',\n 'koi_srad','koi_srad_err1','koi_srad_err2',\n 'ra','dec','koi_kepmag']]\nselected_features.head()",
"_____no_output_____"
]
],
[
[
"# Create a Train Test Split\n\nUse `koi_disposition` for the y values",
"_____no_output_____"
]
],
[
[
"# Define target dataframe, target_names array, and X and y variables\ntarget = df[\"koi_disposition\"]\ntarget_names = [\"Confirmed\", \"False Positive\", \"Candidate\"]\nX = selected_features\ny = target\n\n# Derive X and y training and testing variables\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42, stratify=y)",
"_____no_output_____"
],
[
"X_train.head()",
"_____no_output_____"
]
],
[
[
"# Pre-processing\n\nScale the data using the MinMaxScaler and perform some feature selection",
"_____no_output_____"
]
],
[
[
"# Scale your data\nfrom sklearn.preprocessing import LabelEncoder, MinMaxScaler\nfrom tensorflow.keras.utils import to_categorical\nX_scaler = MinMaxScaler().fit(X_train)\nX_train_scaled = X_scaler.transform(X_train)\nX_test_scaled = X_scaler.transform(X_test)\n\n# Label-encode data set and print the encoded_y_test\nlabel_encoder = LabelEncoder()\nlabel_encoder.fit(y_train)\nencoded_y_train = label_encoder.transform(y_train)\nencoded_y_test = label_encoder.transform(y_test)\ny_train_categorical = to_categorical(encoded_y_train)\ny_test_categorical = to_categorical(encoded_y_test)\nprint(y_test_categorical)",
"[[1. 0. 0.]\n [0. 0. 1.]\n [0. 0. 1.]\n ...\n [0. 0. 1.]\n [0. 0. 1.]\n [0. 1. 0.]]\n"
]
],
[
[
"# Train the Model\n\n",
"_____no_output_____"
]
],
[
[
"from sklearn.neighbors import KNeighborsClassifier\ntrain_scores = []\ntest_scores = []\nfor k in range(1, 20, 2):\n knn = KNeighborsClassifier(n_neighbors=k)\n knn.fit(X_train_scaled, encoded_y_train)\n train_score = knn.score(X_train_scaled, encoded_y_train)\n test_score = knn.score(X_test_scaled, encoded_y_test)\n train_scores.append(train_score)\n test_scores.append(test_score)\n print(f\"k: {k}, Train/Test Score: {train_score:.3f}/{test_score:.3f}\")\nmodel3 = KNeighborsClassifier(n_neighbors=13)\nmodel3",
"k: 1, Train/Test Score: 1.000/0.820\nk: 3, Train/Test Score: 0.902/0.824\nk: 5, Train/Test Score: 0.877/0.827\nk: 7, Train/Test Score: 0.864/0.836\nk: 9, Train/Test Score: 0.858/0.831\nk: 11, Train/Test Score: 0.852/0.827\nk: 13, Train/Test Score: 0.848/0.824\nk: 15, Train/Test Score: 0.848/0.831\nk: 17, Train/Test Score: 0.846/0.831\nk: 19, Train/Test Score: 0.838/0.831\n"
],
[
"# Fit the data and print Training Data Scores\nmodel3.fit(X_train_scaled, encoded_y_train)\nprint(f\"Training Data Score: {knn.score(X_train_scaled, encoded_y_train)}\")\nprint(f\"Testing Data Score: {knn.score(X_test_scaled, encoded_y_test)}\")",
"Training Data Score: 0.8384512683578104\nTesting Data Score: 0.8312356979405034\n"
]
],
[
[
"# Test KNN Model",
"_____no_output_____"
]
],
[
[
"# Make predictions\npredictions = model3.predict(X_test_scaled)\npredictions",
"_____no_output_____"
],
[
"# Calculate classification report\nfrom sklearn.metrics import classification_report\nprint(classification_report(encoded_y_test, predictions,\n target_names=target_names))",
" precision recall f1-score support\n\n Confirmed 0.70 0.52 0.60 422\nFalse Positive 0.63 0.77 0.69 450\n Candidate 0.99 1.00 0.99 876\n\n accuracy 0.82 1748\n macro avg 0.77 0.76 0.76 1748\n weighted avg 0.83 0.82 0.82 1748\n\n"
]
],
[
[
"# Save the Model",
"_____no_output_____"
]
],
[
[
"# save your model by updating \"your_name\" with your name\n# and \"your_model\" with your model variable\n# be sure to turn this in to BCS\n# if joblib fails to import, try running the command to install in terminal/git-bash\nimport joblib\nfilename = 'knn.sav'\njoblib.dump(model3, filename)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
4abf4093e53c083d27ed5cfeafd49bab510f6751
| 2,871 |
ipynb
|
Jupyter Notebook
|
detection_apple_all_imgs.ipynb
|
NobleBumblebee/YOLOv3_TF2
|
f8d9353c487fe06ade1678009e6c7eaf1be07925
|
[
"MIT"
] | null | null | null |
detection_apple_all_imgs.ipynb
|
NobleBumblebee/YOLOv3_TF2
|
f8d9353c487fe06ade1678009e6c7eaf1be07925
|
[
"MIT"
] | null | null | null |
detection_apple_all_imgs.ipynb
|
NobleBumblebee/YOLOv3_TF2
|
f8d9353c487fe06ade1678009e6c7eaf1be07925
|
[
"MIT"
] | null | null | null | 25.184211 | 145 | 0.571578 |
[
[
[
"import os\nos.environ['CUDA_VISIBLE_DEVICES'] = '-1'\nimport cv2\nimport numpy as np\nimport tensorflow as tf\n\ngpus = tf.config.experimental.list_physical_devices(\"GPU\")\ntry:\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n logical_gpus = tf.config.experimental.list_logical_devices('GPU')\n print(len(gpus), \"Physical GPUs,\", len(logical_gpus), \"Logical GPUs\")\nexcept RuntimeError as e:\n print(e)\n\nfrom yolov3.yolov3 import Create_Yolov3\nfrom yolov3.utils import load_yolo_weights, detect_image, detect_video, detect_realtime\nfrom yolov3.configs import *",
"0 Physical GPUs, 0 Logical GPUs\n"
],
[
"input_size = YOLO_INPUT_SIZE\nDarknet_weights = YOLO_DARKNET_WEIGHTS\nif TRAIN_YOLO_TINY:\n Darknet_weights = YOLO_DARKNET_TINY_WEIGHTS\n\nimage_path = \"./Dataset/test/\"\nfiles = [f for f in os.listdir(image_path) if os.path.isfile(os.path.join(image_path, f)) and f.endswith(\".png\")]\n",
"_____no_output_____"
],
[
"yolo = Create_Yolov3(input_size=input_size, CLASSES=TRAIN_CLASSES)\nyolo.load_weights(\"./checkpoints/yolov3_custom\") # use keras weights",
"_____no_output_____"
],
[
"folderOut = \"./IMAGES/\"\nfor f in files:\n detect_image(yolo, image_path + f, folderOut + f, input_size=input_size, show=False, CLASSES=TRAIN_CLASSES, rectangle_colors=(255,0,0))",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code"
]
] |
4abf50a23198444382a2ba70651603f5799acb4d
| 12,965 |
ipynb
|
Jupyter Notebook
|
pre_processing.ipynb
|
Hecramco/NLP_Violencia-en-musica-
|
38c904046cfd927b449ea3fa36c5608d1dbd99fa
|
[
"MIT"
] | 1 |
2020-09-29T00:20:56.000Z
|
2020-09-29T00:20:56.000Z
|
pre_processing.ipynb
|
Hecramco/NLP_Violencia-en-musica-
|
38c904046cfd927b449ea3fa36c5608d1dbd99fa
|
[
"MIT"
] | null | null | null |
pre_processing.ipynb
|
Hecramco/NLP_Violencia-en-musica-
|
38c904046cfd927b449ea3fa36c5608d1dbd99fa
|
[
"MIT"
] | 1 |
2020-10-15T01:40:30.000Z
|
2020-10-15T01:40:30.000Z
| 32.575377 | 173 | 0.366217 |
[
[
[
"import pandas as pd\nimport os\nimport glob",
"_____no_output_____"
],
[
"raw_data_path = os.path.join('data', 'raw')\nclean_filename = os.path.join('data', 'clean', 'data.csv')",
"_____no_output_____"
]
],
[
[
"# Read data",
"_____no_output_____"
]
],
[
[
"all_files = glob.glob(raw_data_path + \"/top_songs_with_lyrics.csv\")\nraw_data = pd.concat(pd.read_csv(f) for f in all_files)\nraw_data.head()",
"_____no_output_____"
]
],
[
[
"# Pre processing",
"_____no_output_____"
]
],
[
[
"import re\nimport string\n\nimport nltk\nfrom nltk.corpus import stopwords \nfrom nltk.tokenize import word_tokenize \nnltk.download('punkt')\nnltk.download('stopwords')\n",
"[nltk_data] Downloading package punkt to\n[nltk_data] C:\\Users\\leow_\\AppData\\Roaming\\nltk_data...\n[nltk_data] Package punkt is already up-to-date!\n[nltk_data] Downloading package stopwords to\n[nltk_data] C:\\Users\\leow_\\AppData\\Roaming\\nltk_data...\n[nltk_data] Package stopwords is already up-to-date!\n"
],
[
"#Puntuaction Removing [!”#$%&’()*+,-./:;<=>?@[\\]^_`{|}~]:\ndef clean_puntuaction(input_df):\n result=input_df\n for idx in range(result.shape[0]):\n result[idx]=result[idx].replace(\"'\",\"\")\n result[idx]=result[idx].replace(\"\\r\",\" \")\n result[idx]=result[idx].replace(\"\\n\",\"\")\n result[idx]= re.sub(\"[\\(\\[].*?[\\)\\]]\", \"\", result[idx])\n result[idx]= re.sub(r'[^\\w\\s]', '', result[idx])\n return result",
"_____no_output_____"
],
[
"def remove_accents(input_str):\n \"\"\"\n remueve acentos, aunque al ser un texto en inglés no deberían existir acentos\n \"\"\"\n nfkd_form = unicodedata.normalize('NFKD', input_str )\n return u\"\".join([c for c in nfkd_form if not unicodedata.combining(c)])\n\n#Puntuaction Removing [!”#$%&’()*+,-./:;<=>?@[\\]^_`{|}~]:\ndef clean_puntuaction(input_df):\n result=input_df\n print(result.shape)\n #for idx in range(result.shape[0]):\n# for idx in result:\n# idx=idx+1\n# if(result[idx]==\"\"):\n# continue\n# result[idx]=result[idx].replace(\"'\",\"\")\n# result[idx]=result[idx].replace(\"\\r\",\" \")\n# result[idx]=result[idx].replace(\"\\n\",\"\")\n# result[idx]= re.sub(\"[\\(\\[].*?[\\)\\]]\", \"\", result[idx])\n# result[idx]= re.sub(r'[^\\w\\s]', '', result[idx])\n# result[idx]= remove_accents(result[idx])\n cont=1\n for idx in result.values:\n \n #idx=idx+1\n #if(result[idx]==\"\"):\n # continue\n idx=idx.replace(\"'\",\"\")\n idx=idx.replace(\"\\r\",\" \")\n idx=idx.replace(\"\\n\",\" \")\n idx= re.sub(\"[\\(\\[].*?[\\)\\]]\", \"\", idx)\n idx= re.sub(r'[^\\w\\s]', '', idx)\n idx= remove_accents(idx)\n print(cont)\n print (idx)\n result[cont]=idx\n cont=cont+1\n return result\n\ndef clean_str_puntuaction(input_df):\n input_df=input_df.replace(\"'\",\"\")\n input_df=input_df.replace(\"\\r\",\" \")\n input_df=input_df.replace(\"\\n\",\" \")\n input_df=input_df.replace(\"-\",\" \")\n input_df= re.sub(\"[\\(\\[].*?[\\)\\]]\", \"\", input_df)\n input_df= re.sub(r'[^\\w\\s]', '', input_df)\n input_df= remove_accents(input_df)\n return input_df",
"_____no_output_____"
],
[
"def remove_stopwords(input_df):\n result=input_df\n for idx in range(result.shape[0]):\n tokens = word_tokenize(result[idx])\n stop_words = stopwords.words('spanish')\n more_stopwords = ['si', 'pa', 'sé', 'solo', 'yeah', 'yeh', 'oh', 'i', 'to', 'va', 'the', 'aunque', 'you', 'eh', 'cómo','ma']\n total_stopwords = stop_words + more_stopwords\n result[idx] = [i for i in tokens if not i in total_stopwords]\n return result",
"_____no_output_____"
],
[
"# TODO: Perform cleaning\n\ndata_filename = 'data/raw/top_songs_with_lyrics.csv'\ndataset = pd.read_csv(data_filename)\ndataset.columns.tolist()\n#dataset.iloc[:,0]\n#df=dataset[['artists ','title','lyric \n# lowercase ']]\ndf=dataset['lyric '].str.lower()\n\ndf=clean_puntuaction(df)\ndf=remove_stopwords(df)\n#df[1]\n\n#freq = nltk.FreqDist(tokens)\n\n",
"_____no_output_____"
],
[
"clean_data = pd.DataFrame(data={'dummy': [1, 2]})\nclean_data.to_csv(clean_filename, index=False)",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4abf62378a3ef0d239293a462f6aa00c025a0c0e
| 4,280 |
ipynb
|
Jupyter Notebook
|
Ch1.ipynb
|
IanFla/Bayesian-Analysis
|
b3ae43571b81423ab18291253e2b46c51926eeb0
|
[
"MIT"
] | null | null | null |
Ch1.ipynb
|
IanFla/Bayesian-Analysis
|
b3ae43571b81423ab18291253e2b46c51926eeb0
|
[
"MIT"
] | null | null | null |
Ch1.ipynb
|
IanFla/Bayesian-Analysis
|
b3ae43571b81423ab18291253e2b46c51926eeb0
|
[
"MIT"
] | null | null | null | 25.47619 | 78 | 0.51028 |
[
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import norm, poisson, uniform",
"_____no_output_____"
]
],
[
[
"# Ex 4",
"_____no_output_____"
]
],
[
[
"outcomes = np.array([-7, -5, -3, -3, 1, 6, 7, 13, 15, 16, 20, 21])\napprox = norm(loc=8, scale=np.std(outcomes))\nPr1 = np.mean(outcomes >= 0.5)\nprint(Pr1, 1 - approx.cdf(0.5))\nPr2 = np.mean(outcomes >= 8)\nprint(Pr2, 1 - approx.cdf(8))\nPr3 = Pr2 / Pr1\nprint(Pr3, (1 - approx.cdf(8)) / (1 - approx.cdf(0.5)))",
"0.6666666666666666 0.7809911978646268\n0.4166666666666667 0.5\n0.6250000000000001 0.6402120809646661\n"
]
],
[
[
"# Ex 9",
"_____no_output_____"
]
],
[
[
"def queuing(num):\n number = poisson(mu=42).rvs()\n patients = np.sort(uniform(scale = 7 * 60).rvs(size=number))\n doctors = uniform(loc=5, scale=15).rvs(size=number)\n waits = np.zeros(number)\n for i, patient in enumerate(patients):\n leaves = patients[:i] + waits[:i] + doctors[:i]\n waitings = leaves[leaves > patient]\n if waitings.size >= num:\n waits[i] = waitings[np.argsort(waitings)[-num]] - patient\n \n waitor = np.sum(waits != 0)\n average = 0 if waitor == 0 else waits[waits != 0].mean()\n close = np.max(patients + waits + doctors)\n return number, waitor, average, close\n\ndef summary(data):\n q1 = np.quantile(data, 0.25)\n q2 = np.quantile(data, 0.5)\n q3 = np.quantile(data, 0.75)\n print(q1, q2, q3)\n \ndef experiment(K, num):\n N1 = np.zeros(K)\n N2 = np.zeros(K)\n T1 = np.zeros(K)\n T2 = np.zeros(K)\n for k in range(K):\n N1[k], N2[k], T1[k], T2[k] = queuing(num)\n\n summary(N1)\n summary(N2)\n summary(T1)\n summary(T2)",
"_____no_output_____"
],
[
"experiment(2000, 1)\nexperiment(2000, 2)\nexperiment(2000, 3)\nexperiment(2000, 4)",
"37.0 41.0 46.0\n34.0 39.0 44.0\n43.884871064237224 68.79742412481158 95.92058995511172\n493.90927785685227 546.0419636516217 598.3200131045669\n37.0 42.0 47.0\n13.0 18.0 25.0\n5.784516886715411 7.47497991685136 9.997353093383943\n421.2846668118887 428.38186534059207 435.2618549803673\n38.0 42.0 46.0\n3.0 5.0 9.0\n2.435167446008382 3.7719857505795766 5.1839975366573\n418.66789285282755 425.7814354884403 431.19928604236156\n38.0 41.0 46.0\n0.0 1.0 2.0\n0.0 0.8993646685572685 2.847994577233556\n418.39955946136763 425.5411658075214 431.15842037990177\n"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
4abf65d65da945c5033882c14a24cb4c85681b9e
| 6,066 |
ipynb
|
Jupyter Notebook
|
linkedList/list_array.ipynb
|
linnndachen/data-structure-and-algorithms
|
84749fb41eefa7efbc65ecc42b00a63e819ea1b5
|
[
"MIT"
] | null | null | null |
linkedList/list_array.ipynb
|
linnndachen/data-structure-and-algorithms
|
84749fb41eefa7efbc65ecc42b00a63e819ea1b5
|
[
"MIT"
] | null | null | null |
linkedList/list_array.ipynb
|
linnndachen/data-structure-and-algorithms
|
84749fb41eefa7efbc65ecc42b00a63e819ea1b5
|
[
"MIT"
] | null | null | null | 32.438503 | 362 | 0.60633 |
[
[
[
"# Linked List vs. Array",
"_____no_output_____"
],
[
"## Node and Linked list\nlinked list is based on node structure.\n\nWe can imagine a node to be an indivdual pod. \n\nIn each pod, we store different types of data - numbers, strings\n\nA linked list also store pointers in each pod on top of the data.\n\nIf the linked list only has 1 pointer, which points forward, then it is a singly linked list. \n\nOn the other hand, if it has 2 pointers, pointing forward and backward, then this is a double linked list. ",
"_____no_output_____"
],
[
"## What's the difference between a singly linked list and array?\n\nSingly linked list and array sounds similar because both of them indicate a sequence of data that can only move forward. \n\nThe difference is, each element in the array is not an individual pod. \n\nWe can use family and houses as an anology. \n\nLinked list is like an apartment, from the 1st floor to the 10th floor. Each floor has a different household.\n\nArray is like one of the households in the aparment of a linked list. An array is the family, all elements must be from the same family (same data type). Just like in a family, you have people from the youngest to the oldest. ",
"_____no_output_____"
],
[
"## When implementing Stack, should we use linked-list or array?\n\nStack can be implemented by using either method. The main concern is about the memory space.\n\n- Space:\n - array-based data structure is a lot more space efficient. The whole stack only requires 1 reference-sized array cell, while in a linked-list, each node has 2 references.\n\n - However, a linked-list can grow and shrink dynamically. Therefore, if the Stack is empty, linked-list can free up the space. Also, dynamic array can run into the memory allocation issues. \n\n\nIn Python, Stack is normally being implemented by using dynamic arrays, which the built-in list type.\n\n\"The list Built-in\nPython’s built-in list type makes a decent stack data structure as it supports push and pop operations in amortized O(1) time.\n\nPython’s lists are implemented as dynamic arrays internally which means they occasionally need to resize the storage space for elements stored in them whenever they are added or removed. The storage space is allocated more than required, so that not every push or pop requires resizing and you get an amortized O(1) time complexity for these operations.\n\nAlthough this makes their performance less consistent than the stable O(1) inserts and deletes provided by a linked list-based implementation. On the other hand, lists provide fast O(1) time random access to elements on the stack which can be an added benefit.\"\n\n-- From [Eruka.co](https://www.edureka.co/blog/stack-in-python/)\n",
"_____no_output_____"
]
],
[
[
"#array based\nmyStack = []\n\nmyStack.append('This')\nmyStack.append('is')\nmyStack.append('array')\n\nprint(myStack) #return ['This', 'is', 'array']\n\nmyStack.pop()\n#'array'",
"_____no_output_____"
],
[
"#use node/double linked-list based\n\nfrom collections import deque\nmyStack = deque()\n\nmyStack.append('This')\nmyStack.append('is')\nmyStack.append('linked-list')\n\nprint(myStack) #return ['This', 'is', 'linked-list']\n\nmyStack.pop()\n#'linked-list'",
"_____no_output_____"
]
],
[
[
"As we can see both methods have the exact same interface. The time cost for push() and pop() operation are also the same. However, the data strucure behind both methods are different. According to [Real Python](https://realpython.com/how-to-implement-python-stack/), it would better to use deque when we don' need to consider threading.\n\nWhen we need to consider threading, we should use LifoQueue, in which all methods (not just push and pop) are desinged to be threaded safe. This design is in the expense of extra running time.",
"_____no_output_____"
]
],
[
[
"from queue import LifoQueue\nmyStack = LifoQueue()\n\nmyStack.put('This')\nmyStack.put('is')\nmyStack.put('list')\n\nprint(myStack) #return ['This', 'is', 'list']\n\nmyStack.get()\n#'linked-list'\n",
"<queue.LifoQueue object at 0x7fcc08282a90>\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
4abf6708f686747da838e72c7c9b2fad7ca4f0d3
| 103,630 |
ipynb
|
Jupyter Notebook
|
ML_GuassianProcessClassifier.ipynb
|
lqiqiqi/graduate-thesis
|
9180d04d7301c2b4bb83d6a6d059a6119bc5f512
|
[
"MIT"
] | null | null | null |
ML_GuassianProcessClassifier.ipynb
|
lqiqiqi/graduate-thesis
|
9180d04d7301c2b4bb83d6a6d059a6119bc5f512
|
[
"MIT"
] | null | null | null |
ML_GuassianProcessClassifier.ipynb
|
lqiqiqi/graduate-thesis
|
9180d04d7301c2b4bb83d6a6d059a6119bc5f512
|
[
"MIT"
] | null | null | null | 104.676768 | 69,508 | 0.799064 |
[
[
[
"import matplotlib.pyplot as plt\nfrom sklearn.neighbors import LocalOutlierFactor",
"_____no_output_____"
],
[
"import pandas as pd\nimport os\nimport numpy as np\n\n# file_dir = os.getcwd()\n# raw_data_dir = os.path.join(file_dir, '/raw_data')\nfile_list = []\n\nfor root, dirs, files in os.walk('./raw_data'):\n for file in files:\n if os.path.splitext(file)[1] == '.csv':\n # 排除掉readme.md等非csv文件\n file_list.append(file)\n\n# print(file_list) \n\ndf = pd.DataFrame()\n\nfor index, csv in enumerate(file_list):\n \n df_temp = pd.read_csv('./raw_data/'+csv)\n if int(csv[-5]) == 0:\n file_list[index] = csv[:-5] + '1' +csv[-4:]\n print('changed csv: ', csv)\n target_column = pd.DataFrame(np.array([int(file_list[index][-5])]*df_temp.shape[0]))\n # 构造target列,注意要使用二维的array [[1],[1]]这样是列 [[1,1]]这样是行\n df_temp = pd.concat([df_temp, target_column], axis=1, ignore_index=True)\n # 连接样本和target列\n df = pd.concat([df, df_temp], ignore_index=True)\n # 连接所有样本\n \nprint(file_list)",
"changed csv: data_20190303_1035_0.csv\n['data_20190301_1035_2.csv', 'data_20190303_1035_1.csv', 'data_20190309_1008_3.csv', 'data_20190309_1334_3.csv', 'data_20190309_1336_3.csv', 'data_20190309_1342_2.csv', 'data_20190309_1358_2.csv', 'data_20190309_1405_2.csv', 'data_20190309_1409_1.csv', 'data_20190309_1411_1.csv', 'data_20190309_1413_1.csv']\n"
],
[
"pd.set_option('display.max_rows', 10)\nprint(df)",
" 0 1 2 3 4 5 6 \\\n0 22.62949 23.34601 22.84607 24.27225 24.22751 24.99457 27.39221 \n1 23.16067 23.59763 25.04761 24.74619 27.40509 27.20163 28.14008 \n2 23.03558 24.24997 23.59360 24.87021 25.99951 27.53504 27.61453 \n3 22.25803 23.60641 22.50025 24.63440 23.02280 26.32965 24.39380 \n4 22.11905 23.36346 22.37064 22.95017 22.00073 22.74686 22.73328 \n.. ... ... ... ... ... ... ... \n759 25.18289 25.38110 25.18024 25.48029 25.63098 25.73325 24.86350 \n760 25.06046 25.24048 25.53430 24.99799 25.09933 24.95724 25.81555 \n761 24.67053 24.99020 25.28839 25.46359 25.40866 24.84756 24.85449 \n762 25.29895 25.60233 25.40549 25.22012 24.87308 25.38123 25.17206 \n763 25.17365 25.10172 24.81860 25.33408 24.65823 24.94394 25.27774 \n\n 7 8 9 ... 759 760 761 \\\n0 26.94012 27.34689 27.80975 ... 25.27432 22.86929 22.75967 \n1 27.87192 28.14847 27.80102 ... 23.29489 22.53711 22.20065 \n2 28.60287 27.45639 27.70782 ... 22.45697 21.56348 22.20950 \n3 27.77655 26.36633 27.61267 ... 23.10901 21.89667 22.56369 \n4 22.88919 22.98193 23.40122 ... 23.62778 22.02560 21.54199 \n.. ... ... ... ... ... ... ... \n759 25.42084 25.64047 25.47333 ... 25.27060 23.98160 24.03442 \n760 25.19739 25.74136 25.16293 ... 24.87146 23.03009 24.03830 \n761 24.98889 25.43405 25.06406 ... 25.66187 23.23767 23.91659 \n762 25.29022 25.73035 25.05389 ... 27.25143 23.64246 24.23828 \n763 25.39166 25.43402 24.65769 ... 33.65805 24.15677 26.04568 \n\n 762 763 764 765 766 767 768 \n0 22.38684 22.88013 22.94119 22.99555 22.63037 23.33066 2 \n1 22.37799 22.64114 21.91843 22.99634 23.05240 22.60111 2 \n2 22.61514 22.40997 22.42819 22.61395 23.05298 23.48731 2 \n3 22.38712 22.90918 22.18204 22.10751 22.77466 23.50760 2 \n4 22.05508 22.78049 21.82272 22.62360 22.51157 23.20691 2 \n.. ... ... ... ... ... ... ... 
\n759 24.19855 24.76816 24.14426 24.95731 24.59708 24.86298 1 \n760 23.95770 25.79822 22.66364 25.08643 24.03143 25.14664 1 \n761 24.18204 24.75763 23.88477 24.82208 24.30557 25.40750 1 \n762 24.05740 24.75720 24.23682 24.57175 24.56815 25.12949 1 \n763 23.48694 24.75809 23.01379 24.82254 23.73499 25.54645 1 \n\n[764 rows x 769 columns]\n"
],
[
"target_column = df.iloc[:, -1]\ndf = df.iloc[:, :-1]\n\n# 让脸部温度单独保存,环境温度设计为统一值\nta = df.min(axis=1)\n\ndf_face = pd.DataFrame()\n# df_face 脸部温度+其他区域温度置换为环境温度\ndf_onlyface = pd.DataFrame()\n# df_onlyface 只有脸部温度点\n\nfor i, minTa in zip(df.values, ta):\n face = []\n onlyface = []\n for j in i:\n try:\n # 因为检查到有一些数字不是float是str,像21.42346.1,不知是什么原因导致的,\n if j - minTa > 7:\n face.append(j)\n# onlyface.append(j)\n else:\n face.append(minTa)\n ave_ta ....\n except:\n j = float(j[:6])\n if j - minTa > 7:\n face.append(j)\n# onlyface.append(j)\n else:\n face.append(minTa)\n face_todf = pd.DataFrame(face).T\n# onlyface_todf = pd.DataFrame(onlyface).T\n df_face = pd.concat([df_face, face_todf], axis = 0, ignore_index=True)\n# df_onlyface = pd.concat([df_onlyface, onlyface_todf], axis = 0, ignore_index=True)",
"_____no_output_____"
],
[
"# fit the model for outlier detection (default)\nclf = LocalOutlierFactor(n_neighbors=40, contamination=0.05)\n# use fit_predict to compute the predicted labels of the training samples\n# (when LOF is used for outlier detection, the estimator has no predict,\n# decision_function and score_samples methods).\ny_pred = clf.fit_predict(df_face)\n# n_errors = (y_pred != ground_truth).sum()\nX_scores = clf.negative_outlier_factor_",
"_____no_output_____"
],
[
"min_index = np.argpartition(X_scores, int(df_face.shape[0]*0.05))[:int(df_face.shape[0]*0.05)]\n\n# 去除掉5%的异常样本\ndf = df.drop(min_index)",
"_____no_output_____"
],
[
"df",
"_____no_output_____"
],
[
"ta = ta.iloc[df.index]",
"_____no_output_____"
],
[
"ta.index = np.arange(ta.shape[0])",
"_____no_output_____"
],
[
"for i, minTa in zip(df.values, ta):\n# face = []\n onlyface = []\n for j in i:\n try:\n # 因为检查到有一些数字不是float是str,像21.42346.1,不知是什么原因导致的,\n if j - minTa > 7:\n# face.append(j)\n onlyface.append(j)\n# else:\n# face.append(minTa)\n except:\n j = float(j[:6])\n if j - minTa > 7:\n# face.append(j)\n onlyface.append(j)\n# else:\n# face.append(minTa)\n# face_todf = pd.DataFrame(face).T\n onlyface_todf = pd.DataFrame(onlyface).T\n# df_face = pd.concat([df_face, face_todf], axis = 0, ignore_index=True)\n df_onlyface = pd.concat([df_onlyface, onlyface_todf], axis = 0, ignore_index=True)",
"_____no_output_____"
],
[
"skewness = pd.DataFrame(df_onlyface.skew(axis=1))\nmaxTemp = pd.DataFrame(df_onlyface.max(axis=1))\nminTemp = pd.DataFrame(df_onlyface.min(axis=1))\nmeanTemp = pd.DataFrame(df_onlyface.mean(axis=1))",
"_____no_output_____"
],
[
"# 指定划分bin的点\nbins = [28.3, 28.6, 28.9, 29.2, 29.5,\n 29.8, 30.1, 30.4, 30.7, 31.0, 31.3,\n 31.6, 31.9, 32.2, 32.5, 32.8, 33.1,\n 33.4, 33.7, 34.0, 34.3, 34.6, 34.9,\n 35.2, 35.5, 35.8, 36.1, 36.4, 36.7]\n\nhighest_bin_list = []\nfor i in df_onlyface.values:\n i = [j for j in i if not np.isnan(j)]\n N, _ = np.histogram(np.clip(i,28.3,36.7), bins=bins)\n highest_bin = (bins[N.argmax()]+bins[N.argmax()+1])/2\n # 返回各区域频数N\n highest_bin_list.append(highest_bin)\n\nmodeTemp = pd.DataFrame(highest_bin_list, index=df_onlyface.index)",
"_____no_output_____"
],
[
"features = pd.concat([skewness, maxTemp, minTemp, meanTemp, modeTemp, ta], axis=1)\nfeatures.columns = ['skewness', 'maxTemp', 'minTemp', 'meanTemp', 'modeTemp', 'ta']",
"_____no_output_____"
],
[
"features['max_minus_min'] = features['maxTemp'] - features['minTemp']\nfeatures['mode_minus_ta'] = features['modeTemp'] - features['ta']\n# features['mean_minus_ta'] = features['meanTemp'] - features['ta']\n# features['max_minus_ta'] = features['maxTemp'] - features['ta']\nfeatures['min_minus_ta'] = features['minTemp'] - features['ta']\n# features['mode_minus_min'] = features['modeTemp'] - features['minTemp']\n# features['mean_minus_min'] = features['meanTemp'] - features['minTemp']\n# features['max_minus_mean'] = features['maxTemp'] - features['meanTemp']\n# features['mode_squa'] = features['modeTemp'] ** 2\n# features['mean_squa'] = features['meanTemp'] ** 2\n# features['max_squa'] = features['maxTemp'] ** 2\n# features['mode_cub'] = features['modeTemp'] ** 3\n# features['mean_cub'] = features['meanTemp'] ** 3\n# features['max_cub'] = features['maxTemp'] ** 3\nfeatures = features.drop([\"minTemp\"], axis=1)",
"_____no_output_____"
],
[
"import seaborn as sns\nimport matplotlib.pyplot as plt\n\nfeaturesCorr = features.corr('spearman')\nfig = plt.figure(figsize=(20, 20))\n# plt.subplots((1,1,1)) # 设置画面大小\nsns.heatmap(featuresCorr, annot=True, vmax=1, square=True)\nplt.show()",
"_____no_output_____"
],
[
"from sklearn.preprocessing import StandardScaler\n\nstd = StandardScaler()\nfeatures_scaler = std.fit_transform(features)",
"_____no_output_____"
],
[
"target_column = target_column.iloc[df.index]",
"_____no_output_____"
],
[
"features_scaler.shape",
"_____no_output_____"
],
[
"target_column.shape",
"_____no_output_____"
],
[
"from sklearn.model_selection import train_test_split\n\ntrain = features_scaler\ntarget = target_column.values\n\ntrain_X,test_X, train_y, test_y = train_test_split(train,\n target,\n test_size = 0.1,\n random_state = 0)",
"_____no_output_____"
],
[
"from sklearn.feature_selection import SelectFromModel\nfrom sklearn.ensemble import ExtraTreesClassifier\nclf = ExtraTreesClassifier()\nclf = clf.fit(train_X, train_y)\nprint(clf.feature_importances_ )\n\nmodel = SelectFromModel(clf, prefit=True)\nprint(features.columns[model.get_support(indices=True)])\n# print(model.get_support(indices=True))\nX_new = model.transform(train_X)",
"[0.1647388 0.08745432 0.16251091 0.07479519 0.1882895 0.18536004\n 0.10174216 0.03510908]\nIndex(['skewness', 'meanTemp', 'ta', 'max_minus_min'], dtype='object')\n"
],
[
"from sklearn.gaussian_process.kernels import RBF\nfrom sklearn.gaussian_process import GaussianProcessClassifier\n\ngpc = GaussianProcessClassifier(1.0 * RBF(1.0), max_iter_predict=500, n_restarts_optimizer=5, warm_start=True, random_state=1, multi_class='one_vs_rest', n_jobs=-1)\ngpc.fit(X_new, train_y)",
"_____no_output_____"
],
[
"test_X_com = np.concatenate((test_X[:, 0].reshape(-1,1), test_X[:, 2].reshape(-1,1), test_X[:, 4:6]), axis=1)\n\ngpc_score = round(gpc.score(test_X_com, test_y) * 100, 2)\nprint(gpc_score)\n\nfrom sklearn.model_selection import cross_val_score\ncross_val_score(gpc, test_X_com, test_y)",
"98.63\n"
],
[
"gpc.log_marginal_likelihood()",
"_____no_output_____"
],
[
"gpc_notrain = GaussianProcessClassifier(1.0 * RBF(1.0))\ngpc_notrain.predict(X_new, return_std=True)",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4abf74a9990f3a4c9d89726d45de64c558f77513
| 5,320 |
ipynb
|
Jupyter Notebook
|
LiveStats/Testing.ipynb
|
krypticmouse/LiveStats
|
dcdd4456f2d3c9195f6124ec524ebf3f96e03606
|
[
"MIT"
] | 1 |
2021-01-23T19:03:49.000Z
|
2021-01-23T19:03:49.000Z
|
LiveStats/Testing.ipynb
|
krypticmouse/LiveStats
|
dcdd4456f2d3c9195f6124ec524ebf3f96e03606
|
[
"MIT"
] | null | null | null |
LiveStats/Testing.ipynb
|
krypticmouse/LiveStats
|
dcdd4456f2d3c9195f6124ec524ebf3f96e03606
|
[
"MIT"
] | null | null | null | 25.454545 | 101 | 0.471617 |
[
[
[
"from livestats import run_bessels_correction",
"_____no_output_____"
],
[
"import numpy as np\nimport warnings\n\nclass BC:\n def __init__(self):\n self.normal_dist = np.random.standard_normal(10000)\n self.avl_dist = {\n 'normal': self.normal_dist,\n 'uniform': np.random.uniform(-10, 10, 10000),\n 't': np.random.standard_t(1000, size = 10000),\n 'gamma': np.random.standard_gamma(0.5, size = 10000),\n 'exponential': np.random.standard_exponential(size = 10000)\n }\n \n def generate_sample_distribution(self, data = 'normal'):\n if str(type(data)) != \"<class 'str'>\":\n self.data = np.array(data)\n \n else:\n if data in self.avl_dist.keys():\n self.data = self.avl_dist[data]\n else:\n raise Exception(f'Distribution available are {tuple(self.avl_dist.keys())}')\n \n if self.sample_size is None or self.sample_size > len(self.data):\n warnings.warn('Sample Size exceeds Population Size changing it to 50%')\n self.sample_size = int(0.5 * len(self.data))\n\n sample = self.data.copy()\n np.random.shuffle(sample)\n return sample[:self.sample_size]\n\n def run(self, data = 'normal', sample_size = None):\n self.sample_size = sample_size\n sample = self.generate_sample_distribution(data)\n \n print(f'Sample Size: {len(sample)}')\n print(f'Population Size: {len(self.data)}')\n print(f'S.D. of Population: {np.std(self.data)}')\n print(f'S.D. of Sample without Bessel\\'s Correction : {np.std(sample)}')\n print(f'S.D. of Sample with Bessel\\'s Correction : {np.std(sample, ddof = 1)}')",
"_____no_output_____"
],
[
"run_bessels_correction(list(range(100000)), 10000)",
"Sample Size: 10000\nPopulation Size: 100000\nS.D. of Population: 28867.513458037913\nS.D. of Sample without Bessel's Correction : 28997.4313881686\nS.D. of Sample with Bessel's Correction : 28998.88136848744\n"
],
[
"import numpy as np",
"_____no_output_____"
],
[
"b = np.random.choice(50,40)",
"_____no_output_____"
],
[
"a = np.array(range(20))\na",
"_____no_output_____"
],
[
"b = a.copy()\nnp.random.shuffle(b)\nb",
"_____no_output_____"
],
[
"a",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4abf7e205f2cf6346e99e82ff101eb4761444e00
| 4,910 |
ipynb
|
Jupyter Notebook
|
Trie/1110/720. Longest Word in Dictionary.ipynb
|
YuHe0108/Leetcode
|
90d904dde125dd35ee256a7f383961786f1ada5d
|
[
"Apache-2.0"
] | 1 |
2020-08-05T11:47:47.000Z
|
2020-08-05T11:47:47.000Z
|
Trie/1110/720. Longest Word in Dictionary.ipynb
|
YuHe0108/LeetCode
|
b9e5de69b4e4d794aff89497624f558343e362ad
|
[
"Apache-2.0"
] | null | null | null |
Trie/1110/720. Longest Word in Dictionary.ipynb
|
YuHe0108/LeetCode
|
b9e5de69b4e4d794aff89497624f558343e362ad
|
[
"Apache-2.0"
] | null | null | null | 24.79798 | 104 | 0.423829 |
[
[
[
"说明:\n 给出一个字符串数组words组成的一本英语词典。从中找出最长的一个单词,该单词是由words词典中其他单词逐步添加一个字母组成。\n 若其中有多个可行的答案,则返回答案中字典序最小的单词。\n 若无答案,则返回空字符串。\n\n示例 1:\n 输入:\n words = [\"w\",\"wo\",\"wor\",\"worl\", \"world\"]\n 输出:\"world\"\n 解释: \n 单词\"world\"可由\"w\", \"wo\", \"wor\", 和 \"worl\"添加一个字母组成。\n\n示例 2:\n 输入:\n words = [\"a\", \"banana\", \"app\", \"appl\", \"ap\", \"apply\", \"apple\"]\n 输出:\"apple\"\n 解释:\n \"apply\"和\"apple\"都能由词典中的单词组成。但是\"apple\"的字典序小于\"apply\"。\n\n提示:\n 所有输入的字符串都只包含小写字母。\n words数组长度范围为[1,1000]。\n words[i]的长度范围为[1,30]。",
"_____no_output_____"
]
],
[
[
"class Node:\n def __init__(self):\n self.children = {}\n self.isWord = False\n\nclass Trie:\n def __init__(self):\n self.root = Node()\n \n def insert(self, word):\n node = self.root\n for char in word:\n if char not in node.children:\n node.children[char] = Node()\n node = node.children[char]\n node.isWord = True\n\nclass Solution:\n def longestWord(self, words) -> str:\n trie = Trie()\n for w in words:\n trie.insert(w)\n \n self.words = set(words)\n self.res, self.maxLen = None, 0\n self.dfs(trie.root, '')\n return self.res\n \n def dfs(self, node, path):\n if node.isWord:\n if len(path) > self.maxLen:\n self.maxLen = len(path)\n self.res = path[:]\n elif len(path) == self.maxLen:\n self.res = min(path[:], self.res)\n node.isWord = False\n \n for c, v in node.children.items():\n if not v.isWord:\n continue\n self.dfs(v, path + c) ",
"_____no_output_____"
],
[
"solution = Solution()\nsolution.longestWord(words = [\"a\", \"banana\", \"app\", \"appl\", \"ap\", \"apply\", \"apple\"])",
"_____no_output_____"
],
[
"print(min('apple', 'apply'))",
"apple\n"
],
[
"\n self.words = set(words)\n self.res, self.maxLen, self.maxCount = None, 0, 0\n self.dfs(trie.root, 0, '')\n return self.res\n\ndef dfs(self, node, count, path):\n if node.isWord:\n if count > self.maxCount:\n if len(path) > self.maxLen:\n self.maxLen = len(path)\n self.maxCount = count\n self.res = path[:]\n elif len(path) == self.maxLen:\n self.res = min(path[:], self.res)\n node.isWord = False\n\n for c, v in node.children.items():\n if path in self.words:\n count += 1\n self.dfs(v, count, path + c) ",
"_____no_output_____"
],
[
"min('eyj', 'eyjoui')",
"_____no_output_____"
]
]
] |
[
"raw",
"code"
] |
[
[
"raw"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
4abf97531386bee2cd22e11d3344c307b6655e30
| 8,287 |
ipynb
|
Jupyter Notebook
|
Example Notebooks/.ipynb_checkpoints/Samples and Features-checkpoint.ipynb
|
adeepak7/example-notebooks
|
723bd30ea5617f3a7c0df76f13c5190ae81e9f6d
|
[
"BSD-3-Clause"
] | null | null | null |
Example Notebooks/.ipynb_checkpoints/Samples and Features-checkpoint.ipynb
|
adeepak7/example-notebooks
|
723bd30ea5617f3a7c0df76f13c5190ae81e9f6d
|
[
"BSD-3-Clause"
] | null | null | null |
Example Notebooks/.ipynb_checkpoints/Samples and Features-checkpoint.ipynb
|
adeepak7/example-notebooks
|
723bd30ea5617f3a7c0df76f13c5190ae81e9f6d
|
[
"BSD-3-Clause"
] | 1 |
2021-08-11T14:37:29.000Z
|
2021-08-11T14:37:29.000Z
| 39.461905 | 280 | 0.62049 |
[
[
[
"# Working with Samples and Features\n\nFrom a combined dataset of cancer and normal samples, extract the normal samples. Within the normal samples, find the genes coexpressed with LRPPRC (Affymetrix probe M92439_at), a gene with mitochondrial function.\n\n## Before you begin\n\n* Sign in to GenePattern by entering your username and password into the form below. If you are seeing a block of code instead of the login form, go to the menu above and select Cell > Run All.\n* The data will will use in this exercise is from the Global Cancer Map, published along with the paper *[Multi-Class Cancer Diagnosis Using Tumor Gene Expression Signatures](http://www.broadinstitute.org/cgi-bin/cancer/publications/pub_paper.cgi?mode=view&paper_id=61)*.\n* Links to the data files used in this exercise are below:\n * RES file: [GCM_Total.res](https://software.broadinstitute.org/cancer/software/genepattern/data/gcm/GCM_Total.res)\n * CLS file: [GCM_Total.cls](https://software.broadinstitute.org/cancer/software/genepattern/data/gcm/GCM_Total.cls)\n * CHIP file: [HU6800.chip](https://software.broadinstitute.org/cancer/software/genepattern/data/gcm/HU6800.chip)",
"_____no_output_____"
]
],
[
[
"# Requires GenePattern Notebook: pip install genepattern-notebook\nimport gp\nimport genepattern\n\n# Username and password removed for security reasons.\ngenepattern.GPAuthWidget(genepattern.register_session(\"https://genepattern.broadinstitute.org/gp\", \"\", \"\"))",
"Widget Javascript not detected. It may not be installed or enabled properly.\n"
]
],
[
[
"## Step 1: Selecting a Subset of an Expression File\n\n1. Insert an analysis cell for the *SelectFeaturesColumns* module and move it below this set of instructions.\n2. Set the following parameters:\n 1. Drag-and-drop the *[GCM_Total.res](https://software.broadinstitute.org/cancer/software/genepattern/data/gcm/GCM_Total.res)* file linked above into the *input.filename* parameter.\n 2. Set the columns parameter to *190-279*.\n 3. Set the *output.file* paremeter to *GCM_Normals.res*.\n3. Click the *Run* button.",
"_____no_output_____"
],
[
"## Step 2: Finding Coexpressed Genes\n\n1. Insert an analysis cell for the *GeneNeighbors* module and move it below this set of instructions.\n2. Set the following parameters:\n 1. Send the *GCM_Normals.res* file produced by the *SelectFeaturesColumns* job above to the *data.filename* parameter.\n 2. Set the *gene.accession* parameter to *M92439_at*.\n3. Click the *Run* button.",
"_____no_output_____"
],
[
"## Step 3: Viewing Coexpressed Genes\n\n1. Look for the *GCM_Normals.markerdata.gct* file produced by the GeneNeighbors job above. \n2. Click it and look for *Send to New GenePattern Cell* in the menu, then select *HeatMapViewer*.\n3. Move the new *HeatMapViewer* cell below these instructions.\n4. Click the *Run* button.",
"_____no_output_____"
],
[
"## Step 4: Collapse the Expression File\n\n1. Insert an analysis cell for the *CollapseDataset* module and move it below this set of instructions.\n2. Set the following parameters:\n 1. Send the *GCM_Normals.markerdata.gct* file produced by the *GeneNeighbors* job above to the *dataset.file* parameter.\n 2. Drag-and-drop *[HU6800.chip](https://software.broadinstitute.org/cancer/software/genepattern/data/gcm/HU6800.chip)* to the *chip.platform* parameter.\n3. Click the *Run* button.",
"_____no_output_____"
],
[
"## Step 5: Converting an Affy Expression File to a List of Genes\n\n1. Look for the *GCM_Normals.markerdata.collapsed.gct* file produced by the *CollapseDataset* job above. \n2. Click it and look for *Send to New GenePattern Cell* in the menu, then select *ExtractRowNames*.\n3. Move the new *ExtractRowNames* cell below these instructions.\n4. Click the *Run* button.\n5. View the resulting gene list by clicking *GCM_Normals.markerdata.collapsed.row.names.txt* and selecting *Open in New Tab*.\n",
"_____no_output_____"
],
[
"## Find pathways associated with gene list\nThe following code will search the [mygene.info](http://mygene.info) gene database service and query each result gene to determine which Reactome pathways are associated with it.\n\n<div class=\"alert alert-info\">\n<p>Executing the cells below will read in a list of genes, similar to the list created earlier in the main Samples and Features exercise. Each gene in this list will then be sent to [mygene.info](http://mygene.info), a gene database service.</p>\n</div>\n\n<div class=\"alert alert-info\">\n- Click on the i icon next to the `GCM_Normals.markerdata.collapsed.row.names.txt` file in the last step\n- Select \"Send to Code\"\n- Select and copy the reference to the output file, for example `job1306740.get_output_files()[1]` (do NOT include the \"this file = \" part)\n- Paste the result into the code below to replace **INSERT PASTED CODE HERE**\n- The resulting line should look like `gene_list_filename = job1306740.get_output_files()[1]`\n- Execute the cell below",
"_____no_output_____"
]
],
[
[
"gene_list_filename = **INSERT PASTED CODE HERE**\ngene_list_file = gene_list_filename.open()\ngene_list = gene_list_file.readlines()",
"_____no_output_____"
],
[
"import requests\nimport json\n\nfor gene in gene_list:\n gene = gene.decode(\"utf-8\")\n if \" \" in gene:\n gene=gene[0:gene.find(\" \")]\n gene_results = requests.get(\"https://mygene.info/v2/query?q=\"+gene+\"&fields=pathway.reactome\").content\n gene_results_json = json.loads(gene_results)\n print(gene)\n pathways = list()\n for h in range(len(gene_results_json[\"hits\"])):\n for k in gene_results_json[\"hits\"][h].keys():\n if u'pathway' == k:\n for i in range(len(gene_results_json[\"hits\"][h][\"pathway\"][\"reactome\"])):\n pathways.append(gene_results_json[\"hits\"][h][\"pathway\"][\"reactome\"][i][\"name\"])\n if (len(pathways) == 0):\n print(\"\\tNo pathways found\")\n else:\n for p in sorted(set(pathways)):\n print(\"\\t\" + p)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
]
] |
4abf975f8962836b9629c36e24969d01a7bb6b7f
| 12,939 |
ipynb
|
Jupyter Notebook
|
Docs/Prashanth/.ipynb_checkpoints/Import-checkpoint.ipynb
|
infapy/infapy
|
0cb11310130be70ce1b647aa5ede929c1eb9b2ce
|
[
"Apache-2.0"
] | null | null | null |
Docs/Prashanth/.ipynb_checkpoints/Import-checkpoint.ipynb
|
infapy/infapy
|
0cb11310130be70ce1b647aa5ede929c1eb9b2ce
|
[
"Apache-2.0"
] | null | null | null |
Docs/Prashanth/.ipynb_checkpoints/Import-checkpoint.ipynb
|
infapy/infapy
|
0cb11310130be70ce1b647aa5ede929c1eb9b2ce
|
[
"Apache-2.0"
] | 1 |
2021-09-23T10:31:56.000Z
|
2021-09-23T10:31:56.000Z
| 36.447887 | 2,000 | 0.570755 |
[
[
[
"# Import",
"_____no_output_____"
],
[
"Use this resource with the export resource to migrate objects from one organization to another.",
"_____no_output_____"
],
[
"# Prerequisite - Need to get the source and target object ids",
"_____no_output_____"
],
[
"When we are performing an import operation, we need to map the source connection with the target connection, similartly map the source runtime environment with the target runtime env. As a first step, we need to get the object ids of the source dependant objects and the target dependant objects.",
"_____no_output_____"
]
],
[
[
"# First fetching source depenant object ids\nimport infapy\n\ninfapy.setFileLogger(name=\"DEV Logger\",level=\"DEBUG\")\ndevInfaHandler = infapy.connect(profile=\"DEV\")\nv3 = devInfaHandler.v3()\n\n# Get the connection object id\nlookupObj = v3.lookup(path=\"__ff\",objectType=\"connection\")\nprint(lookupObj)\nsrcConnectionID = lookupObj[\"objects\"][0][\"id\"]\nprint(\"srcConnection ID: \" + srcConnectionID)\n\n# Get the agent group object details\nlookupObj = v3.lookup(path=\"prashanth-sbx\",objectType=\"agentgroup\")\nprint(lookupObj)\nsrcRuntimeID = lookupObj[\"objects\"][0][\"id\"]\nprint(\"srcRuntime ID: \" + srcRuntimeID)\n",
"{'objects': [{'id': '848Au1yuOzAcdxJMgPkdqy', 'path': '__ff', 'type': 'Connection', 'description': None, 'updatedBy': 'prashanth-p', 'updateTime': '2021-08-12T14:40:54Z'}]}\nsrcConnection ID: 848Au1yuOzAcdxJMgPkdqy\n{'objects': [{'id': '95OeUg6sjYVhH6zxQUB76k', 'path': 'prashanth-sbx', 'type': 'AgentGroup', 'description': None, 'updatedBy': 'prashanth-p', 'updateTime': '2021-02-15T07:50:30Z'}]}\nsrcRuntime ID: 95OeUg6sjYVhH6zxQUB76k\n"
],
[
"# First fetching target depenant object ids\nimport infapy\n\ninfapy.setFileLogger(name=\"QA Logger\",level=\"DEBUG\")\nqaInfaHandler = infapy.connect(profile=\"QA\")\nv3 = qaInfaHandler.v3()\n\n# Get the connection object id\nlookupObj = v3.lookup(path=\"FF\",objectType=\"connection\")\nprint(lookupObj)\ntgtConnectionID = lookupObj[\"objects\"][0][\"id\"]\nprint(\"tgtConnection ID: \" + tgtConnectionID)\n\n# Get the agent group object details\nlookupObj = v3.lookup(path=\"prashanth-redhat-sbx\",objectType=\"agentgroup\")\nprint(lookupObj)\ntgtRuntimeID = lookupObj[\"objects\"][0][\"id\"]\nprint(\"tgtRuntime ID: \" + tgtRuntimeID)",
"{'objects': [{'id': '9YGTW8zLVaAb6O15bcjbyk', 'path': 'FF', 'type': 'Connection', 'description': None, 'updatedBy': 'prash1234', 'updateTime': '2021-09-25T07:12:53Z'}]}\ntgtConnection ID: 9YGTW8zLVaAb6O15bcjbyk\n{'objects': [{'id': 'iwvniZZPdG6cltC3Uzcf2i', 'path': 'prashanth-redhat-sbx', 'type': 'AgentGroup', 'description': None, 'updatedBy': 'prash1234', 'updateTime': '2021-09-25T07:03:24Z'}]}\ntgtRuntime ID: iwvniZZPdG6cltC3Uzcf2i\n"
]
],
[
[
"## Function: uploadZipToGetJobID()",
"_____no_output_____"
],
[
"> Use this function to import the zip file to fetch the job id\n> This function initiates the process \n>\n> Args:\n> filePath (str, optional): Defaults to os.getcwd().\n> fileName (str, optional): Defaults to \"infapyExportDownloaded.zip\".\n>\n> Raises:\n> InvalidArgumentsError: if invalid arguments are provided\n>\n> Returns:\n> json: response after the upload zip has been initiated",
"_____no_output_____"
]
],
[
[
"v3 = qaInfaHandler.v3()\nimportObj = v3.importObject()\n\nresponse = importObj.uploadZipToGetJobID()\nprint(response)\nprint()\n\nimportJobID = response[\"jobId\"]\nprint(\"Import Job ID: \" + importJobID)",
"{'jobId': '8bSlx81r2GSlX417G9XNsq', 'jobStatus': {'state': 'NOT_STARTED', 'message': None}, 'checksumValid': True}\n\nImport Job ID: 8bSlx81r2GSlX417G9XNsq\n"
]
],
[
[
"## Function: startImportByJobID()",
"_____no_output_____"
],
[
"> This function initiates the job once the\n> zip is uploaded\n>\n> Args:\n> jobID (str): From response of uploadZipToGetJobID\n> importBody (dict): Read the docs for understanding the import body\n>\n> Raises:\n> InvalidArgumentsError: if invalid body sent\n>\n> Returns:\n> json: import job success response",
"_____no_output_____"
]
],
[
[
"jsonObject = {\n \"name\" : \"ImportNameFromScript\",\n \"importSpecification\" : {\n \"defaultConflictResolution\" : \"OVERWRITE\",\n \"objectSpecification\" : [\n {\n \"sourceObjectId\" : \"848Au1yuOzAcdxJMgPkdqy\",\n \"targetObjectId\" : \"9YGTW8zLVaAb6O15bcjbyk\"\n },\n {\n \"sourceObjectId\" : \"95OeUg6sjYVhH6zxQUB76k\",\n \"targetObjectId\" : \"iwvniZZPdG6cltC3Uzcf2i\"\n }]\n }\n}\n\n# using importObj created above\nresponse = importObj.startImportByJobID(jobID=\"8bSlx81r2GSlX417G9XNsq\",importBody=jsonObject)\nprint(response)",
"{'id': '8bSlx81r2GSlX417G9XNsq', 'createTime': '2021-09-26T11:08:30.000Z', 'updateTime': '2021-09-26T11:09:46.183Z', 'name': 'ImportNameFromScript', 'startTime': '2021-09-26T11:09:46.079Z', 'endTime': None, 'status': {'state': 'IN_PROGRESS', 'message': 'In Progress'}, 'objects': None, 'sourceOrgId': 'fg1dzqDZ1K3lbHp8uq5vQB', 'checksumValid': True}\n"
]
],
[
[
"## Function: getStatusOfImportByImportID()",
"_____no_output_____"
],
[
"> use this method to get the status of the import\n> if it is a success or a failure\n>\n> Args:\n> importID (importID): provide the import id you recieved\n> from uploadZipToGetJobID Method used before this\n>\n> Returns:\n> json: import operation status",
"_____no_output_____"
]
],
[
[
"response = importObj.getStatusOfImportByImportID(importID=\"8bSlx81r2GSlX417G9XNsq\")\nprint(response)",
"{'id': '8bSlx81r2GSlX417G9XNsq', 'createTime': '2021-09-26T11:08:30.000Z', 'updateTime': '2021-09-26T11:09:49.000Z', 'name': 'ImportNameFromScript', 'startTime': '2021-09-26T11:09:46.000Z', 'endTime': '2021-09-26T11:09:49.000Z', 'status': {'state': 'SUCCESSFUL', 'message': 'Import completed successfully'}, 'objects': [{'sourceObject': {'id': '1QcjcQ9hXwwcqW1P93354V', 'name': 'mt_infapy_test', 'path': '/infapy', 'type': 'MTT', 'description': ''}, 'targetObject': {'id': None, 'name': 'mt_infapy_test', 'path': '/infapy', 'type': 'MTT', 'description': None, 'status': None}, 'status': {'state': 'SUCCESSFUL', 'message': 'Create object'}}, {'sourceObject': {'id': '3vVj4xdOpKsgAqwRSyhQM3', 'name': 'm_infapy_test', 'path': '/infapy', 'type': 'DTEMPLATE', 'description': ''}, 'targetObject': {'id': None, 'name': 'm_infapy_test', 'path': '/infapy', 'type': 'DTEMPLATE', 'description': None, 'status': None}, 'status': {'state': 'SUCCESSFUL', 'message': 'Create object'}}, {'sourceObject': {'id': '848Au1yuOzAcdxJMgPkdqy', 'name': '__ff', 'path': None, 'type': 'Connection', 'description': None}, 'targetObject': {'id': None, 'name': 'FF', 'path': None, 'type': 'Connection', 'description': None, 'status': None}, 'status': {'state': 'SUCCESSFUL', 'message': 'Reuse existing object'}}, {'sourceObject': {'id': '95OeUg6sjYVhH6zxQUB76k', 'name': 'prashanth-sbx', 'path': None, 'type': 'AgentGroup', 'description': None}, 'targetObject': {'id': None, 'name': 'prashanth-redhat-sbx', 'path': None, 'type': 'AgentGroup', 'description': None, 'status': None}, 'status': {'state': 'SUCCESSFUL', 'message': 'Reuse existing object'}}, {'sourceObject': {'id': 'aeOwF2U4Uauf5fdiFwaLCz', 'name': 'infapy', 'path': '/', 'type': 'Project', 'description': ''}, 'targetObject': {'id': None, 'name': 'infapy', 'path': '/', 'type': 'Project', 'description': None, 'status': None}, 'status': {'state': 'SUCCESSFUL', 'message': 'Create object'}}], 'sourceOrgId': 'fg1dzqDZ1K3lbHp8uq5vQB', 'checksumValid': True}\n"
]
],
[
[
"## Function: getImportLogsByImportID()",
"_____no_output_____"
],
[
"> use this method to get the import\n> logs\n>\n> Args:\n> importID (importID): provide the import id you recieved\n> from uploadZipToGetJobID Method used before this\n>\n> Returns:\n> string text: import logs in text",
"_____no_output_____"
]
],
[
[
"response = importObj.getImportLogsByImportID(importID=\"8bSlx81r2GSlX417G9XNsq\")\nprint(response)",
"> OIE_002 INFO 2021-09-26T11:09:46.194Z Starting import operation.\nExecution Client: API\nJob Name: ImportNameFromScript\nOrganization: Informatica\nRequestId: 8bSlx81r2GSlX417G9XNsq\nUser: prash1234\n> OIE_006 INFO 2021-09-26T11:09:46.499Z Successfully imported object [/Explore/infapy] of type [Project] id [aeOwF2U4Uauf5fdiFwaLCz] to [/Explore/infapy]\n> OIE_006 INFO 2021-09-26T11:09:47.473Z Successfully imported object [/Explore/infapy/m_infapy_test] of type [DTEMPLATE] id [3vVj4xdOpKsgAqwRSyhQM3] to [/Explore/infapy/m_infapy_test]\n> OIE_006 INFO 2021-09-26T11:09:48.569Z Successfully imported object [/Explore/infapy/mt_infapy_test] of type [MTT] id [1QcjcQ9hXwwcqW1P93354V] to [/Explore/infapy/mt_infapy_test]\n> OIE_003 INFO 2021-09-26T11:09:48.569Z Finished import operation.\nJob Name: ImportNameFromScript\nStart Time: 9/26/21 11:09 AM\nEnd Time: 9/26/21 11:09 AM\nStarted by: prash1234\nStart Method: API\nSource Organization: GCS IICS\nStatus: SUCCESSFUL\n\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
4abf9f9d9f7271851672a4a209d179bfe31a3d2a
| 20,387 |
ipynb
|
Jupyter Notebook
|
aata/boolean-sage.ipynb
|
johnperry-math/cocalc-examples
|
394479e972dc2b74211113bbb43bc1ec4ec9978c
|
[
"Apache-2.0",
"CC-BY-4.0"
] | 13 |
2017-09-06T23:04:59.000Z
|
2021-04-05T11:08:51.000Z
|
aata/boolean-sage.ipynb
|
johnperry-math/cocalc-examples
|
394479e972dc2b74211113bbb43bc1ec4ec9978c
|
[
"Apache-2.0",
"CC-BY-4.0"
] | 9 |
2018-02-01T15:58:28.000Z
|
2021-07-14T15:18:35.000Z
|
aata/boolean-sage.ipynb
|
johnperry-math/cocalc-examples
|
394479e972dc2b74211113bbb43bc1ec4ec9978c
|
[
"Apache-2.0",
"CC-BY-4.0"
] | 10 |
2017-10-26T17:30:03.000Z
|
2021-12-11T07:25:28.000Z
| 261.371795 | 1,003 | 0.67651 |
[
[
[
"%%html\n<link href=\"http://mathbook.pugetsound.edu/beta/mathbook-content.css\" rel=\"stylesheet\" type=\"text/css\" />\n<link href=\"https://aimath.org/mathbook/mathbook-add-on.css\" rel=\"stylesheet\" type=\"text/css\" />\n<style>.subtitle {font-size:medium; display:block}</style>\n<link href=\"https://fonts.googleapis.com/css?family=Open+Sans:400,400italic,600,600italic\" rel=\"stylesheet\" type=\"text/css\" />\n<link href=\"https://fonts.googleapis.com/css?family=Inconsolata:400,700&subset=latin,latin-ext\" rel=\"stylesheet\" type=\"text/css\" /><!-- Hide this cell. -->\n<script>\nvar cell = $(\".container .cell\").eq(0), ia = cell.find(\".input_area\")\nif (cell.find(\".toggle-button\").length == 0) {\nia.after(\n $('<button class=\"toggle-button\">Toggle hidden code</button>').click(\n function (){ ia.toggle() }\n )\n )\nia.hide()\n}\n</script>\n",
"_____no_output_____"
]
],
[
[
"**Important:** to view this notebook properly you will need to execute the cell above, which assumes you have an Internet connection. It should already be selected, or place your cursor anywhere above to select. Then press the \"Run\" button in the menu bar above (the right-pointing arrowhead), or press Shift-Enter on your keyboard.",
"_____no_output_____"
],
[
"$\\newcommand{\\identity}{\\mathrm{id}}\n\\newcommand{\\notdivide}{\\nmid}\n\\newcommand{\\notsubset}{\\not\\subset}\n\\newcommand{\\lcm}{\\operatorname{lcm}}\n\\newcommand{\\gf}{\\operatorname{GF}}\n\\newcommand{\\inn}{\\operatorname{Inn}}\n\\newcommand{\\aut}{\\operatorname{Aut}}\n\\newcommand{\\Hom}{\\operatorname{Hom}}\n\\newcommand{\\cis}{\\operatorname{cis}}\n\\newcommand{\\chr}{\\operatorname{char}}\n\\newcommand{\\Null}{\\operatorname{Null}}\n\\newcommand{\\lt}{<}\n\\newcommand{\\gt}{>}\n\\newcommand{\\amp}{&}\n$",
"_____no_output_____"
],
[
"<div class=\"mathbook-content\"><h2 class=\"heading hide-type\" alt=\"Section 19.7 Sage\"><span class=\"type\">Section</span><span class=\"codenumber\">19.7</span><span class=\"title\">Sage</span></h2><a href=\"boolean-sage.ipynb\" class=\"permalink\">¶</a></div>",
"_____no_output_____"
],
[
"<div class=\"mathbook-content\"></div>",
"_____no_output_____"
],
[
"<div class=\"mathbook-content\"><p id=\"p-3037\">Sage has support for both partially ordered sets (“posets”) and lattices, and does an excellent job of providing visual depictions of both.</p></div>",
"_____no_output_____"
],
[
"<div class=\"mathbook-content\"><h3 class=\"heading hide-type\" alt=\"Subsection Creating Partially Ordered Sets\"><span class=\"type\">Subsection</span><span class=\"codenumber\" /><span class=\"title\">Creating Partially Ordered Sets</span></h3></div>",
"_____no_output_____"
],
[
"<div class=\"mathbook-content\"><p id=\"p-3038\">Example <a href=\"section-boolean-lattices.ipynb#example-boolean-poset-divisors-24\" class=\"xref\" alt=\"Example 19.6 \" title=\"Example 19.6 \">19.6</a> in the text is a good example to replicate as a demonstration of Sage commands. We first define the elements of the set $X\\text{.}$</p></div>",
"_____no_output_____"
]
],
[
[
"X = (24).divisors()\nX",
"_____no_output_____"
]
],
[
[
"<div class=\"mathbook-content\"><p id=\"p-3039\">One approach to creating the relation is to specify <em class=\"emphasis\">every</em> instance where one element is comparable to the another. So we build a list of pairs, where each pair contains comparable elements, with the lesser one first. This is the set of relations.</p></div>",
"_____no_output_____"
]
],
[
[
"R = [(a,b) for a in X for b in X if a.divides(b)]; R",
"_____no_output_____"
]
],
[
[
"<div class=\"mathbook-content\"><p id=\"p-3040\">We construct the poset by giving the the <code class=\"code-inline tex2jax_ignore\">Poset</code> constructor a list containing the elements and the relations. We can then easily get a “plot” of the poset. Notice the plot just shows the “cover relations” — a minimal set of comparisons which the assumption of transitivity would expand into the set of all the relations.</p></div>",
"_____no_output_____"
]
],
[
[
"D = Poset([X, R])\nD.plot()",
"_____no_output_____"
]
],
[
[
"<div class=\"mathbook-content\"><p id=\"p-3041\">Another approach to creating a <code class=\"code-inline tex2jax_ignore\">Poset</code> is to let the poset constructor run over all the pairs of elements, and all we do is give the constructor a way to test if two elements are comparable. Our comparison function should expect two elements and then return <code class=\"code-inline tex2jax_ignore\">True</code> or <code class=\"code-inline tex2jax_ignore\">False</code>. A “lambda” function is one way to quickly build such a function. This may be a new idea for you, but mastering lambda functions can be a great convenience. Notice that “lambda” is a word reserved for just this purpose (so, for example, <code class=\"code-inline tex2jax_ignore\">lambda</code> is a bad choice for the name of an eigenvalue of a matrix). There are other ways to make functions in Sage, but a lambda function is quickest when the function is simple.</p></div>",
"_____no_output_____"
]
],
[
[
"divisible = lambda x, y: x.divides(y)\nL = Poset([X, divisible])\nL == D",
"_____no_output_____"
],
[
"L.plot()",
"_____no_output_____"
]
],
[
[
"<div class=\"mathbook-content\"><p id=\"p-3042\">Sage also has a collection of stock posets. Some are one-shot constructions, while others are members of parameterized families. Use tab-completion on <code class=\"code-inline tex2jax_ignore\">Posets.</code> to see the full list. Here are some examples.</p></div>",
"_____no_output_____"
],
[
"<div class=\"mathbook-content\"><p id=\"p-3043\">A one-shot construction. Perhaps what you would expect, though there might be other, equally plausible, alternatives.</p></div>",
"_____no_output_____"
]
],
[
[
"Q = Posets.PentagonPoset()\nQ.plot()",
"_____no_output_____"
]
],
[
[
"<div class=\"mathbook-content\"><p id=\"p-3044\">A parameterized family. This is the classic example where the elements are subsets of a set with $n$ elements and the relation is “subset of.”</p></div>",
"_____no_output_____"
]
],
[
[
"S = Posets.BooleanLattice(4)\nS.plot()",
"_____no_output_____"
]
],
[
[
"<div class=\"mathbook-content\"><p id=\"p-3045\">And random posets. These can be useful for testing and experimenting, but are unlikely to exhibit special cases that may be important. You might run the following command many times and vary the second argument, which is a rough upper bound on the probability any two elements are comparable. Remember that the plot only shows the cover relations. The more elements that are comparable, the more “vertically stretched” the plot will be.</p></div>",
"_____no_output_____"
]
],
[
[
"T = Posets.RandomPoset(20,0.05)\nT.plot()",
"_____no_output_____"
]
],
[
[
"<div class=\"mathbook-content\"><h3 class=\"heading hide-type\" alt=\"Subsection Properties of a Poset\"><span class=\"type\">Subsection</span><span class=\"codenumber\" /><span class=\"title\">Properties of a Poset</span></h3></div>",
"_____no_output_____"
],
[
"<div class=\"mathbook-content\"><p id=\"p-3046\">Once you have a poset, what can you do with it? Let's return to our first example, <code class=\"code-inline tex2jax_ignore\">D</code>. We can of course determine if one element is less than another, which is the fundamental structure of a poset.</p></div>",
"_____no_output_____"
]
],
[
[
"D.is_lequal(4, 8)",
"_____no_output_____"
],
[
"D.is_lequal(4, 4)",
"_____no_output_____"
],
[
"D.is_less_than(4, 8)",
"_____no_output_____"
],
[
"D.is_less_than(4, 4)",
"_____no_output_____"
],
[
"D.is_lequal(6, 8)",
"_____no_output_____"
],
[
"D.is_lequal(8, 6)",
"_____no_output_____"
]
],
[
[
"<div class=\"mathbook-content\"><p id=\"p-3047\">Notice that <code class=\"code-inline tex2jax_ignore\">6</code> and <code class=\"code-inline tex2jax_ignore\">8</code> are not comparable in this poset (it is a <em class=\"emphasis\">partial</em> order). The methods <code class=\"code-inline tex2jax_ignore\">.is_gequal()</code> and <code class=\"code-inline tex2jax_ignore\">.is_greater_than()</code> work similarly, but returns <code class=\"code-inline tex2jax_ignore\">True</code> if the first element is greater (or equal).</p></div>",
"_____no_output_____"
]
],
[
[
"D.is_gequal(8, 4)",
"_____no_output_____"
],
[
"D.is_greater_than(4, 8)",
"_____no_output_____"
]
],
[
[
"<div class=\"mathbook-content\"><p id=\"p-3048\">We can find the largest and smallest elements of a poset. This is a random poset built with a 10%probability, but copied here to be repeatable.</p></div>",
"_____no_output_____"
]
],
[
[
"X = range(20)\nC = [[18, 7], [9, 11], [9, 10], [11, 8], [6, 10],\n [10, 2], [0, 2], [2, 1], [1, 8], [8, 12],\n [8, 3], [3, 15], [15, 7], [7, 16], [7, 4],\n [16, 17], [16, 13], [4, 19], [4, 14], [14, 5]]\nP = Poset([X, C])\nP.plot()",
"_____no_output_____"
],
[
"P.minimal_elements()",
"_____no_output_____"
],
[
"P.maximal_elements()",
"_____no_output_____"
]
],
[
[
"<div class=\"mathbook-content\"><p id=\"p-3049\">Elements of a poset can be partioned into level sets. In plots of posets, elements at the same level are plotted vertically at the same height. Each level set is obtained by removing all of the previous level sets and then taking the minimal elements of the result.</p></div>",
"_____no_output_____"
]
],
[
[
"P.level_sets()",
"_____no_output_____"
]
],
[
[
"<div class=\"mathbook-content\"><p id=\"p-3050\">If we make two elements in <code class=\"code-inline tex2jax_ignore\">R</code> comparable when they had not previously been, this is an extension of <code class=\"code-inline tex2jax_ignore\">R</code>. Consider all possible extensions of one poset — we can make a poset from all of these, where set inclusion is the relation. A linear extension is a maximal element in this poset of posets. Informally, we are adding as many new relations as possible, consistent with the original poset and so that the result is a total order. In other words, there is an ordering of the elements that is consistent with the order in the poset. We can build such a thing, but the output is just a list of the elements in the linear order. A computer scientist would be inclined to call this a “topological sort.”</p></div>",
"_____no_output_____"
]
],
[
[
"linear = P.linear_extension(); linear",
"_____no_output_____"
]
],
[
[
"<div class=\"mathbook-content\"><p id=\"p-3051\">We can construct subposets by giving a set of elements to induce the new poset. Here we take roughly the “bottom half” of the random poset <code class=\"code-inline tex2jax_ignore\">P</code> by inducing the subposet on a union of some of the level sets.</p></div>",
"_____no_output_____"
]
],
[
[
"level = P.level_sets()\nbottomhalf = sum([level[i] for i in range(5)], [])\nB = P.subposet(bottomhalf)\nB.plot()",
"_____no_output_____"
]
],
[
[
"<div class=\"mathbook-content\"><p id=\"p-3052\">The dual of a poset retains the same set of elements, but reverses any comparisons.</p></div>",
"_____no_output_____"
]
],
[
[
"Pdual = P.dual()\nPdual.plot()",
"_____no_output_____"
]
],
[
[
"<div class=\"mathbook-content\"><p id=\"p-3053\">Taking the dual of the divisibility poset from Example <a href=\"section-boolean-lattices.ipynb#example-boolean-poset-divisors-24\" class=\"xref\" alt=\"Example 19.6 \" title=\"Example 19.6 \">19.6</a> would be like changing the relation to “is a multiple of.”</p></div>",
"_____no_output_____"
]
],
[
[
"Ddual = D.dual()\nDdual.plot()",
"_____no_output_____"
]
],
[
[
"<div class=\"mathbook-content\"><h3 class=\"heading hide-type\" alt=\"Subsection Lattices\"><span class=\"type\">Subsection</span><span class=\"codenumber\" /><span class=\"title\">Lattices</span></h3></div>",
"_____no_output_____"
],
[
"<div class=\"mathbook-content\"><p id=\"p-3054\">Every lattice is a poset, so all the commands above will perform equally well for a lattice. But how do you create a lattice? Simple — first create a poset and then feed it into the <code class=\"code-inline tex2jax_ignore\">LatticePoset()</code> constructor. But realize that just because you give this constructor a poset, it does not mean a lattice will always come back out. Only if the poset is <em class=\"emphasis\">already</em> a lattice will it get upgraded from a poset to a lattice for Sage's purposes, and you will get a <code class=\"code-inline tex2jax_ignore\">ValueError</code> if the upgrade is not possible. Finally, notice that some of the posets Sage constructs are already recognized as lattices, such as the prototypical <code class=\"code-inline tex2jax_ignore\">BooleanLattice</code>.</p></div>",
"_____no_output_____"
]
],
[
[
"P = Posets.AntichainPoset(8)\nP.is_lattice()",
"_____no_output_____"
],
[
"LatticePoset(P)",
"_____no_output_____"
]
],
[
[
"<div class=\"mathbook-content\"><p id=\"p-3055\">An integer composition of $n$ is a list of positive integers that sum to $n\\text{.}$ A composition $C_1$ covers a composition $C_2$ if $C_2$ can be formed from $C_1$ by adding consecutive parts. For example, $C_1 = [2, 1, 2] \\succeq [3, 2] = C_2\\text{.}$ With this relation, the set of all integer compositions of a fixed integer $n$ is a poset that is also a lattice.</p></div>",
"_____no_output_____"
]
],
[
[
"CP = Posets.IntegerCompositions(5)\nC = LatticePoset(CP)\nC.plot()",
"_____no_output_____"
]
],
[
[
"<div class=\"mathbook-content\"><p id=\"p-3056\">A meet or a join is a fundamental operation in a lattice.</p></div>",
"_____no_output_____"
]
],
[
[
"par = C.an_element().parent()\na = par([1, 1, 1, 2])\nb = par([2, 1, 1, 1])\na, b",
"_____no_output_____"
],
[
"C.meet(a, b)",
"_____no_output_____"
],
[
"c = par([1, 4])\nd = par([2, 3])\nc, d",
"_____no_output_____"
],
[
"C.join(c, d)",
"_____no_output_____"
]
],
[
[
"<div class=\"mathbook-content\"><p id=\"p-3057\">Once a poset is upgraded to lattice status, then additional commands become available, or the character of their results changes.</p></div>",
"_____no_output_____"
],
[
"<div class=\"mathbook-content\"><p id=\"p-3058\">An example of the former is the <code class=\"code-inline tex2jax_ignore\">.is_distributive()</code> method.</p></div>",
"_____no_output_____"
]
],
[
[
"C.is_distributive()",
"_____no_output_____"
]
],
[
[
"<div class=\"mathbook-content\"><p id=\"p-3059\">An example of the latter is the <code class=\"code-inline tex2jax_ignore\">.top()</code> method. What your text calls a largest element and a smallest element of a lattice, Sage calls a top and a bottom. For a poset, <code class=\"code-inline tex2jax_ignore\">.top()</code> and <code class=\"code-inline tex2jax_ignore\">.bottom()</code> may return an element or may not (returning <code class=\"code-inline tex2jax_ignore\">None</code>), but for a lattice it is guaranteed to return exactly one element.</p></div>",
"_____no_output_____"
]
],
[
[
"C.top()",
"_____no_output_____"
],
[
"C.bottom()",
"_____no_output_____"
]
],
[
[
"<div class=\"mathbook-content\"><p id=\"p-3060\">Notice that the returned values are all elements of the lattice, in this case ordered lists of integers summing to $5\\text{.}$</p></div>",
"_____no_output_____"
],
[
"<div class=\"mathbook-content\"><p id=\"p-3061\">Complements now make sense in a lattice. The result of the <code class=\"code-inline tex2jax_ignore\">.complements()</code> method is a dictionary that uses elements of the lattice as the keys. We say the dictionary is “indexed” by the elements of the lattice. The result is a list of the complements of the element. We call this the “value” of the key-value pair. (You may know dictionaries as “associative arrays”, but they are really just fancy functions.)</p></div>",
"_____no_output_____"
]
],
[
[
"comp = C.complements()\ncomp[par([1, 1, 1, 2])]",
"_____no_output_____"
]
],
[
[
"<div class=\"mathbook-content\"><p id=\"p-3062\">The lattice of integer compositions is a complemented lattice, as we can see by the result that each element has a single (unique) complement, evidenced by the lists of length $1$ in the values of the dictionary. Or we can just ask Sage via <code class=\"code-inline tex2jax_ignore\">.is_complemented()</code>. Dictionaries have no inherent order, so you may get different output each time you inspect the dictionary.</p></div>",
"_____no_output_____"
]
],
[
[
"comp",
"_____no_output_____"
],
[
"[len(e[1]) for e in comp.items()]",
"_____no_output_____"
],
[
"C.is_complemented()",
"_____no_output_____"
]
],
[
[
"<div class=\"mathbook-content\"><p id=\"p-3063\">There are many more commands which apply to posets and lattices, so build a few and use tab-completion liberally to explore. There is more to discover than we can cover in just a single chapter, but you now have the basic tools to profitably study posets and lattices in Sage.</p></div>",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
]
] |
4abfa2d5e243e4c1593ad3b7d29aac18ca718bd3
| 135,272 |
ipynb
|
Jupyter Notebook
|
reports/C11-lsf-Deploy_2021-11-10T10:52:01.ipynb
|
LeandroSoaresFaria/Insiders
|
68e612e39105509a5e2ea7562d0b600e23d4f119
|
[
"FSFAP"
] | null | null | null |
reports/C11-lsf-Deploy_2021-11-10T10:52:01.ipynb
|
LeandroSoaresFaria/Insiders
|
68e612e39105509a5e2ea7562d0b600e23d4f119
|
[
"FSFAP"
] | null | null | null |
reports/C11-lsf-Deploy_2021-11-10T10:52:01.ipynb
|
LeandroSoaresFaria/Insiders
|
68e612e39105509a5e2ea7562d0b600e23d4f119
|
[
"FSFAP"
] | null | null | null | 34.437882 | 1,743 | 0.526583 |
[
[
[
"<span style=\"color:red; font-family:Helvetica Neue, Helvetica, Arial, sans-serif; font-size:2em;\">An Exception was encountered at '<a href=\"#papermill-error-cell\">In [40]</a>'.</span>",
"_____no_output_____"
],
[
"# PA005: High Value Customer Identification",
"_____no_output_____"
],
[
"# 0.0 Imports",
"_____no_output_____"
]
],
[
[
"import os\nimport joblib\nimport s3fs\nimport pickle\nimport re\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\n\nimport umap.umap_ as umap\n\nfrom matplotlib import pyplot as plt\n\nfrom sklearn import cluster as c\nfrom sklearn import metrics as m\nfrom sklearn import ensemble as en\nfrom sklearn import preprocessing as pp\nfrom sklearn import decomposition as dd\nfrom sklearn import manifold as mn\nfrom sklearn import mixture as mx\n\nfrom plotly import express as px\nfrom scipy.cluster import hierarchy as hc\nfrom sqlalchemy import create_engine",
"_____no_output_____"
],
[
"AWS_ACCESS_KEY_ID=os.environ.get( 'AWS_ACCESS_KEY_ID')\nAWS_SECRET_ACCESS_KEY=os.environ.get( 'AWS_SECRET_ACCESS_KEY')",
"_____no_output_____"
]
],
[
[
"## 0.2. Load Dataset",
"_____no_output_____"
]
],
[
[
"# load data\n#'path_local' = '/home/leandro/repos/insiders_clustering/'\npath_s3 = 's3://insiders-datasett/'\ndf_raw = pd.read_csv(path_s3 + 'Ecommerce.csv' , encoding = 'iso-8859-1')\n\ndf_raw.head()",
"_____no_output_____"
],
[
"df_raw.shape",
"_____no_output_____"
]
],
[
[
"# 1.0. Descrição dos dados",
"_____no_output_____"
]
],
[
[
"df1 = df_raw.copy()",
"_____no_output_____"
],
[
"df1.head()",
"_____no_output_____"
]
],
[
[
"## 1.1. Rename Columns",
"_____no_output_____"
]
],
[
[
"# Rename Columns\ncols_new = ['invoice_no','stock_code','description','quantity','invoice_date','unit_price','customer_id','country']\n\ndf1.columns = cols_new\n\ndf1.sample()",
"_____no_output_____"
],
[
"df_raw.sample()",
"_____no_output_____"
]
],
[
[
"## 1.2. Data Dimensions",
"_____no_output_____"
]
],
[
[
"print( 'Number of rows: {}'.format ( df1.shape[0] ) )\nprint( 'Number of cols: {}'.format ( df1.shape[1] ) )\n",
"Number of rows: 541909\nNumber of cols: 8\n"
]
],
[
[
"## 1.3. Data Types",
"_____no_output_____"
]
],
[
[
"df1.dtypes",
"_____no_output_____"
]
],
[
[
"## 1.4. Check NA",
"_____no_output_____"
]
],
[
[
"df1.isna().sum()",
"_____no_output_____"
]
],
[
[
"## 1.5. Replace NA",
"_____no_output_____"
]
],
[
[
"df_missing = df1.loc[ df1['customer_id'].isna(), : ]\ndf_not_missing = df1.loc[~df1['customer_id'].isna(), : ]",
"_____no_output_____"
],
[
"# Create Reference\ndf_backup = pd.DataFrame( df_missing['invoice_no'].drop_duplicates())\ndf_backup['customer_id'] = np.arange( 19000, 19000 +len( df_backup),1)\n\n# Merge original with reference dataframe\ndf1 = pd.merge( df1, df_backup, on= 'invoice_no', how= 'left' )\n\n# Coalesce\ndf1[ 'customer_id'] = df1['customer_id_x'].combine_first( df1[ 'customer_id_y' ] )\n\n# Drop extra columns\n\ndf1 = df1.drop( columns = ['customer_id_x', 'customer_id_y'], axis = 1)\n",
"_____no_output_____"
],
[
"df1.isna().sum()",
"_____no_output_____"
]
],
[
[
"## 1.6. Change Dtypes",
"_____no_output_____"
]
],
[
[
"# Invoice Date\n\ndf1['invoice_date'] = pd.to_datetime( df1['invoice_date'], format = '%d-%b-%y')\n\n# Customer Id\n\ndf1['customer_id'] = df1['customer_id'].astype(int)\n\ndf1.head()",
"_____no_output_____"
],
[
"df1.dtypes",
"_____no_output_____"
]
],
[
[
"## 1.7. Descriptive Statistics",
"_____no_output_____"
]
],
[
[
"num_attributes = df1.select_dtypes( include = [ 'int64', 'float64'] )\ncat_attributes = df1.select_dtypes( exclude = [ 'int64', 'float64','datetime64[ns]'])",
"_____no_output_____"
]
],
[
[
"### 1.7.1 Numerical Attributes",
"_____no_output_____"
]
],
[
[
"# Central tendency - mean, median\nct1 = pd.DataFrame(num_attributes.apply( np.mean )).T\nct2 = pd.DataFrame(num_attributes.apply( np.median )).T\n\n# Dispersion - desvio padrão, mínimo, máximo, range, skew, kurtosis\nd1 = pd.DataFrame( num_attributes.apply( np.std ) ).T\nd2 = pd.DataFrame( num_attributes.apply( np.min ) ).T\nd3 = pd.DataFrame( num_attributes.apply( np.max ) ).T\nd4 = pd.DataFrame( num_attributes.apply( lambda x: x.max( ) - x.min() ) ).T\nd5 = pd.DataFrame( num_attributes.apply( lambda x: x.skew( ) ) ).T\nd6 = pd.DataFrame( num_attributes.apply( lambda x: x.kurtosis() ) ).T\n\n\n# Concatenate\n\nm1 = pd.concat( [d2, d3, d4, ct1, ct2, d1, d5, d6] ).T.reset_index()\nm1.columns = ['attributes', 'min', 'max', 'range', 'mean', 'mediana', 'std', 'skew', 'kurtosis']\nm1",
"_____no_output_____"
]
],
[
[
"### 1.7.2 Categorical Attributes",
"_____no_output_____"
]
],
[
[
"cat_attributes.head()",
"_____no_output_____"
]
],
[
[
"### Invoice_No",
"_____no_output_____"
]
],
[
[
"# Problema: Temos invoice com letras e números\n\n# Identificação >\n\ndf_letter_invoices = df1.loc[df1['invoice_no'].apply( lambda x : bool( re.search( '[^0-9]+', x ) ) ), :]\n\nprint('Total number of invoices: {}'.format( len( df_letter_invoices ))) \nprint('Total number of negative quantity: {}'.format( len(df_letter_invoices[ df_letter_invoices['quantity'] < 0])))",
"Total number of invoices: 9291\nTotal number of negative quantity: 9288\n"
]
],
[
[
"### Stock Code",
"_____no_output_____"
]
],
[
[
"# Check stock codes only characters\ndf1.loc[df1['stock_code'].apply( lambda x : bool( re.search( '^[a-zA-Z]+$', x ) ) ) ,'stock_code'].unique()\n\n# Ação:\n## 1. Remove stock_code in ['POST', 'D', 'M', 'PADS', 'DOT', 'CRUK']\n",
"_____no_output_____"
]
],
[
[
"### Description",
"_____no_output_____"
]
],
[
[
"df1.head()\n\n# Ação: Delete Description",
"_____no_output_____"
]
],
[
[
"### Country",
"_____no_output_____"
]
],
[
[
"df1['country'].unique()",
"_____no_output_____"
],
[
"df1['country'].value_counts( normalize = True).head()",
"_____no_output_____"
],
[
"df1[['customer_id','country']].drop_duplicates().groupby( 'country').count().reset_index().sort_values( 'customer_id', ascending = False).head()",
"_____no_output_____"
]
],
[
[
"# 2.0. Filtragem de Variáveis",
"_____no_output_____"
]
],
[
[
"df2 = df1.copy()",
"_____no_output_____"
],
[
"df2.dtypes",
"_____no_output_____"
],
[
" # === Numerical attributes ====\ndf2 = df2.loc[df2['unit_price'] >= 0.04, :]\n\n# === Categorical attributes ====\ndf2 = df2[~df2['stock_code'].isin( ['POST', 'D', 'DOT', 'M', 'S', 'AMAZONFEE', 'm', 'DCGSSBOY',\n 'DCGSSGIRL', 'PADS', 'B', 'CRUK'], )]\n\n# description\ndf2 = df2.drop( columns='description', axis=1 )\n\n# map \ndf2 = df2[~df2['country'].isin( ['European Community', 'Unspecified' ] ) ]\n\n\n# bad users\ndf2 = df2[~df2['customer_id'].isin( [16446] )]\n\n# quantity\ndf2_returns = df2.loc[df1['quantity'] < 0, :]\ndf2_purchases = df2.loc[df1['quantity'] >= 0, :]\n\n\n\n",
"_____no_output_____"
]
],
[
[
"# 3.0. Feature Engineering",
"_____no_output_____"
]
],
[
[
"df3 = df2.copy()",
"_____no_output_____"
]
],
[
[
"## 3.1. Feature Creation",
"_____no_output_____"
]
],
[
[
"# Data Reference\n\ndf_ref = df3.drop( ['invoice_no', 'stock_code', 'quantity', 'invoice_date', 'unit_price', 'country'], axis=1 ).drop_duplicates( ignore_index=True )",
"_____no_output_____"
]
],
[
[
"### 3.1.1 Gross Revenue",
"_____no_output_____"
]
],
[
[
"# Gross Revenue ( Faturamento ) quantity * price\ndf2_purchases.loc[:, 'gross_revenue'] = df2_purchases.loc[:, 'quantity'] * df2_purchases.loc[:, 'unit_price']\n\n# Monetary\ndf_monetary = df2_purchases.loc[:, ['customer_id', 'gross_revenue']].groupby( 'customer_id' ).sum().reset_index()\ndf_ref = pd.merge( df_ref, df_monetary, on='customer_id', how='left' )\n\ndf_ref.isna().sum()",
"/home/leandro/.pyenv/versions/pa005insiderscluestering/lib/python3.8/site-packages/pandas/core/indexing.py:1667: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n self.obj[key] = value\n"
]
],
[
[
"### 3.1.2 Recency - Day from last purchase",
"_____no_output_____"
]
],
[
[
" # Recency - Last day purchase\ndf_recency = df2_purchases.loc[:, ['customer_id', 'invoice_date']].groupby( 'customer_id' ).max().reset_index()\ndf_recency['recency_days'] = ( df2['invoice_date'].max() - df_recency['invoice_date'] ).dt.days\ndf_recency = df_recency[['customer_id', 'recency_days']].copy()\ndf_ref = pd.merge( df_ref, df_recency, on='customer_id', how='left' )\n\ndf_ref.isna().sum()",
"_____no_output_____"
]
],
[
[
"### 3.1.4.1 Quantity of products purchased",
"_____no_output_____"
]
],
[
[
" # Numero de produtos\ndf_freq = (df2_purchases.loc[:, ['customer_id', 'stock_code']].groupby( 'customer_id' ).count()\n .reset_index()\n .rename( columns={'stock_code': 'qtde_products'} ) )\ndf_ref = pd.merge( df_ref, df_freq, on='customer_id', how='left' )\ndf_ref.isna().sum()",
"_____no_output_____"
]
],
[
[
"### 3.1.7 Number of returns",
"_____no_output_____"
]
],
[
[
"# Number of Returns\ndf_returns = df2_returns [[ 'customer_id', 'quantity']].groupby( 'customer_id').sum().reset_index().rename( columns ={'quantity': 'qtde_returns'} )\ndf_returns['qtde_returns'] = df_returns['qtde_returns'] * -1\n\ndf_ref = pd.merge( df_ref, df_returns, how = 'left', on= 'customer_id')\ndf_ref.loc[ df_ref['qtde_returns'].isna(), 'qtde_returns'] = 0\n\n\ndf_ref.isna().sum()",
"_____no_output_____"
],
[
"# Number of Returns\ndf2_returns [[ 'customer_id', 'quantity']].groupby( 'customer_id').sum().reset_index().rename( columns ={'quantity': 'qtde_returns'} )\n ",
"_____no_output_____"
]
],
[
[
"### 3.1.10 Frequency Purchase",
"_____no_output_____"
]
],
[
[
"df_aux = (df2_purchases[['customer_id', 'invoice_no', 'invoice_date']].drop_duplicates()\n .groupby( 'customer_id')\n .agg( max_ = ( 'invoice_date', 'max' ),\n min_ = ( 'invoice_date', 'min'),\n days_ = ('invoice_date', lambda x : ( ( x.max()- x.min() ).days) + 1 ) ,\n buy_ = ( 'invoice_no', 'count') ) ).reset_index()\n\n\n# Frequency\ndf_aux['frequency'] = df_aux[['buy_', 'days_']].apply ( lambda x: x['buy_'] / x['days_'] if x['days_'] != 0 else 0, axis = 1)\n\n# Merge\ndf_ref = pd.merge( df_ref, df_aux[['customer_id', 'frequency']], on = 'customer_id', how = 'left')\n\ndf_ref.isna().sum()",
"_____no_output_____"
],
[
"df_ref.head()",
"_____no_output_____"
]
],
[
[
"# 4.0. Exploratory Data Analysis",
"_____no_output_____"
]
],
[
[
"df4 = df_ref.dropna()\n",
"_____no_output_____"
]
],
[
[
"## 4.3 Estudo do Espaço",
"_____no_output_____"
]
],
[
[
"# Selected dataset\ncols_selected = ['customer_id', 'gross_revenue', 'recency_days', 'qtde_products', 'frequency', 'qtde_returns']\ndf43 = df4[cols_selected].drop( columns = 'customer_id', axis = 1)",
"_____no_output_____"
]
],
[
[
"<span id=\"papermill-error-cell\" style=\"color:red; font-family:Helvetica Neue, Helvetica, Arial, sans-serif; font-size:2em;\">Execution using papermill encountered an exception here and stopped:</span>",
"_____no_output_____"
]
],
[
[
"mm = pp.MinMaxScaler()\nfs = s3fs.S3FileSystem( anon=False, key=AWS_ACCESS_KEY_ID , secret=AWS_SECRET_ACCESS_KEY )\n\ngross_revenue_scaler = pickle.load( fs.open( 's3://insiders-datasett/gross_revenue_scaler.pkl', 'rb') )\ndf43['gross_revenue'] = gross_revenue_scaler.transform( df43[['gross_revenue']] )\n\nrecency_days_scaler = pickle.load( fs.open( 's3://insiders-datasett/recency_days_scaler.pkl', 'rb' ) )\ndf43['recency_days'] = recency_days_scaler.transform( df43[['recency_days']] )\n\nqtde_products_scaler = pickle.load( fs.open( 's3://insiders-datasett/qtde_products_scaler.pkl', 'rb' ) )\ndf43['qtde_products'] = qtde_products_scaler.transform( df43[['qtde_products']])\n\nfrequency_scaler = pickle.load( fs.open( 's3://insiders-datasett/frequency_scaler.pkl', 'rb' ) )\ndf43['frequency'] = frequency_scaler.transform( df43[['frequency']])\n\nqtde_returns_scaler = pickle.load( fs.open( 's3://insiders-datasett/qtde_returns_scaler.pkl', 'rb' ) )\ndf43['qtde_returns'] = qtde_returns_scaler.transform( df43[['qtde_returns']])\n\nX = df43.copy()",
"_____no_output_____"
],
[
"X.shape",
"_____no_output_____"
]
],
[
[
"### 4.3.4 Tree-Based embedding",
"_____no_output_____"
]
],
[
[
"# Training dataset\n\nX = df43.drop( columns = [ 'gross_revenue'], axis = 1 )\ny = df43['gross_revenue']\n\n# # Model definittion\n# rf_model = en.RandomForestRegressor ( n_estimators = 100, random_state= 42)\n\n# # Model trainning\n# rf_model.fit( X,y)\n\n# Carregando modelo\n#rf_model = pickle.load( open('../models/rf_model.pkl', 'rb'))\nrf_model = pickle.load( fs.open('s3://insiders-datasett/rf_model.pkl', 'rb'))\n\n\n#Leaf\ndf_leaf = pd.DataFrame( rf_model.apply( X ) )\n\n# DataFrame Leaf\n\n",
"_____no_output_____"
],
[
"# Reduzer dimensionality\n# reducer = umap.UMAP( random_state=42 )\n# embedding = reducer.fit_transform( df_leaf )\n\n\n#reducer = pickle.load( open( '../features/umap_reducer.pkl', 'rb'))\nreducer = pickle.load( fs.open( 's3://insiders-datasett/umap_reducer.pkl', 'rb'))\n\nembedding = reducer.transform( df_leaf)\n\n# embedding\ndf_tree = pd.DataFrame()\ndf_tree['embedding_x'] = embedding[:, 0]\ndf_tree['embedding_y'] = embedding[:, 1]\n",
"_____no_output_____"
]
],
[
[
"# 5.0 Data Preparation",
"_____no_output_____"
]
],
[
[
"df5 = df_tree.copy( )",
"_____no_output_____"
],
[
"#df5.to_csv(path_s3+'src/data/tree_based_embedding.csv')",
"_____no_output_____"
]
],
[
[
"# 7.0. Hyperparameter Fine-tuning",
"_____no_output_____"
]
],
[
[
"X = df_tree.copy()",
"_____no_output_____"
],
[
"X.head()",
"_____no_output_____"
]
],
[
[
"# 8.0. Model Training",
"_____no_output_____"
],
[
"## 8.1. Final Model",
"_____no_output_____"
]
],
[
[
"# Model Definition\nk = 8\ngmm_model = mx.GaussianMixture ( n_components = k, n_init = 300, random_state = 32)\n# Model Training\ngmm_model.fit(X)\n\n# Clustering\nlabels = gmm_model.predict( X )\n",
"_____no_output_____"
]
],
[
[
"## 8.2. Cluster Validation",
"_____no_output_____"
]
],
[
[
"## WSS ( Within-cluster sum of square)\n#print( 'WSS value: {}'.format( kmeans.inertia_ ) )\n\n## SS ( Silhouette Score )\nprint( 'SS value: {}'.format( m.silhouette_score( X, labels, metric='euclidean' ) ) )",
"_____no_output_____"
]
],
[
[
"# 9.0. Cluster Analysis",
"_____no_output_____"
]
],
[
[
"df92 = df4[cols_selected].copy()\ndf92['cluster'] = labels\n\n\n# change dtypes\ndf92['recency_days'] = df92['recency_days'].astype( int )\ndf92['qtde_products'] = df92['qtde_products'].astype( int )\ndf92['qtde_returns'] = df92['qtde_returns'].astype( int )\n\nfrom datetime import datetime\n\n#df92['last_training_timestamp'] = datetime.now().strftime( '%Y-%m-%d %H:%M:%S')",
"_____no_output_____"
],
[
"# Number of customer\ndf_cluster = df92[['customer_id','cluster']].groupby( 'cluster' ).count().reset_index()\ndf_cluster['perc_customer'] = 100*(df_cluster['customer_id']/df_cluster['customer_id'].sum())\n\n# Average gross revenue\ndf_avg_gross_revenue = df92[['gross_revenue', 'cluster']].groupby('cluster').mean().reset_index()\ndf_cluster = pd.merge( df_cluster, df_avg_gross_revenue, how = 'inner', on = 'cluster')\n\n# Average recency days\ndf_avg_recency_days = df92[['recency_days', 'cluster']].groupby('cluster').mean().reset_index()\ndf_cluster = pd.merge( df_cluster, df_avg_recency_days, how = 'inner', on = 'cluster')\n\n# Quantidade de produtos\ndf_qtde_products = df92[['qtde_products', 'cluster']].groupby('cluster').mean().reset_index()\ndf_cluster = pd.merge( df_cluster, df_qtde_products, how = 'inner', on = 'cluster')\n\n# Frequency\ndf_frequency = df92[['frequency', 'cluster']].groupby('cluster').mean().reset_index()\ndf_cluster = pd.merge( df_cluster, df_frequency, how = 'inner', on = 'cluster')\n\n\n# returns\ndf_qtde_returns = df92[['qtde_returns', 'cluster']].groupby('cluster').mean().reset_index()\ndf_cluster = pd.merge( df_cluster, df_qtde_returns, how = 'inner', on = 'cluster')\n\n\ndf_cluster.sort_values( 'gross_revenue', ascending = False)",
"_____no_output_____"
]
],
[
[
"02 Cluster Insiders\n\n06 Cluster More Products\n\n01 Cluster Spend Money\n\n03 Cluster Even More Products\n\n00 Cluster Less Days\n\n05 Cluster 1K\n\n07 Cluster Stop Returnres\n\n04 Cluster More Buy",
"_____no_output_____"
],
[
"### Cluster 01: ( Candidato a Insider )\n \n- Número de customers: 468 (16% do customers )\n- Faturamento médio: 8836\n- Recência média: 21 dias\n- Média de Produtos comprados: 424 produtos\n- Frequência de Produtos comprados: 0.09 produtos/dia\n- Receita em média: $8836.13,00 dólares\n \n### Cluster 02: \n \n - Número de customer: 31 (0.7% dos customers)\n - Recência em média: 14 dias\n - Compras em média: 53 compras\n - Receita em média: $ 40.543,00.\n \n ### Cluster 03: \n \n - Número de customer: 4.335 (99% dos customers)\n - Recência em média: 92 dias\n - Compras em média: 05 compras\n - Receita em média: $ 1.372,57.",
"_____no_output_____"
],
[
"# 11.0. Deploy to Production",
"_____no_output_____"
]
],
[
[
"import sqlite3\nfrom sqlalchemy import create_engine",
"_____no_output_____"
],
[
"df92.head()",
"_____no_output_____"
],
[
"host='database-insidersv.cvrkgzmlnj5s.us-east-1.rds.amazonaws.com' \nport='5432' \ndatabase='postgres'\nuser='leandro'\npwd='comunidadeds!' \n\nendpoint='postgresql://leandro:comunidadeds!@database-insidersv.cvrkgzmlnj5s.us-east-1.rds.amazonaws.com/postgres'\n\n\nconn = create_engine( endpoint )",
"_____no_output_____"
],
[
"# # create table\n# query_create_insiders = \"\"\"\n# CREATE TABLE insiders ( \n# customer_id INTEGER,\n# gross_revenue REAL,\n# recency_days INTEGER,\n# qtde_products INTEGER,\n# frequency REAL,\n# qtde_returns INTEGER,\n# cluster INTEGER\n# )\n# \"\"\"\n\n# conn.execute( query_create_insiders )",
"_____no_output_____"
],
[
"# insert data into\ndf92.to_sql( 'insiders', con=conn, if_exists='append', index=False )",
"_____no_output_____"
],
[
"df92.head()",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4abfa417f8ae0873be194d149b213713a7d099f5
| 14,715 |
ipynb
|
Jupyter Notebook
|
newton-fitness-watch.ipynb
|
Newton-Fitness/main
|
f2cc86822fb71a91280978502118f7fab6300ef3
|
[
"MIT"
] | 1 |
2021-05-25T21:43:09.000Z
|
2021-05-25T21:43:09.000Z
|
newton-fitness-watch.ipynb
|
Newton-Fitness/main
|
f2cc86822fb71a91280978502118f7fab6300ef3
|
[
"MIT"
] | null | null | null |
newton-fitness-watch.ipynb
|
Newton-Fitness/main
|
f2cc86822fb71a91280978502118f7fab6300ef3
|
[
"MIT"
] | null | null | null | 14,715 | 14,715 | 0.725246 |
[
[
[
"import pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.metrics import roc_curve\nfrom sklearn.metrics import auc\nfrom sklearn.metrics import precision_recall_curve\nfrom sklearn.metrics import precision_score\nfrom sklearn.metrics import recall_score\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import average_precision_score\nfrom inspect import signature\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import preprocessing\nfrom matplotlib import pyplot\nfrom sklearn import metrics\n\nprint(\"Setup Complete\")",
"_____no_output_____"
],
[
"df = pd.read_csv(\"../input/fitness-watch-dataset/dataset_halfSecondWindow.csv\") #dataset_halfSecondWindows\n#df.info\n# first doing label encodingon User\n\n# sorting based on the user label\n\n\n\n# plotting given_user distribution\n\n\n# making split",
"_____no_output_____"
],
[
"df.isna().sum().sum() #5893 * 70 ",
"_____no_output_____"
],
[
"cleanup_target = {\"target\": {\"Car\":1,\"Still\":2,\"Train\":3,\"Bus\":4,\"Walking\":5}}\ndf = df.replace(cleanup_target)\n\ncleanup_nums = {\"user\": {\"andrea\": 1, \"Luca\": 2, \"Damiano\": 3,\"michelangelo\": 4,\n \"Pierpaolo\": 5, \"Vincenzo\": 6,\"IvanHeibi\":7,\"AndreaCarpineti\":8,\n \"Federica\":9,\"Serena\":10,\"Claudio\":11,\"Elena\":12,\n \"Riccardo\":13}}\ndf = df.replace(cleanup_nums)\n#df = df.fillna(0)\ndf = df.fillna(df.median())\ndf1 = df.sort_values(by=['user']) \n",
"_____no_output_____"
],
[
"list_users=df.user.unique()\nax = df['user'].value_counts().plot(kind='bar')\ndf['user'].value_counts()\nax.set_xlabel(\"Users\")\nax.set_ylabel(\"Number of Responses\")\nax.figure.savefig('user_distribution.png')\n",
"_____no_output_____"
],
[
"grouped = df.groupby(df.user)\nuser_dict = {}\nsample_df = df[:0]\nfor i in range(1,10):\n user_dict[i] = grouped.get_group(i)\n user_dict[i] = user_dict[i].sample(n=2225)\n sample_df = sample_df.append(user_dict[i])",
"_____no_output_____"
],
[
"list_users=sample_df.user.unique()\nax = sample_df['user'].value_counts().plot(kind='bar')\n#sample_df['user'].value_counts()\nax.set_xlabel(\"Users\")\nax.set_ylabel(\"Number of Responses\")\nax.figure.savefig('user_distribution_sampled.png')\n",
"_____no_output_____"
],
[
"df1 = sample_df\ndf1\n",
"_____no_output_____"
],
[
"df1 = df1.replace([' ','NULL'],np.nan)\ndf1 = df1.dropna(thresh=df1.shape[0]*0.6,how='all',axis=1)\n",
"_____no_output_____"
],
[
"df1.isna().sum().sum() #5893 * 52 \ndf1",
"_____no_output_____"
],
[
"# commmon\n#df = df.dropna(axis=1, how='all')\ndf2 = df1\ntrain_pct_index1 = int(0.2 * len(df2))\ntrain_pct_index2 = int(0.4 * len(df2))\ntrain_pct_index3 = int(0.6 * len(df2))\ntrain_pct_index4 = int(0.8 * len(df2))\nprint(0,train_pct_index1,train_pct_index2,train_pct_index3,train_pct_index4,len(df2))\n# first fold:\ntrain1, test1 = df2[train_pct_index1:], df2[:train_pct_index1] # 20 to 100 \n# 2 fold:\ntrain2, test2 = df2.head(train_pct_index2).append(df2.tail(train_pct_index2)), df2[train_pct_index1:train_pct_index2] # 40 to 100 + 0 to 20\ntrain3, test3 = df2.head(-train_pct_index3).append(df2.head(train_pct_index2)), df2[train_pct_index2:train_pct_index3] # 60 to 100 + 0 to 40\ntrain4, test4 = df2.head(-train_pct_index4).append(df2.head(train_pct_index3)), df2[train_pct_index3:train_pct_index4] # 80 to 100 + 0 to 60\ntrain5, test5 = df2[:train_pct_index4], df2[train_pct_index4:] # 0 to 80\n",
"_____no_output_____"
],
[
"# first fold: train1, test1\n# train separate\n# train1 = train1.dropna(axis = 1, how='all')\n#train1 = train1.fillna(train1.mean())\n\n#df2 = train1\ntrain1 = train1.drop(['user'], axis=1)\ntrain1 = train1.drop(['id'], axis =1)\n\n# test separate\n#test1 = test1.dropna(axis=1, how='all')\n#df2 = df1\ntest1 =test1.drop(['user'], axis=1)\ntest1 = test1.drop(['id'], axis =1)\ntest1 = test1.dropna(axis=0)\n\ny = train1.target\nx = train1.loc[:, train1.columns != 'target']\ny1 = test1.target\nx1 = test1.loc[:, test1.columns != 'target']\n\nX_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2)\nmodel=RandomForestClassifier(n_estimators=100)\n\n#Train the model using the training sets y_pred=clf.predict(X_test)\nmodel.fit(X_train,y_train)\ny_pred=model.predict(X_test)\nprint(\"internal accuracy:\", metrics.accuracy_score(y_test, y_pred))\n\ny_pred=model.predict(x1)\nfold1= metrics.accuracy_score(y1, y_pred)\n\nprint(\"Accuracy:\",fold1)\n\n# second fold: train2, test2\n\n# train separate\ntrain2 = train2.drop(['user'], axis=1)\ntrain2 = train2.drop(['id'], axis =1)\n\n# test separate\ntest2 = test2.drop(['user'], axis=1)\ntest2 = test2.drop(['id'], axis =1)\ntest2 = test2.dropna(axis=0)\n\ny = train2.target\nx = train2.loc[:, train2.columns != 'target']\ny1 = test2.target\nx1 = test2.loc[:, test2.columns != 'target']\n\nX_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2)\nmodel=RandomForestClassifier(n_estimators=100)\n\n#Train the model using the training sets y_pred=clf.predict(X_test)\nmodel.fit(X_train,y_train)\ny_pred=model.predict(X_test)\nprint(\"internal accuracy:\", metrics.accuracy_score(y_test, y_pred))\n\ny_pred=model.predict(x1)\nfold2= metrics.accuracy_score(y1, y_pred)\n\nprint(\"Accuracy:\",fold2)\n\n\n# third fold: train3, test3\n\n# train separate\ntrain3 = train3.drop(['user'], axis=1)\ntrain3 = train3.drop(['id'], axis =1)\n\n# test separate\ntest3 = test3.drop(['user'], axis=1)\ntest3 = test3.drop(['id'], axis 
=1)\ntest3 = test3.dropna(axis=0)\n\ny = train3.target\nx = train3.loc[:, train3.columns != 'target']\ny1 = test3.target\nx1 = test3.loc[:, test3.columns != 'target']\n\nX_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2)\nmodel=RandomForestClassifier(n_estimators=100)\n\n#Train the model using the training sets y_pred=clf.predict(X_test)\nmodel.fit(X_train,y_train)\ny_pred=model.predict(X_test)\nprint(\"internal accuracy:\", metrics.accuracy_score(y_test, y_pred))\n\ny_pred=model.predict(x1)\nfold3= metrics.accuracy_score(y1, y_pred)\n\nprint(\"Accuracy:\",fold3)\n\n\n# forth fold: train4, test4\n\n# train separate\ntrain4 = train4.drop(['user'], axis=1)\ntrain4 = train4.drop(['id'], axis =1)\n\n# test separate\ntest4 = test4.drop(['user'], axis=1)\ntest4 = test4.drop(['id'], axis =1)\ntest4 = test4.dropna(axis=0)\n\ny = train4.target\nx = train4.loc[:, train4.columns != 'target']\ny1 = test4.target\nx1 = test4.loc[:, test4.columns != 'target']\n\nX_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2)\nmodel=RandomForestClassifier(n_estimators=100)\n\n#Train the model using the training sets y_pred=clf.predict(X_test)\nmodel.fit(X_train,y_train)\ny_pred=model.predict(X_test)\nprint(\"internal accuracy:\", metrics.accuracy_score(y_test, y_pred))\n\ny_pred=model.predict(x1)\nfold4= metrics.accuracy_score(y1, y_pred)\n\nprint(\"Accuracy:\",fold4)\n\n\n# fifth fold: train5, test5\n\n# train separate\ntrain5 = train5.drop(['user'], axis=1)\ntrain5 = train5.drop(['id'], axis =1)\n\n# test separate\ntest5 = test5.drop(['user'], axis=1)\ntest5 = test5.drop(['id'], axis =1)\n#test5 = test5.dropna(axis=0)\n\ny = train5.target\nx = train5.loc[:, train5.columns != 'target']\ny1 = test5.target\nx1 = test5.loc[:, test5.columns != 'target']\n\nX_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2)\nmodel=RandomForestClassifier(n_estimators=100)\n\n#Train the model using the training sets 
y_pred=clf.predict(X_test)\nmodel.fit(X_train,y_train)\ny_pred=model.predict(X_test)\nprint(\"internal accuracy:\", metrics.accuracy_score(y_test, y_pred))\n\ny_pred=model.predict(x1)\nfold5= metrics.accuracy_score(y1, y_pred)\n\nprint(\"Accuracy:\",fold5)\n\n",
"_____no_output_____"
],
[
"print(\"average fold:\", (fold1+fold2+fold3+fold4+fold5)/5)",
"_____no_output_____"
],
[
"import pickle\nfilename = 'model.sav'\npickle.dump(model, open(filename, 'wb'))",
"_____no_output_____"
]
],
[
[
"Feature engineering ",
"_____no_output_____"
]
],
[
[
"print(\"F1:\", f1_score(y1, y_pred, average='macro'))",
"_____no_output_____"
],
[
"to give to dilan\ndf3= df1.loc[df1['user'] == 3]\ndf4= df1.loc[df1['user'] == 4]\ndf3 = df3.drop(['user'], axis=1)\ndf3 = df3.drop(['id'], axis =1)\ndf4 = df4.drop(['user'], axis=1)\ndf4 = df4.drop(['id'], axis =1)\ndf3.to_csv(\"userdata_3.csv\")\ndf4.to_csv(\"userdata_4.csv\")",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
4abfacddb53618fe19ab7c7b08c26cd6e8b8bbd8
| 13,629 |
ipynb
|
Jupyter Notebook
|
Cap00_PresentacionCurso_202101.ipynb
|
AlbertoD10-10/Analisis_Numerico
|
abf0394180564393bad071a3b1d8725e183afc81
|
[
"MIT"
] | null | null | null |
Cap00_PresentacionCurso_202101.ipynb
|
AlbertoD10-10/Analisis_Numerico
|
abf0394180564393bad071a3b1d8725e183afc81
|
[
"MIT"
] | null | null | null |
Cap00_PresentacionCurso_202101.ipynb
|
AlbertoD10-10/Analisis_Numerico
|
abf0394180564393bad071a3b1d8725e183afc81
|
[
"MIT"
] | null | null | null | 32.372922 | 1,150 | 0.581334 |
[
[
[
"<p float=\"center\">\n <img src=\"https://github.com/carlosalvarezh/Analisis_Numerico/blob/master/images/C00_Img00_logo.png?raw=true\" width=\"350\" />\n</p>\n<h1 align=\"center\">ST0256 - Análisis Numérico</h1>\n<h1 align=\"center\">Presentación del Curso</h1>\n<h1 align=\"center\">2021/01</h1>\n<h1 align=\"center\">MEDELLÍN - COLOMBIA </h1>",
"_____no_output_____"
],
[
"<table>\n <tr align=left><td><img align=left src=\"./images/CC-BY.png\">\n <td>Text provided under a Creative Commons Attribution license, CC-BY. All code is made available under the FSF-approved MIT license.(c) Carlos Alberto Alvarez Henao</td>\n</table>",
"_____no_output_____"
],
[
"*** \n\n***Docente:*** Carlos Alberto Álvarez Henao, I.C. D.Sc.\n\n***e-mail:*** [email protected]\n\n***skype:*** carlos.alberto.alvarez.henao\n\n***Herramienta:*** [Jupyter](http://jupyter.org/)\n\n***Kernel:*** Python 3.8\n\n\n***",
"_____no_output_____"
],
[
"<a id='TOC'></a>",
"_____no_output_____"
],
[
"<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\"><ul class=\"toc-item\"><li><span><a href=\"#Motivación\" data-toc-modified-id=\"Motivación-1\"><span class=\"toc-item-num\">1 </span>Motivación</a></span></li><li><span><a href=\"#Aspectos-generales-del-curso\" data-toc-modified-id=\"Aspectos-generales-del-curso-2\"><span class=\"toc-item-num\">2 </span>Aspectos generales del curso</a></span><ul class=\"toc-item\"><li><span><a href=\"#Programa-clase-a-clase\" data-toc-modified-id=\"Programa-clase-a-clase-2.1\"><span class=\"toc-item-num\">2.1 </span>Programa clase-a-clase</a></span></li><li><span><a href=\"#Evaluación\" data-toc-modified-id=\"Evaluación-2.2\"><span class=\"toc-item-num\">2.2 </span>Evaluación</a></span></li><li><span><a href=\"#Bibliográfia\" data-toc-modified-id=\"Bibliográfia-2.3\"><span class=\"toc-item-num\">2.3 </span>Bibliográfia</a></span></li><li><span><a href=\"#Asesorías-y-Monitorias-académicas\" data-toc-modified-id=\"Asesorías-y-Monitorias-académicas-2.4\"><span class=\"toc-item-num\">2.4 </span>Asesorías y Monitorias académicas</a></span></li></ul></li></ul></div>",
"_____no_output_____"
],
[
"<p float=\"center\">\n <img src=\"https://github.com/carlosalvarezh/Analisis_Numerico/blob/master/images/C00_Img01_Intro.PNG?raw=true\" width=\"500\" />\n</p>\n",
"_____no_output_____"
],
[
"## Motivación",
"_____no_output_____"
],
[
"Deseamos desarrollar las siguientes operaciones aritméticas: \n\n- $2+2$\n\n- $4 \\times 4$\n\n- $\\left(\\sqrt{3} \\right )^2$\n\ndesde un punto de vista analítico, las soluciones exactas (a mano y en papel?) son\n\n- $2+2 = 4$\n\n- $4 \\times 4 = 16$\n\n- $\\left(\\sqrt{3} \\right )^2 = 3$\n\npero veamos qué sucede cuando realizamos las mismas operaciones empleando un dispositivo electrónico (calculadora, computador, etc)",
"_____no_output_____"
]
],
[
[
"a = 2 + 2\nb = 4 * 4\nc = (3**(1/2))**2",
"_____no_output_____"
]
],
[
[
"preguntemos al computador si los resultados obtenidos en los cálculos son los esperados",
"_____no_output_____"
]
],
[
[
"a == 4",
"_____no_output_____"
],
[
"b == 16",
"_____no_output_____"
],
[
"c == 3",
"_____no_output_____"
]
],
[
[
"`False`? Qué sucedió? por qué el resultado de comparar el valor que entendemos como verdadero y el obtenido empleando un dispositivo electrónico (calculadora) es falso? Veamos entonces cuál es el resultado que arrojó el cálculo:",
"_____no_output_____"
]
],
[
[
"print(c)",
"2.9999999999999996\n"
]
],
[
[
"Efectivamente, se oberva que el valor calculado no es el esperado. Puede ser que, para muchas de las situaciones cotidianas, este valor no sea percibido como una diferencia apreciable (\"error\") y simplemente se asuma que ambos valores son iguales (\"redondeo\"). Pero, y si esta operación la tuviera qué repetir muchas veces? qué sucede con ese pequeño errror? Será que se puede simplemente despreciar? qué sucede para cálculos más complejos? se podría determinar la cantidad de error en los cálculos numéricos realizados a través de un computador? este error aumenta sin control? hasta cuándo se podrá decir que dos cantidades son \"iguales\"? El errror es debido a qué? una mala implementación de la operación aritmética? el lenguaje empleado para realizar el cálculo? La máquina? la formulación matemática? humano?\n\nEstas, y muchas otras, preguntas son las que se pretenden resolver en el curso de Análisis Numérico.",
"_____no_output_____"
],
[
"[Volver a la Tabla de Contenido](#TOC)",
"_____no_output_____"
],
[
"## Aspectos generales del curso",
"_____no_output_____"
],
[
"### Programa clase-a-clase",
"_____no_output_____"
],
[
"|**Clase** |**Fecha**|**Capítulo**|**Contenido** |**Actividad Evaluativa**|\n|--------:|:-------:|:-------|--------------|------------------------|\n|1 |26/01/2021||Descripción del curso y de los contenidos a ser tratados| |\n|2 |28/01/2021|Teoría de Errores|Fuentes de error, Notación Big-O, Error de punto flotante| |\n|3 |02/02/2021||Error de truncamiento||\n|4 |04/02/2021||Error de punto flotante (cont), Combinación de error, Operaciones de conteo||\n|5 |09/02/2021||Por qué debería importarnos esto?||\n|6 |11/02/2021||Número de condición de una función. Forma anidad de Hörner y aritmética de $n$ decimales. El error y su relación con la estabilidad de los algoritmos||\n|7 |16/02/2021|Raíces de ecuaciones|Método gráfico. Búsquedas incrementales||\n|8 |18/02/2021||Método de Bisección, Teorema sobre convergencia en bisección||\n|9 |23/02/2021||Punto fijo: teorema, corolarios y condiciones para hallar $G(x)$||\n|10 |25/02/2021||Punto fijo: Método de Newton-Raphson (teoremas y conclusión)||\n|11|02/03/2021||Punto fijo: Método de la Secante (teoremas y conclusión)||\n|12|04/03/2021||Método de Raíces múltiples (teoremas y conclusión)||\n|13|09/03/2021|Sistema de Ecuaciones Lineales|Introducción a la solución numérica de Sistemas De ecuaciones. criterios de Existencia y Unicidad |||\n|14|11/03/2021||Eliminación Gaussiana. Número de operaciones en el proceso de Eliminación Gaussiana|Primer parcial (15%)|\n|15|16/03/2021||Problemas en la solución. Estrategias de pivoteo||\n|16|18/03/2021||Factorización $LU$: Doolittle, Crout y Cholesky|Asamblea estudiantíl|\n|17|23/03/2021||Aplicaciones de la Factorización $LU$||\n|18|25/03/2021||Introducción a métodos iterativos. Normas Vectoriales y Matriciales. Número de Condición de una Matriz||\n||29/03-03/04/2021|||Semana Santa||\n|19|06/04/2021||Métodos de Jacobi. Gauss-Seidel y SOR. Formas Matriciales||\n|20|08/04/2021||Teoremas de Convergencias para métodos iterativos. 
Aspectos generales del refinamiento iterativo||\n|21|13/04/2021|Interpolación Numérica|Introducción. Método de diferencias dividas de Newton|Segundo Parcial (15%)|\n|22|15/04/2021||Método de Interpolación de Lagrange||\n|23|20/04/2021||Trazadores lineales, cuadráticos y cúbicos||\n|24|22/04/2021|Diferenciación e Integración Numérica|Métodos diferenciación numérica. Integración numérica Trapecio||\n|25|27/04/2021||Integración numérica Simpson 1/3 simple y compuesto||\n|26|29/04/2021||Integración numérica Simpson 3/8 simple y compuesto||\n|27|04/05/2021|Ecuaciones Diferenciales Ordinarias|Introducción, Método de Euler||\n|28|06/05/2021||Método de RK-2 y RK-4|Tercer Parcial (15%)|\n|29|11/05/2021||||\n|30|13/05/2021|||16/05/2021 - 70%|\n|31|18/05/2021||||\n|32|20/05/2021||||\n||24-29/05/2021|||Semana de Colchón|\n||31-04/06/2021||Semana 1 finales ||\n||08-11/06/2021||Semana 2 finales ||\n",
"_____no_output_____"
],
[
"[Volver a la Tabla de Contenido](#TOC)",
"_____no_output_____"
],
[
"### Evaluación",
"_____no_output_____"
],
[
"|Tema |Porcentaje|Fecha |\n|:-----------------------------|:--------:|:--------:|\n|Error y Raíces de ecuaciones |15% |11/03/2021|\n|Sistema de Ecuaciones Lineales|15% |13/04/2021|\n|Interpolación, Diferenciación e Integración Numérica y EDO|15%|06/05/2021|\n|Seguimiento | 25%|13/05/2021|\n|Práctica Final | 30%|08/062021|\n\n- ***Parciales:*** Los parciales se realizarán 8 días después de finalizado el tema correspondiente\n\n\n- ***Seguimiento:*** El seguimiento constará de una serie de actividades extraclase a ser entregados en el mismo día o en días siguientes, dependiendo de la actividad.\n\n\n- ***Práctica:*** La práctica consistirá en el desarrollo de un problema de Simulación numérica. Los temas serán indicados en las primeras semanas del semestre y se podrá realizar en equipos de a tres (3) estudiantes.",
"_____no_output_____"
],
[
"[Volver a la Tabla de Contenido](#TOC)",
"_____no_output_____"
],
[
"### Bibliográfia",
"_____no_output_____"
],
[
"- Burden, Richard L. & Faires, Duglas. Análisis Numérico. Editorial Thomson. 9° Edición 2011.\n\n\n- Chapra, Steven & Canale, Raymond. Métodos Numéricos para ingenieros, McgrawHill, 1987.\n\n\n- Heath, Michael T. Scientific Computing: An Introductory Survey, SIAM, 2006.\n\n\n- Python. Recuperado de https://www.python.org/\n\n\n- Jupyter Notebook. Recuperado de https://jupyter.org/\n\n\n- Anaconda. Recuperado de https://www.anaconda.com/\n\n\n- Google Colab. Recuperado de https://colab.research.google.com/",
"_____no_output_____"
],
[
"[Volver a la Tabla de Contenido](#TOC)",
"_____no_output_____"
],
[
"### Asesorías y Monitorias académicas",
"_____no_output_____"
],
[
"Próximamente...",
"_____no_output_____"
],
[
"[Volver a la Tabla de Contenido](#TOC)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
4abfb2e81fcbd169d1b1902c259dffbd86d95efd
| 4,003 |
ipynb
|
Jupyter Notebook
|
SupportVectorMachines/6.SVM-Regression.ipynb
|
TobiahShaw/machine-learning-algorithm
|
0ce0e0d463159d8d30b11a15ba98c5a64b2c5b89
|
[
"Apache-2.0"
] | null | null | null |
SupportVectorMachines/6.SVM-Regression.ipynb
|
TobiahShaw/machine-learning-algorithm
|
0ce0e0d463159d8d30b11a15ba98c5a64b2c5b89
|
[
"Apache-2.0"
] | null | null | null |
SupportVectorMachines/6.SVM-Regression.ipynb
|
TobiahShaw/machine-learning-algorithm
|
0ce0e0d463159d8d30b11a15ba98c5a64b2c5b89
|
[
"Apache-2.0"
] | null | null | null | 21.179894 | 81 | 0.52061 |
[
[
[
"# SVM思路结局回归问题\n\n- 在margin两条线内尽可能包含最多的点,所决定的中间的那个直线,就是拟合的模型\n- 思想和分类问题相反,在分类问题中margin要求点尽可能少,甚至Hard Margin SVM里,margin之间没有点",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets",
"_____no_output_____"
],
[
"boston = datasets.load_boston()\nX = boston.data\ny = boston.target",
"_____no_output_____"
],
[
"from sklearn.model_selection import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=666)",
"_____no_output_____"
],
[
"from sklearn.svm import LinearSVR\nfrom sklearn.svm import SVR\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.pipeline import Pipeline\n\ndef StandardLinearSVR(epsilon=0.1):\n return Pipeline([\n (\"std_scaler\", StandardScaler()),\n (\"linearSVR\", LinearSVR(epsilon=epsilon))\n ])",
"_____no_output_____"
],
[
"svr = StandardLinearSVR()\nsvr.fit(X_train, y_train)\nsvr.score(X_test, y_test)",
"_____no_output_____"
],
[
"def RBFKernelSVR(epsilon=0.1, gamma=1.0):\n return Pipeline([\n (\"std_scaler\", StandardScaler()),\n (\"SVR\", SVR(epsilon=epsilon, kernel='rbf', gamma=gamma))\n ])",
"_____no_output_____"
],
[
"rbf_svr = RBFKernelSVR(0.1, 0.045)\nrbf_svr.fit(X_train, y_train)\nrbf_svr.score(X_test, y_test)",
"_____no_output_____"
],
[
"from sklearn.preprocessing import PolynomialFeatures\n\ndef PolySVR(degree=3, epsilon=0.1):\n return Pipeline([\n (\"poly\", PolynomialFeatures(degree=degree)),\n (\"std_scaler\", StandardScaler()),\n (\"linearSVR\", LinearSVR(epsilon=epsilon))\n ])",
"_____no_output_____"
],
[
"poly_svr = PolySVR()\npoly_svr.fit(X_train, y_train)\npoly_svr.score(X_test, y_test)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4abfb9f77dd0df4f575cbea10b6c41a413a6f223
| 93,601 |
ipynb
|
Jupyter Notebook
|
courses/dl1/lesson7-cifar10.ipynb
|
AbhimanyuAryan/fastai
|
fcd732f7a838e7a3ea0099f19b11748974b77789
|
[
"Apache-2.0"
] | 2 |
2019-03-06T23:19:16.000Z
|
2020-08-12T23:44:31.000Z
|
courses/dl1/lesson7-cifar10.ipynb
|
AbhimanyuAryan/fastai
|
fcd732f7a838e7a3ea0099f19b11748974b77789
|
[
"Apache-2.0"
] | 3 |
2021-05-20T19:59:09.000Z
|
2022-02-26T09:11:29.000Z
|
courses/dl1/lesson7-cifar10.ipynb
|
AbhimanyuAryan/fastai
|
fcd732f7a838e7a3ea0099f19b11748974b77789
|
[
"Apache-2.0"
] | 2 |
2018-09-19T09:35:09.000Z
|
2018-10-03T09:08:12.000Z
| 60.819363 | 15,730 | 0.696702 |
[
[
[
"## CIFAR 10",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\n%reload_ext autoreload\n%autoreload 2",
"_____no_output_____"
]
],
[
[
"You can get the data via:\n\n wget http://pjreddie.com/media/files/cifar.tgz \n**Important:** Before proceeding, the student must reorganize the downloaded dataset files to match the expected directory structure, so that there is a dedicated folder for each class under 'test' and 'train', e.g.:\n\n```\n* test/airplane/airplane-1001.png\n* test/bird/bird-1043.png\n\n* train/bird/bird-10018.png\n* train/automobile/automobile-10000.png\n```\n\nThe filename of the image doesn't have to include its class.",
"_____no_output_____"
]
],
[
[
"from fastai.conv_learner import *\nPATH = \"data/cifar10/\"\nos.makedirs(PATH,exist_ok=True)\n\n!ls {PATH}\n\nif not os.path.exists(f\"{PATH}/train/bird\"):\n raise Exception(\"expecting class subdirs under 'train/' and 'test/'\")\n!ls {PATH}/train",
"labels.txt test train\nairplane automobile bird cat deer dog frog horse ship truck\n"
],
[
"classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\nstats = (np.array([ 0.4914 , 0.48216, 0.44653]), np.array([ 0.24703, 0.24349, 0.26159]))",
"_____no_output_____"
],
[
"def get_data(sz,bs):\n tfms = tfms_from_stats(stats, sz, aug_tfms=[RandomFlip()], pad=sz//8)\n return ImageClassifierData.from_paths(PATH, val_name='test', tfms=tfms, bs=bs)",
"_____no_output_____"
],
[
"bs=256",
"_____no_output_____"
]
],
[
[
"### Look at data",
"_____no_output_____"
]
],
[
[
"data = get_data(32,4)",
"_____no_output_____"
],
[
"x,y=next(iter(data.trn_dl))",
"_____no_output_____"
],
[
"plt.imshow(data.trn_ds.denorm(x)[0]);",
"_____no_output_____"
],
[
"plt.imshow(data.trn_ds.denorm(x)[1]);",
"_____no_output_____"
]
],
[
[
"## Fully connected model",
"_____no_output_____"
]
],
[
[
"data = get_data(32,bs)",
"_____no_output_____"
],
[
"lr=1e-2",
"_____no_output_____"
]
],
[
[
"From [this notebook](https://github.com/KeremTurgutlu/deeplearning/blob/master/Exploring%20Optimizers.ipynb) by our student Kerem Turgutlu:",
"_____no_output_____"
]
],
[
[
"class SimpleNet(nn.Module):\n def __init__(self, layers):\n super().__init__()\n self.layers = nn.ModuleList([\n nn.Linear(layers[i], layers[i + 1]) for i in range(len(layers) - 1)])\n \n def forward(self, x):\n x = x.view(x.size(0), -1)\n for l in self.layers:\n l_x = l(x)\n x = F.relu(l_x)\n return F.log_softmax(l_x, dim=-1)",
"_____no_output_____"
],
[
"learn = ConvLearner.from_model_data(SimpleNet([32*32*3, 40,10]), data)",
"_____no_output_____"
],
[
"learn, [o.numel() for o in learn.model.parameters()]",
"_____no_output_____"
],
[
"learn.summary()",
"_____no_output_____"
],
[
"learn.lr_find()",
"_____no_output_____"
],
[
"learn.sched.plot()",
"\n\n \u001b[A\u001b[A"
],
[
"%time learn.fit(lr, 2)",
"_____no_output_____"
],
[
"%time learn.fit(lr, 2, cycle_len=1)",
"_____no_output_____"
]
],
[
[
"## CNN",
"_____no_output_____"
]
],
[
[
"class ConvNet(nn.Module):\n def __init__(self, layers, c):\n super().__init__()\n self.layers = nn.ModuleList([\n nn.Conv2d(layers[i], layers[i + 1], kernel_size=3, stride=2)\n for i in range(len(layers) - 1)])\n self.pool = nn.AdaptiveMaxPool2d(1)\n self.out = nn.Linear(layers[-1], c)\n \n def forward(self, x):\n for l in self.layers: x = F.relu(l(x))\n x = self.pool(x)\n x = x.view(x.size(0), -1)\n return F.log_softmax(self.out(x), dim=-1)",
"_____no_output_____"
],
[
"learn = ConvLearner.from_model_data(ConvNet([3, 20, 40, 80], 10), data)",
"_____no_output_____"
],
[
"learn.summary()",
"_____no_output_____"
],
[
"learn.lr_find(end_lr=100)",
"_____no_output_____"
],
[
"learn.sched.plot()",
"_____no_output_____"
],
[
"%time learn.fit(1e-1, 2)",
"_____no_output_____"
],
[
"%time learn.fit(1e-1, 4, cycle_len=1)",
"_____no_output_____"
]
],
[
[
"## Refactored",
"_____no_output_____"
]
],
[
[
"class ConvLayer(nn.Module):\n def __init__(self, ni, nf):\n super().__init__()\n self.conv = nn.Conv2d(ni, nf, kernel_size=3, stride=2, padding=1)\n \n def forward(self, x): return F.relu(self.conv(x))",
"_____no_output_____"
],
[
"class ConvNet2(nn.Module):\n def __init__(self, layers, c):\n super().__init__()\n self.layers = nn.ModuleList([ConvLayer(layers[i], layers[i + 1])\n for i in range(len(layers) - 1)])\n self.out = nn.Linear(layers[-1], c)\n \n def forward(self, x):\n for l in self.layers: x = l(x)\n x = F.adaptive_max_pool2d(x, 1)\n x = x.view(x.size(0), -1)\n return F.log_softmax(self.out(x), dim=-1)",
"_____no_output_____"
],
[
"learn = ConvLearner.from_model_data(ConvNet2([3, 20, 40, 80], 10), data)",
"_____no_output_____"
],
[
"learn.summary()",
"_____no_output_____"
],
[
"%time learn.fit(1e-1, 2)",
"_____no_output_____"
],
[
"%time learn.fit(1e-1, 2, cycle_len=1)",
"_____no_output_____"
]
],
[
[
"## BatchNorm",
"_____no_output_____"
]
],
[
[
"class BnLayer(nn.Module):\n def __init__(self, ni, nf, stride=2, kernel_size=3):\n super().__init__()\n self.conv = nn.Conv2d(ni, nf, kernel_size=kernel_size, stride=stride,\n bias=False, padding=1)\n self.a = nn.Parameter(torch.zeros(nf,1,1))\n self.m = nn.Parameter(torch.ones(nf,1,1))\n \n def forward(self, x):\n x = F.relu(self.conv(x))\n x_chan = x.transpose(0,1).contiguous().view(x.size(1), -1)\n if self.training:\n self.means = x_chan.mean(1)[:,None,None]\n self.stds = x_chan.std (1)[:,None,None]\n return (x-self.means) / self.stds *self.m + self.a",
"_____no_output_____"
],
[
"class ConvBnNet(nn.Module):\n def __init__(self, layers, c):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 10, kernel_size=5, stride=1, padding=2)\n self.layers = nn.ModuleList([BnLayer(layers[i], layers[i + 1])\n for i in range(len(layers) - 1)])\n self.out = nn.Linear(layers[-1], c)\n \n def forward(self, x):\n x = self.conv1(x)\n for l in self.layers: x = l(x)\n x = F.adaptive_max_pool2d(x, 1)\n x = x.view(x.size(0), -1)\n return F.log_softmax(self.out(x), dim=-1)",
"_____no_output_____"
],
[
"learn = ConvLearner.from_model_data(ConvBnNet([10, 20, 40, 80, 160], 10), data)",
"_____no_output_____"
],
[
"learn.summary()",
"_____no_output_____"
],
[
"%time learn.fit(3e-2, 2)",
"_____no_output_____"
],
[
"%time learn.fit(1e-1, 4, cycle_len=1)",
"_____no_output_____"
]
],
[
[
"## Deep BatchNorm",
"_____no_output_____"
]
],
[
[
"class ConvBnNet2(nn.Module):\n def __init__(self, layers, c):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 10, kernel_size=5, stride=1, padding=2)\n self.layers = nn.ModuleList([BnLayer(layers[i], layers[i+1])\n for i in range(len(layers) - 1)])\n self.layers2 = nn.ModuleList([BnLayer(layers[i+1], layers[i + 1], 1)\n for i in range(len(layers) - 1)])\n self.out = nn.Linear(layers[-1], c)\n \n def forward(self, x):\n x = self.conv1(x)\n for l,l2 in zip(self.layers, self.layers2):\n x = l(x)\n x = l2(x)\n x = F.adaptive_max_pool2d(x, 1)\n x = x.view(x.size(0), -1)\n return F.log_softmax(self.out(x), dim=-1)",
"_____no_output_____"
],
[
"learn = ConvLearner.from_model_data(ConvBnNet2([10, 20, 40, 80, 160], 10), data)",
"_____no_output_____"
],
[
"%time learn.fit(1e-2, 2)",
"_____no_output_____"
],
[
"%time learn.fit(1e-2, 2, cycle_len=1)",
"_____no_output_____"
]
],
[
[
"## Resnet",
"_____no_output_____"
]
],
[
[
"class ResnetLayer(BnLayer):\n def forward(self, x): return x + super().forward(x)",
"_____no_output_____"
],
[
"class Resnet(nn.Module):\n def __init__(self, layers, c):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 10, kernel_size=5, stride=1, padding=2)\n self.layers = nn.ModuleList([BnLayer(layers[i], layers[i+1])\n for i in range(len(layers) - 1)])\n self.layers2 = nn.ModuleList([ResnetLayer(layers[i+1], layers[i + 1], 1)\n for i in range(len(layers) - 1)])\n self.layers3 = nn.ModuleList([ResnetLayer(layers[i+1], layers[i + 1], 1)\n for i in range(len(layers) - 1)])\n self.out = nn.Linear(layers[-1], c)\n \n def forward(self, x):\n x = self.conv1(x)\n for l,l2,l3 in zip(self.layers, self.layers2, self.layers3):\n x = l3(l2(l(x)))\n x = F.adaptive_max_pool2d(x, 1)\n x = x.view(x.size(0), -1)\n return F.log_softmax(self.out(x), dim=-1)",
"_____no_output_____"
],
[
"learn = ConvLearner.from_model_data(Resnet([10, 20, 40, 80, 160], 10), data)",
"_____no_output_____"
],
[
"wd=1e-5",
"_____no_output_____"
],
[
"%time learn.fit(1e-2, 2, wds=wd)",
"_____no_output_____"
],
[
"%time learn.fit(1e-2, 3, cycle_len=1, cycle_mult=2, wds=wd)",
"_____no_output_____"
],
[
"%time learn.fit(1e-2, 8, cycle_len=4, wds=wd)",
"_____no_output_____"
]
],
[
[
"## Resnet 2",
"_____no_output_____"
]
],
[
[
"class Resnet2(nn.Module):\n def __init__(self, layers, c, p=0.5):\n super().__init__()\n self.conv1 = BnLayer(3, 16, stride=1, kernel_size=7)\n self.layers = nn.ModuleList([BnLayer(layers[i], layers[i+1])\n for i in range(len(layers) - 1)])\n self.layers2 = nn.ModuleList([ResnetLayer(layers[i+1], layers[i + 1], 1)\n for i in range(len(layers) - 1)])\n self.layers3 = nn.ModuleList([ResnetLayer(layers[i+1], layers[i + 1], 1)\n for i in range(len(layers) - 1)])\n self.out = nn.Linear(layers[-1], c)\n self.drop = nn.Dropout(p)\n \n def forward(self, x):\n x = self.conv1(x)\n for l,l2,l3 in zip(self.layers, self.layers2, self.layers3):\n x = l3(l2(l(x)))\n x = F.adaptive_max_pool2d(x, 1)\n x = x.view(x.size(0), -1)\n x = self.drop(x)\n return F.log_softmax(self.out(x), dim=-1)",
"_____no_output_____"
],
[
"learn = ConvLearner.from_model_data(Resnet2([16, 32, 64, 128, 256], 10, 0.2), data)",
"_____no_output_____"
],
[
"wd=1e-6",
"_____no_output_____"
],
[
"%time learn.fit(1e-2, 2, wds=wd)",
"_____no_output_____"
],
[
"%time learn.fit(1e-2, 3, cycle_len=1, cycle_mult=2, wds=wd)",
"_____no_output_____"
],
[
"%time learn.fit(1e-2, 8, cycle_len=4, wds=wd)",
"_____no_output_____"
],
[
"learn.save('tmp3')",
"_____no_output_____"
],
[
"log_preds,y = learn.TTA()\npreds = np.mean(np.exp(log_preds),0)",
" \r"
],
[
"metrics.log_loss(y,preds), accuracy_np(preds,y)",
"_____no_output_____"
]
],
[
[
"### End",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
4abfbf2fdbc805dabfb2d591ec780227ea582f19
| 7,313 |
ipynb
|
Jupyter Notebook
|
.ipynb_checkpoints/PythonIntroCh1-checkpoint.ipynb
|
totolhua/Python3_Jupyter_Notebook
|
95e99de9a9e359816058594b2d1fd871d812361f
|
[
"MIT"
] | 24 |
2018-07-24T04:55:15.000Z
|
2022-01-09T03:17:20.000Z
|
.ipynb_checkpoints/PythonIntroCh1-checkpoint.ipynb
|
totolhua/Python3_Jupyter_Notebook
|
95e99de9a9e359816058594b2d1fd871d812361f
|
[
"MIT"
] | 2 |
2022-01-03T10:32:12.000Z
|
2022-01-04T10:23:15.000Z
|
.ipynb_checkpoints/PythonIntroCh1-checkpoint.ipynb
|
totolhua/Python3_Jupyter_Notebook
|
95e99de9a9e359816058594b2d1fd871d812361f
|
[
"MIT"
] | 40 |
2018-07-24T12:02:33.000Z
|
2022-03-21T10:21:51.000Z
| 22.501538 | 366 | 0.542185 |
[
[
[
"# 1. Very simple 'programs'\n## 1.1 Running Python from the command line\nIn order to test pieces of code we can run Python from the command line. In this Jupyter Notebook we are going to simulate this. You can type the commands in the fields and execute them.<br>\nIn the field type:<br>\n`print('Hello, World')`<br>\nThen press `<shift> + <return>` to execute the command.\n\n",
"_____no_output_____"
],
[
"What happened?<br>You just created a program, that prints the words 'Hello, World'. The Python environment that you are in immediately compiles whatever you have typed in. This is useful for testing things, e.g. define a few variables, and then test to see if a certain line will work. That will come in a later lesson, though.",
"_____no_output_____"
],
[
"## 1.2 Math in Python\nType<br>\n`1 + 1`",
"_____no_output_____"
],
[
"Type<br>\n`20 + 80`",
"_____no_output_____"
],
[
"These are additions. We can of course use other mathematical operators.<br>\nTry this subtraction:<br>\n`6 - 5`",
"_____no_output_____"
],
[
"and this multiplication:<br>\n`2 * 5`",
"_____no_output_____"
],
[
"Try:<br>\n`5 ** 2`",
"_____no_output_____"
],
[
"`**` is the exponential operator, so we executed 5 squared.",
"_____no_output_____"
],
[
"Type:<br>\n`print('1 + 2 is an addition')`",
"_____no_output_____"
],
[
"You see that the `print` statement writes something on the screen.<br>\nTry this:<br>\n`print('one kilobyte is 2^10 bytes, or', 2 ** 10, 'bytes')`",
"_____no_output_____"
],
[
"This demonstrates that you can print text and calculations in a sentence.<br>\nThe commas separating each section are a way of separating strings (text) from calculations or variable.",
"_____no_output_____"
],
[
"Now try this:<br>\n`23 / 3`",
"_____no_output_____"
],
[
"And this:<br>\n`23%3`",
"_____no_output_____"
],
[
"`%` returns the remainder of the division.",
"_____no_output_____"
],
[
"## 1.3 Order of Operations",
"_____no_output_____"
],
[
"Remember that thing called order of operation that they taught in maths? Well, it applies in Python, too. Here it is, if you need reminding:<br>\n1. Parenthesis `()`\n2. Exponents `**`\n3. Multiplication `*`, division `/` and remainder `%`\n4. Addition `+` and subtraction `-`",
"_____no_output_____"
],
[
"Here are some examples that you might want to try, if you're rusty on this:<br>\n`1 + 2 * 3`<br>\n`(1 + 2) * 3`",
"_____no_output_____"
],
[
"## 1.4 Comments, Please\nThe final thing you'll need to know to move on to multi-line programs is the comment. Type the following (and yes, the output is shown):<br>\n`#I am a comment. Fear my wrath!`",
"_____no_output_____"
],
[
"A comment is a piece of code that is not run. In Python, you make something a comment by putting a hash in front of it. A hash comments everything after it in the line, and nothing before it. So you could type this:<br>\n`print(\"food is very nice\") #eat me`",
"_____no_output_____"
],
[
"This results in a normal output, without the smutty comment, thank you very much.<br>\nNow try this:<br>\n`# print(\"food is very nice\")`",
"_____no_output_____"
],
[
"Nothing happens, because the code was after a comment.",
"_____no_output_____"
],
[
"Comments are important for adding necessary information for another programmer to read, but not the computer. For example, an explanation of a section of code, saying what it does, or what is wrong with it. You can also comment bits of code by putting a `#` in front of it - if you don't want it to compile, but can't delete it because you might need it later.",
"_____no_output_____"
],
[
"__[Home](PythonIntro.ipynb)__<br>\n__[Lesson 2: Programs in a file, and variables](PythonIntroCh2.ipynb)__",
"_____no_output_____"
]
]
] |
[
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
4abfc1669af6da2ad39a5188b258dd66de9333ad
| 58,783 |
ipynb
|
Jupyter Notebook
|
Python/CMS_DASH_DataLoad.ipynb
|
InitiateSolutions/CMS-DASH
|
913b6acdfa4f54ab6cb52f5221e13bc61145e5ef
|
[
"CC0-1.0"
] | 1 |
2021-05-29T18:40:30.000Z
|
2021-05-29T18:40:30.000Z
|
Python/CMS_DASH_DataLoad.ipynb
|
InitiateSolutions/CMS-DASH
|
913b6acdfa4f54ab6cb52f5221e13bc61145e5ef
|
[
"CC0-1.0"
] | null | null | null |
Python/CMS_DASH_DataLoad.ipynb
|
InitiateSolutions/CMS-DASH
|
913b6acdfa4f54ab6cb52f5221e13bc61145e5ef
|
[
"CC0-1.0"
] | null | null | null | 37.32254 | 612 | 0.62416 |
[
[
[
"## Install packages and connect to Oracle",
"_____no_output_____"
]
],
[
[
"sc.install_pypi_package(\"sqlalchemy\")",
"_____no_output_____"
],
[
"sc.install_pypi_package(\"pandas\")",
"_____no_output_____"
],
[
"sc.install_pypi_package(\"s3fs\")",
"_____no_output_____"
],
[
"sc.install_pypi_package(\"cx_Oracle\")",
"_____no_output_____"
],
[
"sc.install_pypi_package(\"fsspec\")\n",
"_____no_output_____"
],
[
"from sqlalchemy import create_engine\n",
"_____no_output_____"
],
[
"engine = create_engine('oracle://CMSDASHADMIN:4#X9#Veut#KSsU#[email protected]:1521/', echo=False)",
"_____no_output_____"
],
[
"# Import necessary libraries\nimport cx_Oracle\nimport pandas as pd\nimport numpy as np",
"_____no_output_____"
],
[
"dsn_tns = cx_Oracle.makedsn('oracle-prod-cms-dash.ccwgq0kcp9fq.us-east-1.rds.amazonaws.com', '1521', service_name='ORCL')",
"_____no_output_____"
],
[
"conn = cx_Oracle.connect(user=r'VILASM', password='Z#5iC$Ld4sE', dsn=dsn_tns)",
"_____no_output_____"
],
[
"con = cx_Oracle.connect('VILASM/Z#5iC$Ld4sE@oracle-prod-cms-dash.ccwgq0kcp9fq.us-east-1.rds.amazonaws.com/ORCL')",
"_____no_output_____"
],
[
"print (con.version)\ncur =con.cursor()",
"_____no_output_____"
]
],
[
[
"# Insert into DASH_BENEFICIARY table",
"_____no_output_____"
]
],
[
[
"import pandas as pd",
"_____no_output_____"
],
[
"# Create datatype dictionary for reading in the files\ndtype_dic= {'BENE_BIRTH_DT':str, 'BENE_DEATH_DT':str}",
"_____no_output_____"
],
[
"# Read in all three files from 2008, 2009, and 2010\nbene08 = pd.read_csv(\"s3://cms-dash-datasets/Data/DE1.0 Sample 20/DE1_0_2008_Beneficiary_Summary_File_Sample_20.csv\", dtype = dtype_dic)\nbene09 = pd.read_csv(\"s3://cms-dash-datasets/Data/DE1.0 Sample 20/DE1_0_2009_Beneficiary_Summary_File_Sample_20.csv\", dtype = dtype_dic)\nbene10 = pd.read_csv(\"s3://cms-dash-datasets/Data/DE1.0 Sample 20/DE1_0_2010_Beneficiary_Summary_File_Sample_20.csv\", dtype = dtype_dic)",
"_____no_output_____"
],
[
"# Add the FILE_YEAR column and insert it to index 0\nbene08['FILE_YEAR']='2008'\nfirst_col = bene08.pop('FILE_YEAR')\nbene08.insert(0,'FILE_YEAR',first_col)\n\nbene09['FILE_YEAR']='2009'\nfirst_col = bene09.pop('FILE_YEAR')\nbene09.insert(0,'FILE_YEAR',first_col)\n\nbene10['FILE_YEAR']='2010'\nfirst_col = bene10.pop('FILE_YEAR')\nbene10.insert(0,'FILE_YEAR',first_col)",
"_____no_output_____"
],
[
"# Add leading zeros to SP_STATE_CODE and BENE_COUNTY_CD\nbene08['SP_STATE_CODE'] = bene08['SP_STATE_CODE'].astype(str).apply(lambda x: x.zfill(2))\nbene08['BENE_COUNTY_CD'] = bene08['BENE_COUNTY_CD'].astype(str).apply(lambda x: x.zfill(3))\n\nbene09['SP_STATE_CODE'] = bene09['SP_STATE_CODE'].astype(str).apply(lambda x: x.zfill(2))\nbene09['BENE_COUNTY_CD'] = bene09['BENE_COUNTY_CD'].astype(str).apply(lambda x: x.zfill(3))\n\nbene10['SP_STATE_CODE'] = bene10['SP_STATE_CODE'].astype(str).apply(lambda x: x.zfill(2))\nbene10['BENE_COUNTY_CD'] = bene10['BENE_COUNTY_CD'].astype(str).apply(lambda x: x.zfill(3))",
"_____no_output_____"
],
[
"# Converty BENE_BIRTH_DT and BENE_DEATH_DT to datetimes\nA = pd.to_datetime(bene08.BENE_BIRTH_DT)\nbene08['BENE_BIRTH_DT'] = A.dt.date\nB = pd.to_datetime(bene08.BENE_DEATH_DT)\nbene08['BENE_DEATH_DT'] = B.dt.date\n\nA = pd.to_datetime(bene09.BENE_BIRTH_DT)\nbene09['BENE_BIRTH_DT'] = A.dt.date\nB = pd.to_datetime(bene09.BENE_DEATH_DT)\nbene09['BENE_DEATH_DT'] = B.dt.date\n\nA = pd.to_datetime(bene10.BENE_BIRTH_DT)\nbene10['BENE_BIRTH_DT'] = A.dt.date\nB = pd.to_datetime(bene10.BENE_DEATH_DT)\nbene10['BENE_DEATH_DT'] = B.dt.date",
"_____no_output_____"
],
[
"# Insert into table DASH_BENEFICIARY for 2008\n\nsql= \"\"\" INSERT INTO DASH_BENEFICIARY (FILE_YEAR, DESYNPUF_ID,BENE_BIRTH_DT, BENE_DEATH_DT, BENE_SEX_IDENT_CD,BENE_RACE_CD, \nBENE_ESRD_IND, SP_STATE_CODE, BENE_COUNTY_CD, BENE_HI_CVRAGE_TOT_MONS,BENE_SMI_CVRAGE_TOT_MONS, BENE_HMO_CVRAGE_TOT_MONS, \nPLAN_CVRG_MOS_NUM, SP_ALZHDMTA, SP_CHF,SP_CHRNKIDN, SP_CNCR, SP_COPD, SP_DEPRESSN,SP_DIABETES, SP_ISCHMCHT, SP_OSTEOPRS, \nSP_RA_OA, SP_STRKETIA, MEDREIMB_IP, BENRES_IP, PPPYMT_IP, MEDREIMB_OP, BENRES_OP, PPPYMT_OP, MEDREIMB_CAR, BENRES_CAR, PPPYMT_CAR) \nvalues(:1,:2,:3,:4,:5,:6,:7,:8,:9,:10,:11,:12,:13,:14,:15,:16,:17,:18,:19,:20,:21,:22,:23,:24,:25,:26,:27,:28,:29,:30,:31,:32,:33)\"\"\"\ndf_list = bene08.values.tolist()\nn = 0\nfor i in bene08.iterrows():\n cur.execute(sql,df_list[n])\n n += 1\n \ncon.commit()",
"_____no_output_____"
],
[
"# Insert into table DASH_BENEFICIARY for 2009\n\nsql= \"\"\" INSERT INTO DASH_BENEFICIARY (FILE_YEAR, DESYNPUF_ID,BENE_BIRTH_DT, BENE_DEATH_DT, BENE_SEX_IDENT_CD,BENE_RACE_CD, \nBENE_ESRD_IND, SP_STATE_CODE, BENE_COUNTY_CD, BENE_HI_CVRAGE_TOT_MONS,BENE_SMI_CVRAGE_TOT_MONS, BENE_HMO_CVRAGE_TOT_MONS, \nPLAN_CVRG_MOS_NUM, SP_ALZHDMTA, SP_CHF,SP_CHRNKIDN, SP_CNCR, SP_COPD, SP_DEPRESSN,SP_DIABETES, SP_ISCHMCHT, SP_OSTEOPRS, \nSP_RA_OA, SP_STRKETIA, MEDREIMB_IP, BENRES_IP, PPPYMT_IP, MEDREIMB_OP, BENRES_OP, PPPYMT_OP, MEDREIMB_CAR, BENRES_CAR, PPPYMT_CAR) \nvalues(:1,:2,:3,:4,:5,:6,:7,:8,:9,:10,:11,:12,:13,:14,:15,:16,:17,:18,:19,:20,:21,:22,:23,:24,:25,:26,:27,:28,:29,:30,:31,:32,:33)\"\"\"\ndf_list = bene09.values.tolist()\nn = 0\nfor i in bene09.iterrows():\n cur.execute(sql,df_list[n])\n n += 1\n \ncon.commit()",
"_____no_output_____"
],
[
"# Insert into table DASH_BENEFICIARY for 2010\n\nsql= \"\"\" INSERT INTO DASH_BENEFICIARY (FILE_YEAR, DESYNPUF_ID,BENE_BIRTH_DT, BENE_DEATH_DT, BENE_SEX_IDENT_CD,BENE_RACE_CD, \nBENE_ESRD_IND, SP_STATE_CODE, BENE_COUNTY_CD, BENE_HI_CVRAGE_TOT_MONS,BENE_SMI_CVRAGE_TOT_MONS, BENE_HMO_CVRAGE_TOT_MONS, \nPLAN_CVRG_MOS_NUM, SP_ALZHDMTA, SP_CHF,SP_CHRNKIDN, SP_CNCR, SP_COPD, SP_DEPRESSN,SP_DIABETES, SP_ISCHMCHT, SP_OSTEOPRS, \nSP_RA_OA, SP_STRKETIA, MEDREIMB_IP, BENRES_IP, PPPYMT_IP, MEDREIMB_OP, BENRES_OP, PPPYMT_OP, MEDREIMB_CAR, BENRES_CAR, PPPYMT_CAR) \nvalues(:1,:2,:3,:4,:5,:6,:7,:8,:9,:10,:11,:12,:13,:14,:15,:16,:17,:18,:19,:20,:21,:22,:23,:24,:25,:26,:27,:28,:29,:30,:31,:32,:33)\"\"\"\ndf_list = bene10.values.tolist()\nn = 0\nfor i in bene10.iterrows():\n cur.execute(sql,df_list[n])\n n += 1\n \ncon.commit()",
"_____no_output_____"
],
[
"# con.close()",
"_____no_output_____"
]
],
[
[
"## Insert into DASH_CLAIM_CARRIER table",
"_____no_output_____"
],
[
"### DASH_CLAIM_CARRIER A\n",
"_____no_output_____"
]
],
[
[
"claimsA = pd.read_csv(\"s3://cms-dash-datasets/Data/DE1.0 Sample 3/DE1_0_2008_to_2010_Carrier_Claims_Sample_3A.csv\")\n# claimsB = pd.read_csv(\"s3n://cms-dash-datasets/Data/DE1.0 Sample 20/DE1_0_2008_to_2010_Carrier_Claims_Sample_1B.csv\")",
"_____no_output_____"
],
[
"# Take first 51 cols, move CLM_ID to front, convert two datetime columns\nclaims_A_toload = claimsA.iloc[: , :51]\nfirst_col = claims_A_toload.pop('CLM_ID')\nclaims_A_toload.insert(0,'CLM_ID',first_col)\nclaims_A_toload.columns\n\nA = pd.to_datetime(claims_A_toload['CLM_FROM_DT'])\nclaims_A_toload['CLM_FROM_DT'] = A.dt.date\n\nB = pd.to_datetime(claims_A_toload.CLM_THRU_DT)\nclaims_A_toload['CLM_THRU_DT'] = B.dt.date",
"_____no_output_____"
],
[
"# Convert necessary columns to string\nnonstr = claims_A_toload[['CLM_ID','CLM_FROM_DT','CLM_THRU_DT']]\nclaims_str = claims_A_toload.astype(str)\n\nclaims_str['CLM_ID']=nonstr['CLM_ID']\nclaims_str['CLM_FROM_DT']=nonstr['CLM_FROM_DT']\nclaims_str['CLM_THRU_DT']=nonstr['CLM_THRU_DT']",
"_____no_output_____"
],
[
"# Insert into DASH_CLAIM_CARRIER table\n\nsql=\"\"\" INSERT INTO DASH_CLAIM_CARRIER (\nCLM_ID, DESYNPUF_ID, CLM_FROM_DT, CLM_THRU_DT, ICD9_DGNS_CD_1, ICD9_DGNS_CD_2, ICD9_DGNS_CD_3, ICD9_DGNS_CD_4, ICD9_DGNS_CD_5, ICD9_DGNS_CD_6, \nICD9_DGNS_CD_7, ICD9_DGNS_CD_8, PRF_PHYSN_NPI_1, PRF_PHYSN_NPI_2, PRF_PHYSN_NPI_3, PRF_PHYSN_NPI_4, PRF_PHYSN_NPI_5, PRF_PHYSN_NPI_6, \nPRF_PHYSN_NPI_7, PRF_PHYSN_NPI_8, PRF_PHYSN_NPI_9, PRF_PHYSN_NPI_10, PRF_PHYSN_NPI_11, PRF_PHYSN_NPI_12, PRF_PHYSN_NPI_13, TAX_NUM_1, \nTAX_NUM_2, TAX_NUM_3, TAX_NUM_4, TAX_NUM_5, TAX_NUM_6, TAX_NUM_7, TAX_NUM_8, TAX_NUM_9, TAX_NUM_10, TAX_NUM_11, TAX_NUM_12, TAX_NUM_13, \nHCPCS_CD_1, HCPCS_CD_2, HCPCS_CD_3, HCPCS_CD_4, HCPCS_CD_5, HCPCS_CD_6,HCPCS_CD_7,HCPCS_CD_8,HCPCS_CD_9,HCPCS_CD_10,HCPCS_CD_11,HCPCS_CD_12,\nHCPCS_CD_13\n) \nvalues(:1,:2,:3,:4,:5,:6,:7,:8,:9,:10,:11,:12,:13,:14,:15,:16,:17,:18,:19,:20,:21,:22,:23,:24,:25,:26,:27,:28,:29,:30,:31,:32,:33\n,:34,:35,:36,:37,:38,:39,:40,:41,:42,:43,:44,:45,:46,:47,:48,:49,:50,:51\n)\"\"\"\ndf_list = claims_str.values.tolist()\nn = 0\nfor i in claims_str.iterrows():\n cur.execute(sql,df_list[n])\n n += 1\n \ncon.commit()",
"_____no_output_____"
]
],
[
[
"## DASH CLAIMS CARRIER B",
"_____no_output_____"
]
],
[
[
"claimsB = pd.read_csv(\"s3://cms-dash-datasets/Data/DE1.0 Sample 2/DE1_0_2008_to_2010_Carrier_Claims_Sample_2B.csv\")",
"_____no_output_____"
],
[
"# Take first 51 cols, move CLM ID to front, convert two datetime columns\nclaims_B_toload = claimsB.iloc[: , :51]\nfirst_col = claims_B_toload.pop('CLM_ID')\nclaims_B_toload.insert(0,'CLM_ID',first_col)\nclaims_B_toload.columns\n\nA = pd.to_datetime(claims_B_toload['CLM_FROM_DT'])\nclaims_B_toload['CLM_FROM_DT'] = A.dt.date\n\nB = pd.to_datetime(claims_B_toload.CLM_THRU_DT)\nclaims_B_toload['CLM_THRU_DT'] = B.dt.date\n\nnonstr = claims_B_toload[['CLM_ID','CLM_FROM_DT','CLM_THRU_DT']]\nclaims_str = claims_B_toload.astype(str)\n\nclaims_str['CLM_ID']=nonstr['CLM_ID']\nclaims_str['CLM_FROM_DT']=nonstr['CLM_FROM_DT']\nclaims_str['CLM_THRU_DT']=nonstr['CLM_THRU_DT']",
"_____no_output_____"
],
[
"sql=\"\"\" INSERT INTO DASH_CLAIM_CARRIER (\nCLM_ID, DESYNPUF_ID, CLM_FROM_DT, CLM_THRU_DT, ICD9_DGNS_CD_1, ICD9_DGNS_CD_2, ICD9_DGNS_CD_3, ICD9_DGNS_CD_4, ICD9_DGNS_CD_5, ICD9_DGNS_CD_6, \nICD9_DGNS_CD_7, ICD9_DGNS_CD_8, PRF_PHYSN_NPI_1, PRF_PHYSN_NPI_2, PRF_PHYSN_NPI_3, PRF_PHYSN_NPI_4, PRF_PHYSN_NPI_5, PRF_PHYSN_NPI_6, \nPRF_PHYSN_NPI_7, PRF_PHYSN_NPI_8, PRF_PHYSN_NPI_9, PRF_PHYSN_NPI_10, PRF_PHYSN_NPI_11, PRF_PHYSN_NPI_12, PRF_PHYSN_NPI_13, TAX_NUM_1, \nTAX_NUM_2, TAX_NUM_3, TAX_NUM_4, TAX_NUM_5, TAX_NUM_6, TAX_NUM_7, TAX_NUM_8, TAX_NUM_9, TAX_NUM_10, TAX_NUM_11, TAX_NUM_12, TAX_NUM_13, \nHCPCS_CD_1, HCPCS_CD_2, HCPCS_CD_3, HCPCS_CD_4, HCPCS_CD_5, HCPCS_CD_6,HCPCS_CD_7,HCPCS_CD_8,HCPCS_CD_9,HCPCS_CD_10,HCPCS_CD_11,HCPCS_CD_12,\nHCPCS_CD_13\n) \nvalues(:1,:2,:3,:4,:5,:6,:7,:8,:9,:10,:11,:12,:13,:14,:15,:16,:17,:18,:19,:20,:21,:22,:23,:24,:25,:26,:27,:28,:29,:30,:31,:32,:33\n,:34,:35,:36,:37,:38,:39,:40,:41,:42,:43,:44,:45,:46,:47,:48,:49,:50,:51\n)\"\"\"\ndf_list = claims_str.values.tolist()\nn = 0\nfor i in claims_str.iterrows():\n cur.execute(sql,df_list[n])\n n += 1\n \ncon.commit()",
"_____no_output_____"
]
],
[
[
"## Insert into DASH_CLAIM_INPATIENT",
"_____no_output_____"
]
],
[
[
"dtype_dic= {'AT_PHYSN_NPI':str, 'OP_PHYSN_NPI':str,'OT_PHYSN_NPI': str, 'CLM_FROM_DT':str, 'CLM_THRU_DT':str, 'CLM_ADMSN_DT':str, 'NCH_BENE_DSCHRG_DT':str}\ninpatient = pd.read_csv(\"s3://cms-dash-datasets/Data/DE1.0 Sample 1/DE1_0_2008_to_2010_Inpatient_Claims_Sample_1.csv\" , dtype = dtype_dic)\n\n# Take only Segment 1\nseg1 = inpatient.loc[inpatient['SEGMENT'] == 1]\n\n\n# Take first 36 columns and rearrange SEGMENT and CLM_ID\ninpatient_toload = seg1.iloc[: , :36]\nfirst_col = inpatient_toload.pop('SEGMENT')\ninpatient_toload.insert(0,'SEGMENT',first_col)\nsec = inpatient_toload.pop('CLM_ID')\ninpatient_toload.insert(0,'CLM_ID',sec)\n\n# Convert necessary columns\nA = pd.to_datetime(inpatient_toload.CLM_FROM_DT)\ninpatient_toload['CLM_FROM_DT'] = A.dt.date\n\nB = pd.to_datetime(inpatient_toload.CLM_THRU_DT)\ninpatient_toload['CLM_THRU_DT'] = B.dt.date\n\nC = pd.to_datetime(inpatient_toload.CLM_ADMSN_DT)\ninpatient_toload['CLM_ADMSN_DT'] = C.dt.date\n\nD = pd.to_datetime(inpatient_toload.NCH_BENE_DSCHRG_DT)\ninpatient_toload['NCH_BENE_DSCHRG_DT'] = D.dt.date\n\n# Fill NaN's with zeros\ninpatient_toload[['NCH_BENE_IP_DDCTBL_AMT']]=inpatient_toload[['NCH_BENE_IP_DDCTBL_AMT']].fillna(0.0)\ninpatient_toload['CLM_UTLZTN_DAY_CNT'] = (inpatient_toload['CLM_UTLZTN_DAY_CNT'].fillna(0)).astype(int)\n\n# inpatient_toload[['AT_PHYSN_NPI', 'OP_PHYSN_NPI', 'OT_PHYSN_NPI']] = inpatient_toload[['AT_PHYSN_NPI', 'OP_PHYSN_NPI', 'OT_PHYSN_NPI']].fillna(0)astype(int)\nall_columns = ['DESYNPUF_ID', 'PRVDR_NUM', 'AT_PHYSN_NPI','OP_PHYSN_NPI','OT_PHYSN_NPI','ADMTNG_ICD9_DGNS_CD', 'CLM_DRG_CD', \n'ICD9_DGNS_CD_1','ICD9_DGNS_CD_2','ICD9_DGNS_CD_3','ICD9_DGNS_CD_4','ICD9_DGNS_CD_5','ICD9_DGNS_CD_6','ICD9_DGNS_CD_7','ICD9_DGNS_CD_8','ICD9_DGNS_CD_9','ICD9_DGNS_CD_10',\n'ICD9_PRCDR_CD_1','ICD9_PRCDR_CD_2','ICD9_PRCDR_CD_3','ICD9_PRCDR_CD_4','ICD9_PRCDR_CD_5','ICD9_PRCDR_CD_6']\ninpatient_toload[all_columns] = inpatient_toload[all_columns].astype(str)\ninpatient_toload = 
inpatient_toload.reset_index().drop(columns=['index'])",
"_____no_output_____"
],
[
"# Insert into DASH_CLAIM_INPATIENT table\n\nsql=\"\"\"INSERT INTO DASH_CLAIM_INPATIENT (CLM_ID,SEGMENT,DESYNPUF_ID,CLM_FROM_DT,CLM_THRU_DT,PRVDR_NUM,CLM_PMT_AMT,NCH_PRMRY_PYR_CLM_PD_AMT,\nAT_PHYSN_NPI,OP_PHYSN_NPI,OT_PHYSN_NPI,CLM_ADMSN_DT,ADMTNG_ICD9_DGNS_CD,CLM_PASS_THRU_PER_DIEM_AMT,NCH_BENE_IP_DDCTBL_AMT,NCH_BENE_PTA_COINSRNC_LBLTY_AM,\nNCH_BENE_BLOOD_DDCTBL_LBLTY_AM,CLM_UTLZTN_DAY_CNT,NCH_BENE_DSCHRG_DT,CLM_DRG_CD,ICD9_DGNS_CD_1,ICD9_DGNS_CD_2,ICD9_DGNS_CD_3,ICD9_DGNS_CD_4,\nICD9_DGNS_CD_5,ICD9_DGNS_CD_6,ICD9_DGNS_CD_7,ICD9_DGNS_CD_8,ICD9_DGNS_CD_9,ICD9_DGNS_CD_10,ICD9_PRCDR_CD_1,ICD9_PRCDR_CD_2,ICD9_PRCDR_CD_3,\nICD9_PRCDR_CD_4,ICD9_PRCDR_CD_5,ICD9_PRCDR_CD_6) \nvalues(:1,:2,:3,:4,:5,:6,:7,:8,:9,:10,:11,:12,:13,:14,:15,:16,:17,:18,:19,:20,:21,:22,:23,:24,:25,:26,:27,:28,:29,:30,:31,:32,:33\n,:34,:35,:36)\"\"\"\n\ndf_list = inpatient_toload.values.tolist()\nn = 0\nfor i in inpatient_toload.iterrows():\n cur.execute(sql,df_list[n])\n n += 1\n \ncon.commit()",
"_____no_output_____"
]
],
[
[
"## DASH_CLAIM_OUTPATIENT",
"_____no_output_____"
]
],
[
[
"outpatient = pd.read_csv(\"s3://cms-dash-datasets/Data/DE1.0 Sample 2/DE1_0_2008_to_2010_Outpatient_Claims_Sample_2.csv\")",
"_____no_output_____"
],
[
"# Take first 36 columns and rearrange #SEGMENT and CLM_ID\noutpatient_toload = outpatient.iloc[: , :31]\nfirst_col = outpatient_toload.pop('SEGMENT')\noutpatient_toload.insert(0,'SEGMENT',first_col)\nsec = outpatient_toload.pop('CLM_ID')\noutpatient_toload.insert(0,'CLM_ID',sec)\n\n# Convert necessary columns\nA = pd.to_datetime(outpatient_toload['CLM_FROM_DT'])\noutpatient_toload['CLM_FROM_DT'] = A.dt.date\n\nB = pd.to_datetime(outpatient_toload.CLM_THRU_DT)\noutpatient_toload['CLM_THRU_DT'] = B.dt.date\n\nall_columns = ['PRVDR_NUM', 'AT_PHYSN_NPI','OP_PHYSN_NPI','OT_PHYSN_NPI','ICD9_DGNS_CD_1','ICD9_DGNS_CD_2','ICD9_DGNS_CD_3','ICD9_DGNS_CD_4','ICD9_DGNS_CD_5','ICD9_DGNS_CD_6',\n'ICD9_DGNS_CD_7','ICD9_DGNS_CD_8','ICD9_DGNS_CD_9','ICD9_DGNS_CD_10','ICD9_PRCDR_CD_1','ICD9_PRCDR_CD_2','ICD9_PRCDR_CD_3','ICD9_PRCDR_CD_4','ICD9_PRCDR_CD_5','ICD9_PRCDR_CD_6','ADMTNG_ICD9_DGNS_CD']\noutpatient_toload[all_columns] = outpatient_toload[all_columns].astype(str)",
"_____no_output_____"
],
[
"# Insert into DASH_CLAIM_OUTPATIENT table\n\nsql=\"\"\"INSERT INTO DASH_CLAIM_OUTPATIENT (CLM_ID,SEGMENT,DESYNPUF_ID,CLM_FROM_DT,CLM_THRU_DT,PRVDR_NUM,CLM_PMT_AMT,NCH_PRMRY_PYR_CLM_PD_AMT,\nAT_PHYSN_NPI,OP_PHYSN_NPI,OT_PHYSN_NPI,NCH_BENE_BLOOD_DDCTBL_LBLTY_AM,ICD9_DGNS_CD_1,ICD9_DGNS_CD_2,ICD9_DGNS_CD_3,ICD9_DGNS_CD_4,\nICD9_DGNS_CD_5,ICD9_DGNS_CD_6,ICD9_DGNS_CD_7,ICD9_DGNS_CD_8,ICD9_DGNS_CD_9,ICD9_DGNS_CD_10,ICD9_PRCDR_CD_1,ICD9_PRCDR_CD_2,ICD9_PRCDR_CD_3,\nICD9_PRCDR_CD_4,ICD9_PRCDR_CD_5,ICD9_PRCDR_CD_6,NCH_BENE_PTB_DDCTBL_AMT,NCH_BENE_PTB_COINSRNC_AMT,ADMTNG_ICD9_DGNS_CD) \nvalues(:1,:2,:3,:4,:5,:6,:7,:8,:9,:10,:11,:12,:13,:14,:15,:16,:17,:18,:19,:20,:21,:22,:23,:24,:25,:26,:27,:28,:29,:30,:31)\"\"\"\ndf_list = outpatient_toload.values.tolist()\nn = 0\nfor i in outpatient_toload.iterrows():\n cur.execute(sql,df_list[n])\n n += 1\n \ncon.commit()",
"_____no_output_____"
]
],
[
[
"# INSERT INTO CENSUS",
"_____no_output_____"
]
],
[
[
"census = pd.read_csv(\"s3://cms-dash-datasets/Data/Census/census_acs20195yr_county (1).csv\", encoding='unicode_escape')",
"_____no_output_____"
],
[
"census['FIPS'] = census['FIPS'].astype(str).apply(lambda x: x.zfill(5))\ncensus['FIPS']",
"_____no_output_____"
],
[
"# Insert into DASH_CENSUS table\n\nsql=\"\"\"INSERT INTO DASH_CENSUS (\nFIPS,COUNTY_NAME,EST_HBT_TH_1,EST_HBT_TH_2,EST_ANC_TP_1,EST_ANC_TP_2,EST_ANC_TP_3,EST_ANC_TP_4,EST_ANC_TP_5,EST_ANC_TP_6,EST_ANC_TP_7,EST_ANC_TP_8,EST_ANC_TP_9,EST_ANC_TP_10,EST_ANC_TP_11,EST_ANC_TP_12,EST_ANC_TP_13,EST_ANC_TP_14,EST_ANC_TP_15,EST_ANC_TP_16,EST_ANC_TP_17,EST_ANC_TP_18,EST_ANC_TP_19,EST_ANC_TP_20,EST_ANC_TP_21,EST_ANC_TP_22,EST_ANC_TP_23,EST_ANC_TP_24,EST_ANC_TP_25,EST_ANC_TP_26,EST_ANC_TP_27,EST_BR_THU_1,EST_BR_THU_2,EST_BR_THU_3,EST_BR_THU_4,\nEST_BR_THU_5,EST_BR_THU_6,EST_BR_THU_7,EST_CTZN_VP_1,EST_CTZN_VP_2,EST_CTZN_VP_3,EST_COW_1,EST_COW_2,EST_COW_3,EST_COW_4,EST_COW_5,EST_CTW_1,EST_CTW_2,EST_CTW_3,EST_CTW_4,EST_CTW_5,EST_CTW_6,EST_CTW_7,EST_CTW_8,EST_CNI_TH_1,EST_CNI_TH_2,EST_CNI_TH_3,EST_DIS_1,EST_DIS_2,EST_DIS_3,EST_DIS_4,EST_DIS_5,EST_DIS_6,EST_DIS_7,EST_DIS_8,EST_EA_1,EST_EA_2,EST_EA_3,EST_EA_4,EST_EA_5,EST_EA_6,EST_EA_7,EST_EA_8,EST_EA_9,EST_EA_10,EST_EMP_1,EST_EMP_2,EST_EMP_3,EST_EMP_4,EST_EMP_5,EST_EMP_6,\nEST_EMP_7,EST_EMP_8,EST_EMP_9,EST_EMP_10,EST_EMP_11,EST_EMP_12,EST_EMP_13,EST_EMP_14,EST_EMP_15,EST_EMP_16,EST_EMP_17,EST_FERT_1,EST_FERT_2,EST_FERT_3,EST_FERT_4,EST_FERT_5,EST_FERT_6,EST_FERT_7,EST_GP_1,EST_GP_2,EST_GP_3,EST_GP_4,EST_GP_5,EST_GP_6,EST_GP_7,EST_GP_8,EST_GP_9,EST_GRAPI_1,EST_GRAPI_2,EST_GRAPI_3,EST_GRAPI_4,EST_GRAPI_5,EST_GRAPI_6,EST_GRAPI_7,EST_GRAPI_8,EST_GR_1,EST_GR_2,EST_GR_3,EST_GR_4,EST_GR_5,EST_GR_6,EST_GR_7,EST_GR_8,EST_GR_9,EST_GR_10,EST_HIC_1,EST_HIC_2,\nEST_HIC_3,EST_HIC_4,EST_HIC_5,EST_HIC_6,EST_HIC_7,EST_HIC_8,EST_HIC_9,EST_HIC_10,EST_HIC_11,EST_HIC_12,EST_HIC_13,EST_HIC_14,EST_HIC_15,EST_HIC_16,EST_HIC_17,EST_HIC_18,EST_HIC_19,EST_HIC_20,EST_HIC_21,EST_HIC_22,EST_HIC_23,EST_HIC_24,EST_HISP_1,EST_HISP_2,EST_HISP_3,EST_HISP_4,EST_HISP_5,EST_HISP_6,EST_HISP_7,EST_HISP_8,EST_HISP_9,EST_HISP_10,EST_HISP_11,EST_HISP_12,EST_HISP_13,EST_HISP_14,EST_HISP_15,EST_HISP_16,EST_HEAT_1,EST_HEAT_2,EST_HEAT_3,EST_HEAT_4,EST_HEAT_5,EST_HEAT_6,EST_HEAT_
7,\nEST_HEAT_8,EST_HEAT_9,EST_HEAT_10,EST_HHT_1,EST_HHT_2,EST_HHT_3,EST_HHT_4,EST_HHT_5,EST_HHT_6,EST_HHT_7,EST_HHT_8,EST_HHT_9,EST_HHT_10,EST_HHT_11,EST_HHT_12,EST_HHT_13,EST_HHT_14,EST_HHT_15,EST_HOCC_1,EST_HOCC_2,EST_HOCC_3,EST_HOCC_4,EST_HOCC_5,EST_HT_1,EST_HT_2,EST_HT_3,EST_HT_4,EST_HT_5,EST_INB_1,EST_INB_2,EST_INB_3,EST_INB_4,EST_INB_5,EST_INB_6,EST_INB_7,EST_INB_8,EST_INB_9,EST_INB_10,EST_INB_11,EST_INB_12,EST_INB_13,EST_INB_14,EST_INB_15,EST_INB_16,EST_INB_17,EST_INB_18,EST_INB_19,EST_INB_20,EST_INB_21,\nEST_INB_22,EST_INB_23,EST_INB_24,EST_INB_25,EST_INB_26,EST_INB_27,EST_INB_28,EST_INB_29,EST_INB_30,EST_INB_31,EST_INB_32,EST_INB_33,EST_INB_34,EST_INB_35,EST_INB_36,EST_INB_37,EST_INB_38,EST_INB_39,EST_INB_40,EST_INB_41,EST_INB_42,EST_INB_43,EST_INB_44,EST_IND_1,EST_IND_2,EST_IND_3,EST_IND_4,EST_IND_5,EST_IND_6,EST_IND_7,EST_IND_8,EST_IND_9,EST_IND_10,EST_IND_11,EST_IND_12,EST_IND_13,EST_IND_14,EST_LANG_1,EST_LANG_2,EST_LANG_3,EST_LANG_4,EST_LANG_5,EST_LANG_6,EST_LANG_7,EST_LANG_8,EST_LANG_9,EST_LANG_10,EST_LANG_11,EST_LANG_12,\nEST_MRTL_1,EST_MRTL_2,EST_MRTL_3,EST_MRTL_4,EST_MRTL_5,EST_MRTL_6,EST_MRTL_7,EST_MRTL_8,EST_MRTL_9,EST_MRTL_10,EST_MRTL_11,EST_MRTL_12,EST_MRTG_1,EST_MRTG_2,EST_MRTG_3,EST_OPR_1,EST_OPR_2,EST_OPR_3,EST_OPR_4,EST_OCC_1,EST_OCC_2,EST_OCC_3,EST_OCC_4,EST_OCC_5,EST_OCC_6,EST_BPL_1,EST_BPL_2,EST_BPL_3,EST_BPL_4,EST_BPL_5,EST_BPL_6,EST_BPL_7,EST_BPL_8,EST_BPL_9,EST_BPL_10,EST_BPL_11,EST_BPL_12,EST_BPL_13,EST_BPL_14,EST_BPL_15,EST_BPL_16,EST_BPL_17,EST_BPL_18,EST_BPL_19,EST_POB_1,EST_POB_2,EST_POB_3,EST_POB_4,EST_POB_5,EST_POB_6,EST_POB_7,EST_RACE_1,EST_RACE_2,\nEST_RACE_3,EST_RACE_4,EST_RACE_5,EST_RACE_6,EST_RACE_7,EST_RACE_8,EST_RACE_9,EST_RACE_10,EST_RACE_11,EST_RACE_12,EST_RACE_13,EST_RACE_14,EST_RACE_15,EST_RACE_16,EST_RACE_17,EST_RACE_18,EST_RACE_19,EST_RACE_20,EST_RACE_21,EST_RACE_22,EST_RACE_23,EST_RACE_24,EST_RACE_25,EST_RACE_26,EST_RACE_27,EST_RACE_28,EST_RACE_29,EST_RACE_30,EST_RACE_31,EST_RACE_32,EST_RACE_33,EST_RACE_34,EST_RACE
_35,EST_RACE_36,EST_RACE_37,EST_RLTNSHP_1,EST_RLTNSHP_2,EST_RLTNSHP_3,EST_RLTNSHP_4,EST_RLTNSHP_5,EST_RLTNSHP_6,EST_RLTNSHP_7,EST_RSDNC_1,EST_RSDNC_2,EST_RSDNC_3,EST_RSDNC_4,EST_RSDNC_5,\nEST_RSDNC_6,EST_RSDNC_7,EST_RSDNC_8,EST_ROOM_1,EST_ROOM_2,EST_ROOM_3,EST_ROOM_4,EST_ROOM_5,EST_ROOM_6,EST_ROOM_7,EST_ROOM_8,EST_ROOM_9,EST_ROOM_10,EST_ROOM_11,EST_SCHOOL_1,EST_SCHOOL_2,EST_SCHOOL_3,EST_SCHOOL_4,EST_SCHOOL_5,EST_SCHOOL_6,EST_SEL_CHAR_1,EST_SEL_CHAR_2,EST_SEL_CHAR_3,EST_SEL_CHAR_4,EST_SMOC_1,EST_SMOC_2,EST_SMOC_3,EST_SMOC_4,EST_SMOC_5,EST_SMOC_6,EST_SMOC_7,EST_SMOC_8,EST_SMOC_9,EST_SMOC_10,EST_SMOC_11,EST_SMOC_12,EST_SMOC_13,EST_SMOC_14,EST_SMOC_15,EST_SMOC_16,EST_SMOC_17,EST_SMOCAPI_1,EST_SMOCAPI_2,EST_SMOCAPI_3,EST_SMOCAPI_4,EST_SMOCAPI_5,EST_SMOCAPI_6,\nEST_SMOCAPI_7,EST_SMOCAPI_8,EST_SMOCAPI_9,EST_SMOCAPI_10,EST_SMOCAPI_11,EST_SMOCAPI_12,EST_SMOCAPI_13,EST_SMOCAPI_14,EST_SMOCAPI_15,EST_SMOCAPI_16,EST_SEX_AGE_1,EST_SEX_AGE_2,EST_SEX_AGE_3,EST_SEX_AGE_4,EST_SEX_AGE_5,EST_SEX_AGE_6,EST_SEX_AGE_7,EST_SEX_AGE_8,EST_SEX_AGE_9,EST_SEX_AGE_10,EST_SEX_AGE_11,EST_SEX_AGE_12,EST_SEX_AGE_13,EST_SEX_AGE_14,EST_SEX_AGE_15,EST_SEX_AGE_16,EST_SEX_AGE_17,EST_SEX_AGE_18,EST_SEX_AGE_19,EST_SEX_AGE_20,EST_SEX_AGE_21,EST_SEX_AGE_22,EST_SEX_AGE_23,EST_SEX_AGE_24,EST_SEX_AGE_25,EST_SEX_AGE_26,EST_SEX_AGE_27,EST_SEX_AGE_28,EST_SEX_AGE_29,EST_SEX_AGE_30,EST_SEX_AGE_31,\nEST_SEX_AGE_32,EST_THU,EST_CTZNSHP_1,EST_CTZNSHP_2,EST_CTZNSHP_3,EST_UNIT_1,EST_UNIT_2,EST_UNIT_3,EST_UNIT_4,EST_UNIT_5,EST_UNIT_6,EST_UNIT_7,EST_UNIT_8,EST_UNIT_9,EST_UNIT_10,EST_OWNER_UNIT_1,EST_OWNER_UNIT_2,EST_OWNER_UNIT_3,EST_OWNER_UNIT_4,EST_OWNER_UNIT_5,EST_OWNER_UNIT_6,EST_OWNER_UNIT_7,EST_OWNER_UNIT_8,EST_OWNER_UNIT_9,EST_OWNER_UNIT_10,EST_VEH_1,EST_VEH_2,EST_VEH_3,EST_VEH_4,EST_VEH_5,EST_VET_1,EST_VET_2,EST_FOREIGN_1,EST_FOREIGN_2,EST_FOREIGN_3,EST_FOREIGN_4,EST_FOREIGN_5,EST_FOREIGN_6,EST_FOREIGN_7,EST_MOVE_YEAR_1,EST_MOVE_YEAR_2,EST_MOVE_YEAR_3,EST_MOVE_YEAR_4,\nEST_MOVE_YEAR_5,EST_MOVE_YEAR_6,EST_MOVE_YEAR_7,
EST_US_ENTRY_1,EST_US_ENTRY_2,EST_US_ENTRY_3,EST_US_ENTRY_4,EST_US_ENTRY_5,EST_US_ENTRY_6,EST_US_ENTRY_7,EST_BUILT_YEAR_1,EST_BUILT_YEAR_2,EST_BUILT_YEAR_3,EST_BUILT_YEAR_4,EST_BUILT_YEAR_5,EST_BUILT_YEAR_6,EST_BUILT_YEAR_7,EST_BUILT_YEAR_8,EST_BUILT_YEAR_9,EST_BUILT_YEAR_10,EST_BUILT_YEAR_11\n) \nvalues(:1,:2,:3,:4,:5,:6,:7,:8,:9,:10,:11,:12,:13,:14,:15,:16,:17,:18,:19,:20,:21,:22,:23,:24,:25,:26,:27,:28,:29,:30,:31,:32,:33,:34,:35,:36,:37,:38,:39,:40,:41,:42,:43,:44,:45\n,:46,:47,:48,:49,:50,:51,:52,:53,:54,:55,:56,:57,:58,:59,:60,:61,:62,:63,:64,:65,:66,:67,:68,:69,:70,:71,:72,:73,:74,:75,:76,:77,:78,:79,:80,:81,:82,:83,:84,:85,:86,:87,:88,:89,:90\n,:91,:92,:93,:94,:95,:96,:97,:98,:99,:100,:101,:102,:103,:104,:105,:106,:107,:108,:109,:110,:111,:112,:113,:114,:115,:116,:117,:118,:119,:120,:121,:122,:123,:124,:125,:126,:127,:128,:129\n,:130,:131,:132,:133,:134,:135,:136,:137,:138,:139,:140,:141,:142,:143,:144,:145,:146,:147,:148,:149,:150,:151,:152,:153,:154,:155,:156,:157,:158,:159,:160,:161,:162,:163,:164,:165,:166,:167,:168\n,:169,:170,:171,:172,:173,:174,:175,:176,:177,:178,:179,:180,:181,:182,:183,:184,:185,:186,:187,:188,:189,:190,:191,:192,:193,:194,:195,:196,:197,:198,:199,:200,:201,:202,:203,:204,:205,:206,:207\n,:208,:209,:210,:211,:212,:213,:214,:215,:216,:217,:218,:219,:220,:221,:222,:223,:224,:225,:226,:227,:228,:229,:230,:231,:232,:233,:234,:235,:236,:237,:238,:239,:240,:241,:242,:243,:244,:245,:246,:247,:248,:249\n,:250,:251,:252,:253,:254,:255,:256,:257,:258,:259,:260,:261,:262,:263,:264,:265,:266,:267,:268,:269,:270,:271,:272,:273,:274,:275,:276,:277,:278,:279,:280,:281,:282,:283,:284,:285,:286,:287,:288,:289,:290\n,:291,:292,:293,:294,:295,:296,:297,:298,:299,:300,:301,:302,:303,:304,:305,:306,:307,:308,:309,:310,:311,:312,:313,:314,:315,:316,:317,:318,:319,:320,:321,:322,:323,:324,:325,:326,:327,:328,:329,:330,:331,:332,:333\n,:334,:335,:336,:337,:338,:339,:340,:341,:342,:343,:344,:345,:346,:347,:348,:349,:350,:351,:352,:353,:354,:355,:356,:357,:
358,:359,:360,:361,:362,:363,:364,:365,:366,:367,:368,:369,:370,:371,:372,:373,:374,:375,:376,:377,:378,:379,:380\n,:381,:382,:383,:384,:385,:386,:387,:388,:389,:390,:391,:392,:393,:394,:395,:396,:397,:398,:399,:400,:401,:402,:403,:404,:405,:406,:407,:408,:409,:410,:411,:412,:413,:414,:415,:416,:417,:418,:419,:420,:421,:422,:423,:424,:425,:426,:427,:428,:429,:430\n,:431,:432,:433,:434,:435,:436,:437,:438,:439,:440,:441,:442,:443,:444,:445,:446,:447,:448,:449,:450,:451,:452,:453,:454,:455,:456,:457,:458,:459,:460,:461,:462,:463,:464,:465,:466,:467,:468\n,:469,:470,:471,:472,:473,:474,:475,:476,:477,:478,:479,:480,:481,:482,:483,:484,:485,:486,:487,:488,:489,:490,:491,:492,:493,:494,:495,:496,:497,:498,:499,:500,:501,:502,:503,:504,:505,:506,:507,:508,:509,:510,:511,:512\n,:513,:514,:515,:516,:517,:518,:519,:520,:521,:522, :523)\"\"\"\ndf_list = census.values.tolist()\nn = 0\nfor i in census.iterrows():\n cur.execute(sql,df_list[n])\n n += 1\n \ncon.commit()",
"_____no_output_____"
]
],
[
[
"# INSERT INTO PERCRIPTIONS DRUGS",
"_____no_output_____"
]
],
[
[
"drugs = pd.read_csv(\"s3://cms-dash-datasets/Data/DE1.0 Sample 2/DE1_0_2008_to_2010_Prescription_Drug_Events_Sample_2.csv\")",
"_____no_output_____"
],
[
"# Rearrange columns and convert necessary columns \nfirst_col = drugs.pop('PDE_ID')\ndrugs.insert(0,'PDE_ID',first_col)\n\nall_columns = ['PDE_ID', 'DESYNPUF_ID','PROD_SRVC_ID',]\ndrugs[all_columns] = drugs[all_columns].astype(str)\n\n# Convert date columns\nA = pd.to_datetime(drugs['SRVC_DT'])\ndrugs['SRVC_DT'] = A.dt.date",
"_____no_output_____"
],
[
"# Insert into DASH_DRUG_PRESCRIPTION table\n\nsql=\"\"\"INSERT INTO DASH_DRUG_PRESCRIPTION (PDE_ID, DESYNPUF_ID, SRVC_DT,PROD_SRVC_ID,QTY_DSPNSD_NUM,DAYS_SUPLY_NUM,PTNT_PAY_AMT,TOT_RX_CST_AMT) \nvalues(:1,:2,:3,:4,:5,:6,:7,:8)\"\"\"\ndf_list = drugs.values.tolist()\nn = 0\nfor i in drugs.iterrows():\n cur.execute(sql,df_list[n])\n n += 1\n \ncon.commit()",
"_____no_output_____"
]
],
[
[
"## Insert NLP data",
"_____no_output_____"
]
],
[
[
"# Import the two NLP files\nnlp = pd.read_csv(\"s3://cms-dash-datasets/Jason-NLP/output.csv\")\nnlp_xlsx = pd.read_csv(\"s3://cms-dash-datasets/Jason-NLP/Review Data (Hospital Review Data for NLP processing)_Review Data.csv\")\n# Rename columns\nnlp_xlsx = nlp_xlsx.rename(columns={'At Physn Npi':'AT_PHYSN_API'})\nnlp2 = nlp_xlsx[['AT_PHYSN_API','Review Comment']]",
"_____no_output_____"
],
[
"# Join the two NLP files\nnlp_load = pd.merge(left=nlp, right=nlp2, left_on ='AT_PHYSN_NPI', right_on = 'AT_PHYSN_API')\nnlp_load2 = nlp_load[['AT_PHYSN_NPI','Review Comment','label']].rename(columns={'Review Comment':'REVIEW_TEXT','label':'RATING_VALUE'})",
"_____no_output_____"
],
[
"# Create index column\nnlp_load2['REVIEW_SID']=nlp_load2.index\n\n# Rearrange columns\nnlp_load3 = nlp_load2[['REVIEW_SID','AT_PHYSN_NPI','REVIEW_TEXT','RATING_VALUE']]",
"_____no_output_____"
],
[
"# Insert into DASH_PROVIDER_REVIEW Table\n\nsql=\"\"\"INSERT INTO DASH_PROVIDER_REVIEW (REVIEW_SID, AT_PHYSN_NPI, REVIEW_TEXT, RATING_VALUE ) \nvalues(:1,:2,:3, :4)\"\"\"\ndf_list = nlp_load3.values.tolist()\nn = 0\nfor i in nlp_load3.iterrows():\n cur.execute(sql,df_list[n])\n n += 1\n \ncon.commit()",
"_____no_output_____"
],
[
"con.close",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
4abfc1af3ec0d8c6178984d22a6a8417cb34d750
| 13,241 |
ipynb
|
Jupyter Notebook
|
markdown_generator/publications.ipynb
|
nkouagou/nkouagou.github.io
|
9c789520439f2a57672d8f6c0c4613715e6a16d5
|
[
"MIT"
] | null | null | null |
markdown_generator/publications.ipynb
|
nkouagou/nkouagou.github.io
|
9c789520439f2a57672d8f6c0c4613715e6a16d5
|
[
"MIT"
] | null | null | null |
markdown_generator/publications.ipynb
|
nkouagou/nkouagou.github.io
|
9c789520439f2a57672d8f6c0c4613715e6a16d5
|
[
"MIT"
] | null | null | null | 33.436869 | 448 | 0.526093 |
[
[
[
"# Publications markdown generator for academicpages\n\nTakes a TSV of publications with metadata and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook ([see more info here](http://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/what_is_jupyter.html)). The core python code is also in `publications.py`. Run either from the `markdown_generator` folder after replacing `publications.tsv` with one containing your data.\n\nTODO: Make this work with BibTex and other databases of citations, rather than Stuart's non-standard TSV format and citation style.\n",
"_____no_output_____"
],
[
"## Data format\n\nThe TSV needs to have the following columns: pub_date, title, venue, excerpt, citation, site_url, and paper_url, with a header at the top. \n\n- `excerpt` and `paper_url` can be blank, but the others must have values. \n- `pub_date` must be formatted as YYYY-MM-DD.\n- `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper. The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/publications/YYYY-MM-DD-[url_slug]`\n\nThis is how the raw file looks (it doesn't look pretty, use a spreadsheet or other program to edit and create).",
"_____no_output_____"
]
],
[
[
"!cat publications.tsv",
"pub_date\ttitle\tvenue\texcerpt\tcitation\turl_slug\tpaper_url\n2009-10-01\tPaper Title Number 1\tJournal 1\tThis paper is about the number 1. The number 2 is left for future work.\tYour Name, You. (2009). \"Paper Title Number 1.\" <i>Journal 1</i>. 1(1).\tpaper-title-number-1\thttp://academicpages.github.io/files/paper1.pdf\n2010-10-01\tPaper Title Number 2\tJournal 1\tThis paper is about the number 2. The number 3 is left for future work.\tYour Name, You. (2010). \"Paper Title Number 2.\" <i>Journal 1</i>. 1(2).\tpaper-title-number-2\thttp://academicpages.github.io/files/paper2.pdf\n2015-10-01\tPaper Title Number 3\tJournal 1\tThis paper is about the number 3. The number 4 is left for future work.\tYour Name, You. (2015). \"Paper Title Number 3.\" <i>Journal 1</i>. 1(3).\tpaper-title-number-3\thttp://academicpages.github.io/files/paper3.pdf"
]
],
[
[
"## Import pandas\n\nWe are using the very handy pandas library for dataframes.",
"_____no_output_____"
]
],
[
[
"import pandas as pd",
"_____no_output_____"
]
],
[
[
"## Import TSV\n\nPandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\\t`.\n\nI found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.",
"_____no_output_____"
]
],
[
[
"publications = pd.read_csv(\"publications.tsv\", sep=\"\\t\", header=0)\npublications\n",
"_____no_output_____"
]
],
[
[
"## Escape special characters\n\nYAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML encoded equivilents. This makes them look not so readable in raw format, but they are parsed and rendered nicely.",
"_____no_output_____"
]
],
[
[
"html_escape_table = {\n \"&\": \"&\",\n '\"': \""\",\n \"'\": \"'\"\n }\n\ndef html_escape(text):\n \"\"\"Produce entities within text.\"\"\"\n return \"\".join(html_escape_table.get(c,c) for c in text)",
"_____no_output_____"
]
],
[
[
"## Creating the markdown files\n\nThis is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatentate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page.",
"_____no_output_____"
]
],
[
[
"import os\nfor row, item in publications.iterrows():\n \n md_filename = str(item.pub_date) + \"-\" + item.url_slug + \".md\"\n html_filename = str(item.pub_date) + \"-\" + item.url_slug\n year = item.pub_date[:4]\n \n ## YAML variables\n \n md = \"---\\ntitle: \\\"\" + item.title + '\"\\n'\n \n md += \"\"\"collection: publications\"\"\"\n \n md += \"\"\"\\npermalink: /publication/\"\"\" + html_filename\n \n if len(str(item.excerpt)) > 5:\n md += \"\\nexcerpt: '\" + html_escape(item.excerpt) + \"'\"\n \n md += \"\\ndate: \" + str(item.pub_date) \n \n md += \"\\nvenue: '\" + html_escape(item.venue) + \"'\"\n \n if len(str(item.paper_url)) > 5:\n md += \"\\npaperurl: '\" + item.paper_url + \"'\"\n \n md += \"\\ncitation: '\" + html_escape(item.citation) + \"'\"\n \n md += \"\\n---\"\n \n ## Markdown description for individual page\n \n if len(str(item.excerpt)) > 5:\n md += \"\\n\" + html_escape(item.excerpt) + \"\\n\"\n \n if len(str(item.paper_url)) > 5:\n md += \"\\n[Download paper here](\" + item.paper_url + \")\\n\" \n \n md += \"\\nRecommended citation: \" + item.citation\n \n md_filename = os.path.basename(md_filename)\n \n with open(\"../_publications/\" + md_filename, 'w') as f:\n f.write(md)",
"_____no_output_____"
]
],
[
[
"These files are in the publications directory, one directory below where we're working from.",
"_____no_output_____"
]
],
[
[
"!ls ../_publications/",
"2009-10-01-paper-title-number-1.md 2015-10-01-paper-title-number-3.md\n2010-10-01-paper-title-number-2.md\n"
],
[
"!cat ../_publications/2009-10-01-paper-title-number-1.md",
"---\ntitle: \"Paper Title Number 1\"\ncollection: publications\npermalink: /publication/2009-10-01-paper-title-number-1\nexcerpt: 'This paper is about the number 1. The number 2 is left for future work.'\ndate: 2009-10-01\nvenue: 'Journal 1'\npaperurl: 'http://academicpages.github.io/files/paper1.pdf'\ncitation: 'Your Name, You. (2009). "Paper Title Number 1." <i>Journal 1</i>. 1(1).'\n---\nThis paper is about the number 1. The number 2 is left for future work.\n\n[Download paper here](http://academicpages.github.io/files/paper1.pdf)\n\nRecommended citation: Your Name, You. (2009). \"Paper Title Number 1.\" <i>Journal 1</i>. 1(1)."
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
4abfdb518b48e49748fd1677ab4b0ce964b7def7
| 527,038 |
ipynb
|
Jupyter Notebook
|
Python-Analysis/00_nii_videodata_jsonl_parse.ipynb
|
wajimax/Python-Analysis
|
a13a47a4412ca1ab7a3a41d21d53498d2a9900b0
|
[
"MIT"
] | null | null | null |
Python-Analysis/00_nii_videodata_jsonl_parse.ipynb
|
wajimax/Python-Analysis
|
a13a47a4412ca1ab7a3a41d21d53498d2a9900b0
|
[
"MIT"
] | null | null | null |
Python-Analysis/00_nii_videodata_jsonl_parse.ipynb
|
wajimax/Python-Analysis
|
a13a47a4412ca1ab7a3a41d21d53498d2a9900b0
|
[
"MIT"
] | null | null | null | 25.135349 | 165 | 0.522304 |
[
[
[
"#!/usr/bin/python\n# coding: UTF-8\nimport json\nimport pandas as pd\nimport logging\nimport codecs",
"_____no_output_____"
],
[
"SimulationName=\"nii_videodata_jsonl_parse\"",
"_____no_output_____"
],
[
"#ログ設定\nlog_fmt = '%(asctime)s- %(name)s - %(levelname)s - %(message)s'\nlogger_name = \"LOGGER\"\nlogging.basicConfig(filename=\"./Log/\" + SimulationName + \".log\",format=log_fmt, level=logging.DEBUG)\nlogger = logging.getLogger(logger_name)",
"_____no_output_____"
],
[
"#処理開始\nlogger.info(\"----処理開始----\")",
"_____no_output_____"
],
[
"#カラム名を設定\nvideo_info_columns = ['video_id', 'watch_num', 'comment_num','mylist_num','title','category','upload_time','file_type','length','size_high','size_low']",
"_____no_output_____"
],
[
"#データフレームを作成\nvideo_info_data = pd.DataFrame([],columns=video_info_columns)\ntag_info_data = pd.DataFrame([])",
"_____no_output_____"
],
[
"num = 0\n\n#ファイルを指定して1行ずつ処理\nwith codecs.open('./Input/jsonl_merge.jsonl', 'rb', 'utf-8') as jsonl_file: \n#with codecs.open('./Input/0000.jsonl', 'rb', 'utf-8') as jsonl_file:\n #1行読み込み\n jsonl_file_readline = jsonl_file.readline()\n\n #1行jsonパース\n json_object = json.loads(jsonl_file_readline)\n \n while True:\n try:\n #リスト作成\n video_info_list = []\n tag_info_list = []\n\n ##################ここからファイルの処理##################\n ##カラム名\n #['video_id', 'watch_num', 'comment_num','mylist_num','title','category','upload_time','file_type','length','size_high','size_low']\n\n #リストにパースした結果を追加\n video_info_list.append(json_object[\"video_id\"].encode('utf-8'))\n video_info_list.append(str(json_object[\"watch_num\"]))\n video_info_list.append(str(json_object[\"comment_num\"]))\n video_info_list.append(str(json_object[\"mylist_num\"]))\n video_info_list.append(json_object[\"title\"].encode('utf-8'))\n\n if json_object[\"category\"] is not None:\n video_info_list.append(json_object[\"category\"].encode('utf-8'))\n else:\n video_info_list.append(\"\")\n\n video_info_list.append(str(json_object[\"upload_time\"]))\n video_info_list.append(json_object[\"file_type\"].encode('utf-8'))\n video_info_list.append(str(json_object[\"length\"]))\n video_info_list.append(str(json_object[\"size_high\"]))\n video_info_list.append(str(json_object[\"size_low\"]))\n\n #データフレームに変換\n video_info_data_list = pd.DataFrame(video_info_list)\n video_info_data_list = video_info_data_list.T\n \n #出力用のデータフレームに追加\n video_info_data = pd.concat([video_info_data, video_info_data_list], axis=0)\n \n if (num % 100 == 0):\n logger.info(\"ファイル出力の文書番号:\" + str(num))\n \n #ファイル出力\n video_info_data.to_csv(\"./Output/video_tsv/video_info_data_\" + str(num) + \".tsv\",encoding='utf-8',header=False, index=False,sep=\"\\t\")\n \n #初期化\n video_info_data = pd.DataFrame([],columns=video_info_columns)\n \n\n ##################ここからタグの処理##################\n ##video_id+タグ\n #タグの配列を取得\n tags = json_object[\"tags\"]\n\n 
#タグリストにIDを設定\n tag_info_list.append(json_object[\"video_id\"].encode('utf-8'))\n\n for tag in tags:\n tag_info_list.append(tag.encode('utf-8'))\n\n #タグのデータフレームに変換\n tag_info_data_list = pd.DataFrame(tag_info_list)\n tag_info_data_list = tag_info_data_list.T\n\n #タグのデータフレームに追加\n tag_info_data = pd.concat([tag_info_data, tag_info_data_list], axis=0)\n \n if (num % 100 == 0):\n logger.info(\"タグファイル出力の文書番号:\" + str(num))\n \n #タグのファイル出力\n tag_info_data.to_csv(\"./Output/tag_tsv/tag_info_data_\" + str(num) + \".tsv\",encoding='utf-8',header=False, index=False,sep=\"\\t\")\n \n #初期化\n tag_info_data = pd.DataFrame([])\n\n\n #文書番号\n num = num + 1\n\n #100000件ごとに文書番号を出力\n if (num % 100000 == 0):\n logger.info(\"処理中の文書番号:\" + str(num))\n print num\n\n #1行読み込み\n jsonl_file_readline = jsonl_file.readline()\n\n #最終行の場合は処理完了\n if not jsonl_file_readline:\n break\n\n #1行jsonパース\n json_object = json.loads(jsonl_file_readline)\n except :\n print('Error:' + str(num))\n logger.info('Error:' + str(num))",
"100000\nError:132778\nError:132779\nError:132780\nError:132781\nError:132782\nError:132783\nError:132784\nError:132785\nError:147270\nError:147271\n200000\nError:234898\nError:234899\nError:234900\nError:243240\nError:243241\nError:243242\nError:243243\nError:243244\nError:243245\nError:243246\nError:243247\nError:243248\n300000\nError:333208\nError:333209\nError:333210\nError:333657\nError:333658\nError:333659\nError:333660\nError:333661\nError:333662\nError:350950\nError:350951\nError:350952\nError:351233\nError:351234\nError:351256\nError:351257\nError:353029\nError:353030\nError:353031\nError:380030\nError:380031\nError:380032\nError:384609\nError:384610\nError:384611\nError:390299\nError:390300\n400000\nError:416991\nError:416992\nError:416993\nError:416994\nError:416995\nError:433081\nError:433082\nError:433870\nError:433871\nError:433872\nError:433873\nError:433874\nError:433875\nError:452162\nError:452163\nError:452164\nError:477921\nError:477922\nError:483351\nError:483352\nError:483353\nError:483354\n500000\nError:525217\nError:525218\nError:525219\nError:525220\nError:525221\nError:525222\nError:528244\nError:528245\nError:528246\nError:534022\nError:534023\nError:543526\nError:543527\nError:549011\nError:549012\nError:558647\nError:558648\nError:558649\nError:558650\nError:584896\nError:584897\nError:584898\n600000\nError:612328\nError:612329\nError:612330\nError:612331\nError:612332\nError:616201\nError:616202\nError:628942\nError:628943\nError:628944\nError:628945\nError:629883\nError:629884\nError:629885\nError:633633\nError:633634\nError:633635\nError:656933\nError:656934\nError:657986\nError:657987\nError:675918\nError:699658\nError:699659\n700000\nError:729108\nError:729109\nError:729110\nError:729111\nError:742850\nError:742851\nError:742852\nError:742853\nError:742854\nError:742855\nError:742856\nError:742857\nError:742858\nError:742859\nError:742860\nError:756259\nError:756260\n800000\nError:800698\nError:800699\nError:844565\nError:844566\nErr
or:847155\nError:847156\nError:853547\nError:853548\nError:899877\nError:899878\n900000\nError:906555\nError:906556\nError:906735\nError:906736\nError:906737\nError:906738\nError:916870\nError:916871\nError:916872\nError:916873\nError:921296\nError:921297\nError:921298\nError:921299\nError:922014\nError:922015\nError:922016\nError:922627\nError:922628\nError:922629\nError:922630\nError:947457\nError:947458\nError:951543\nError:951544\nError:953254\nError:953255\nError:958065\nError:958066\nError:958067\nError:958068\nError:958069\nError:958070\nError:958071\nError:958072\nError:958073\nError:958074\nError:958075\nError:972442\nError:972443\nError:972444\nError:972445\nError:972446\nError:972447\nError:995580\nError:995581\n1000000\nError:1000139\nError:1000140\nError:1000914\nError:1000915\nError:1001079\nError:1001080\nError:1001282\nError:1001283\nError:1001930\nError:1001931\nError:1002234\nError:1002235\nError:1002406\nError:1002407\nError:1002848\nError:1002849\nError:1003603\nError:1003604\nError:1003630\nError:1003631\nError:1004613\nError:1004614\nError:1008520\nError:1008521\nError:1008522\nError:1011455\nError:1011456\nError:1016819\nError:1016820\nError:1016821\nError:1017735\nError:1017736\nError:1033401\nError:1033402\nError:1033482\nError:1033483\nError:1033998\nError:1033999\nError:1036219\nError:1036220\nError:1038241\nError:1038242\nError:1039345\nError:1039346\nError:1043814\nError:1043815\nError:1043816\nError:1043817\nError:1060554\nError:1060555\n1100000\nError:1111631\nError:1111632\nError:1113488\nError:1113489\nError:1119015\nError:1119016\n1200000\nError:1226938\nError:1226939\nError:1226940\nError:1226941\nError:1229748\nError:1229749\nError:1229750\nError:1229751\nError:1230465\nError:1230466\nError:1276180\nError:1276181\nError:1293498\nError:1293499\nError:1296597\nError:1296598\n1300000\nError:1313875\nError:1313876\nError:1313877\nError:1313878\nError:1351801\nError:1351802\nError:1383469\nError:1383470\nError:1383471\nError:1383472\nE
rror:1384136\nError:1384137\nError:1384138\nError:1386385\nError:1386386\nError:1386387\n1400000\n1500000\nError:1535306\nError:1535307\nError:1535308\nError:1535309\n1600000\nError:1612870\nError:1612871\nError:1612872\nError:1612873\nError:1614790\nError:1614791\nError:1614792\nError:1614793\nError:1652897\nError:1652898\nError:1652899\nError:1652900\nError:1655252\nError:1655253\nError:1655254\nError:1655255\nError:1656114\nError:1656115\nError:1656116\nError:1656117\nError:1656478\nError:1656479\nError:1656480\nError:1656481\nError:1658762\nError:1658763\nError:1658764\nError:1658765\nError:1659806\nError:1659807\nError:1659808\nError:1659809\nError:1661183\nError:1661184\nError:1661185\nError:1661186\nError:1661353\nError:1661354\nError:1661355\nError:1661356\nError:1670740\nError:1670741\nError:1671050\nError:1671051\nError:1695817\nError:1695818\nError:1695819\n1700000\nError:1744122\nError:1744123\nError:1744124\nError:1744125\nError:1744126\nError:1764950\nError:1764951\nError:1764952\nError:1775120\nError:1775121\nError:1789960\nError:1789961\nError:1792431\nError:1792432\nError:1792433\nError:1792434\nError:1792435\nError:1792436\nError:1792437\n1800000\nError:1810046\nError:1810047\nError:1842874\nError:1842875\nError:1843515\nError:1843516\nError:1843641\nError:1843642\nError:1843643\nError:1843644\nError:1843645\nError:1844002\nError:1844003\nError:1844004\nError:1844005\nError:1844006\nError:1857510\nError:1857511\nError:1870859\nError:1870860\nError:1870861\nError:1870862\nError:1871988\nError:1871989\nError:1871990\nError:1871991\nError:1871992\nError:1872049\nError:1872050\nError:1872051\nError:1872052\nError:1872053\nError:1872114\nError:1872115\nError:1872116\nError:1872117\nError:1872118\nError:1873753\nError:1873754\nError:1877718\nError:1877719\nError:1886043\nError:1886044\nError:1886045\nError:1886046\nError:1886047\n1900000\nError:1903588\nError:1903589\nError:1904067\nError:1904068\nError:1917701\nError:1917702\nError:1925171\nError:192517
2\nError:1929072\nError:1929073\nError:1929074\nError:1929075\nError:1929498\nError:1929499\nError:1929500\nError:1929501\nError:1945482\nError:1945483\nError:1953909\nError:1953910\nError:1953911\nError:1953912\nError:1953913\nError:1953914\nError:1953915\nError:1953916\nError:1953917\nError:1953918\nError:1953919\nError:1956855\nError:1956856\nError:1956857\nError:1956858\nError:1980929\nError:1980930\nError:1986808\nError:1986809\nError:1986810\nError:1986811\nError:1996314\nError:1996315\nError:1996316\nError:1996317\nError:1997123\nError:1997124\nError:1997420\nError:1997421\nError:1997422\nError:1997423\nError:1997904\nError:1997905\nError:1998207\nError:1998208\n2000000\nError:2001584\nError:2001585\nError:2001586\nError:2004180\nError:2004181\nError:2004182\nError:2004183\nError:2004184\nError:2004185\nError:2004186\nError:2004187\nError:2025263\nError:2025264\nError:2025265\nError:2025266\nError:2033954\nError:2033955\nError:2033956\nError:2033957\nError:2062056\nError:2062057\nError:2062058\nError:2062059\nError:2071458\nError:2071459\nError:2071460\nError:2071461\nError:2073694\nError:2073695\nError:2077840\nError:2077841\nError:2085995\nError:2085996\nError:2085997\nError:2092551\nError:2092552\nError:2092553\nError:2092554\nError:2092555\nError:2092556\nError:2092557\nError:2093002\nError:2093003\n2100000\nError:2106084\nError:2106085\nError:2106086\nError:2106087\nError:2117636\nError:2117637\nError:2117638\nError:2119209\nError:2119210\nError:2119211\nError:2119212\nError:2119213\nError:2121115\nError:2121116\nError:2123983\nError:2123984\nError:2123985\nError:2123986\nError:2136530\nError:2136531\nError:2136532\nError:2136533\nError:2140816\nError:2140817\nError:2140818\nError:2140819\nError:2149319\nError:2149320\nError:2149321\nError:2149322\nError:2149323\nError:2149324\nError:2149325\nError:2149326\nError:2149327\nError:2149328\nError:2149329\nError:2149330\nError:2149331\nError:2149332\nError:2149333\nError:2149334\nError:2149335\nError:2153353\
nError:2153354\nError:2153355\nError:2153356\nError:2157897\nError:2157898\nError:2157899\nError:2157900\nError:2159305\nError:2159306\nError:2159307\nError:2160969\nError:2160970\nError:2160971\nError:2161039\nError:2161040\nError:2161041\nError:2161155\nError:2161156\nError:2161157\nError:2161158\nError:2163750\nError:2163751\nError:2163752\nError:2163753\nError:2164048\nError:2164049\nError:2164050\nError:2164051\nError:2164052\nError:2164966\nError:2164967\nError:2164968\nError:2164969\nError:2164970\nError:2164971\nError:2164972\nError:2164973\nError:2164974\nError:2164975\nError:2164976\nError:2166656\nError:2166657\nError:2166745\nError:2166746\nError:2167325\nError:2167326\nError:2168336\nError:2168337\nError:2175382\nError:2175383\nError:2175384\nError:2175385\nError:2180404\nError:2180405\nError:2180406\n"
],
[
"logging.info(\"文書番号:\" + str(num-1))\nlogging.info(\"----出力データ作成完了----\")",
"_____no_output_____"
],
[
"logging.shutdown()",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4abfe36e4957b8ea77f808c46542b2e16ccd2263
| 627,163 |
ipynb
|
Jupyter Notebook
|
Java/Arrays/Multidimensional Array/MultiDimensionalArray.ipynb
|
Riya-Bisht/winter-of-contributing
|
373630b4eb99062c9d97228505064a9800d41c25
|
[
"MIT"
] | 1,078 |
2021-09-05T09:44:33.000Z
|
2022-03-27T01:16:02.000Z
|
Java/Arrays/Multidimensional Array/MultiDimensionalArray.ipynb
|
Riya-Bisht/winter-of-contributing
|
373630b4eb99062c9d97228505064a9800d41c25
|
[
"MIT"
] | 6,845 |
2021-09-05T12:49:50.000Z
|
2022-03-12T16:41:13.000Z
|
Java/Arrays/Multidimensional Array/MultiDimensionalArray.ipynb
|
Varun270/winter-of-contributing
|
8c76fe570e57ada470941747f23854f8003bbd3b
|
[
"MIT"
] | 2,629 |
2021-09-03T04:53:16.000Z
|
2022-03-20T17:45:00.000Z
| 1,603.997442 | 475,868 | 0.956554 |
[
[
[
"###**MULTIDIMENSIONAL ARRAY**",
"_____no_output_____"
],
[
"You are determined to learn Java in and out. So, you walk to a library nearby to get the book named “Java The Complete Reference, 8th Edition”. You ask the librarian where to find the book in that big library. \n\n\n\nAfter a quick scan in her database, she tells you that the book is available in the 3rd cell of the 2nd row in the 2nd shelf. So, what do you do? You walk straight to the 2nd shelf, find the 2nd row and locate the 3rd cell. Walla! you have found the book you wanted!!!",
"_____no_output_____"
],
[
"Now let’s figure out how the librarian had track of all the books in the library. For simplicity’s sake, lets imagine this poor library has only one shelf of books. This shelf contains many rows and each row contains many cells (or say columns). So, this shelf is a collection of rows and columns. Similarly in computer terms we term this shelf as an array which is a collection of rows and columns. \n\nNow consider our original library with many shelves (or arrays). So, we can conclude that this library is a collection of many arrays or an array of arrays. In Java we term this as a MULTIDIMENSIONAL ARRAY. Now how do we code this in Java? \n\n",
"_____no_output_____"
],
[
"**SYNTAX**\n\n```\ndataType [dimension1][dimension2]…..[dimensionN] arrayName = new dataType[size1][size2]…..[sizeN];\n```\nLet’s look what these keywords mean.\n* dataType\n * type of data to be stored in the array\n * Example: int, char, float, etc.\n* arrayName\n * name of the array\n* dimension1, dimension2, …….., dimensionN\n * dimension of the array to be created\n * Example: \n 1. for a 1D integer array –> int[ ] x = new int[5];\n 2. for a 2D char array –> char[ ][ ] c = new char[2];\n* size1, size2, ……., sizeN\n * sizes of the corresponding dimension\n\n***Note*** \n\nAs the library shelves contain only books and no other objects (say toys), the Java multidimensional array also contains the same data type for all dimensions i.e., say in a 2D array both the row and column data must be of the same data type.\n\n",
"_____no_output_____"
],
[
"There is another way to declare a multidimensional array by not specifying the size of the dimensions explicitly.\n\n```\ndataType [dimension1][dimension2]…..[dimensionN] arrayName ={\n\t\t\t\t\t\t\t\t {\n\t\t\t\t\t\t\t\t {valueD1R1C1, valueD1R1C2,…..},\n\t\t\t\t\t\t\t\t {valueD1R2C1, valueD1R2C2,…..},\n\t\t\t\t\t\t\t\t ………………………..\t\t\t\t\t\t\t\t \n }\n\t\t\t\t\t\t\t\t {\n\t\t\t\t\t\t\t\t {valueD2R1C1, valueD2R1C2,…..},\n\t\t\t\t\t\t\t\t {valueD2R2C1, valueD2R2C2,…..},\n\t\t\t\t\t\t\t\t ………………………..\t\t\t\t\t\t\t\t \n }\n\t\t\t\t\t\t\t\t …………………………..\n\t\t\t\t\t\t\t\t {\n\t\t\t\t\t\t\t\t {valueDnR1C1, valueDnR1C2,…..},\n\t\t\t\t\t\t\t\t {valueDnR2C1, valueDnR2C2,…..},\n\t\t\t\t\t\t\t\t ………………………..\t\t\t\t\t\t\t\t \n }\n };\n\n```\nExample: \n\nint[ ][ ] x = { {1, 2}, {3, 4} };\n\nint[ ][ ][ ] y = { { {0, 1}, {2, 3} }, { {4, 5}, {6, 7} } };\n\nNow let’s declare a 2D array for our library. In our case the data type is String. Let’s name the array as books. And say we have a size of 2 rows and 3 columns. So, we have:\n\n\t\t\t\t\t\t\tString[][] books = new String[2][3];\n\nWe can find the number of rows in the array by\n\n\t\t\t\t\tbooks.length\nWe can find the number of columns in the row by\n\n\t\t\t\t\tbooks[row].length",
"_____no_output_____"
],
[
"**ACCSESSING AN ELEMENT**\n\nNow recall how did the librarian tell you the location of the book. She used certain figures (or indices) to tell you go there, look there. The same way to access an element from a multidimensional array we use indices. \n\nExample: \n\nIn a 2D array – x[0][1] accesses the element in row 0 and column 1\n\nIn a 3D array – y[2][3][1] accesses the element present in the second array, 3rd row and 1st column\n\nIn our library example, we access an element by specifying the row and column number like \n\n`books[row][col]`\n",
"_____no_output_____"
],
[
"**ASSIGNING VALUES**\n\nOur 2D books array has no real elements in it. So now we’ll insert some books into the array. In Java we have two ways to assign values. One is by providing a value to each cell by accessing it individually. \n\n```\nbooks[0][0] = AliceInWonderLand;\n\nbooks[0][1] = TheThreeMusketeers;\n\nbooks[0][2] = TheThreeLittlePigs;\n\nbooks[1][0] = DataCommunication;\n\nbooks[1][1] = DatabaseSystems;\n\nbooks[1][2] = JavaTheCompleteReference;\n```\nThis is an exhaustive approach. For large arrays it is impossible to assign every element in this way.\n\nThe other way is an iterative approach. Input can be given by the user and that element will be assigned to its corresponding position.\n\n```\nScanner sc = new Scanner(System.in);\nfor (int row = 0; row < 2; row++){\n\tfor (int col = 0; col < 3; col++){\n\t\tSystem.out.print(\"Book at row\" + (row+1) + \" col \" + (col+1) + \": \");\n\t\t books[row][col] = sc.nextLine();\n\t }\n}\t\n```\n",
"_____no_output_____"
],
[
"**DISPLAYING VALUES**\n\nWe can easily display the array by using two for loops. Individual elements can be accessed and displayed.\n\n```\nfor (int row = 0; row < 2; row++){\n\tfor (int col = 0; col < 3; col++){\n\t\tSystem.out.print(books[row][col] + \"\\t\");\n\t}\n\tSystem.out.println();\n}\n```\n\n***Note:***\n\nThe tab space and the next line which I have included in the print statements are simply for alignment, to make it look beautiful by displaying it in a tabular form. You can simply print the elements one by one.\n\nWe’ve come a long way. So now let’s join all the snippets of code that we have coded for our library and see how our book collection looks like.\n",
"_____no_output_____"
]
],
[
[
"import java.util.Scanner;\t\t//Import util package to use Scanner class to get input from the user\n\npublic class Main\n{\n\tpublic static void main(String[] args) {\n\t \tScanner sc = new Scanner(System.in);\t\n\t\tString[][] books = new String[2][3];\t//Declaration of 2D array\n\t\tfor (int row = 0; row < 2; row++){\n\t\t for (int col = 0; col < 3; col++){\n\t\t System.out.print(\"Book at row\" + (row+1) + \" col \" + (col+1) + \": \");\n\t\t books[row][col] = sc.nextLine();\t//Get input from the user and assign it to the corresponding indices\n\t\t }\n\t\t}\n\t\tSystem.out.println(\"List of Books\");\n\t\tfor (int row = 0; row < 2; row++){\n\t\t for (int col = 0; col < 3; col++){\n\t\t System.out.print(books[row][col] + \"\\t\");\t//Display the elements present in our 2Darray\n\t\t }\n\t\t//To print the 2D array in a tabular format, go to the next line after each row\n\t\t System.out.println();\t\n\t\t}\n\t}\n}",
"Book at row1 col 1: AliceInWonderLand\nBook at row1 col 2: TheThreeMusketeers\nBook at row1 col 3: TheThreeLittlePigs\nBook at row2 col 1: DataCommunication\nBook at row2 col 2: DatabaseSystems\nBook at row2 col 3: JavaTheCompleteReference\nList of Books\nAliceInWonderLand\tTheThreeMusketeers\tTheThreeLittlePigs\t\nDataCommunication\tDatabaseSystems\tJavaTheCompleteReference\t\n"
]
],
[
[
"*Walla!!! Here we are! We have our collection of books for our poor 1 shelfed library.*",
"_____no_output_____"
],
[
"**BRAIN TEASER**\n\nAs a brain teaser try extending this knowledge to 3D array. Try expanding on our library example. The solution (spoiler) is available at the end but try by yourself first!\n\n\n",
"_____no_output_____"
],
[
"**SPOILER**",
"_____no_output_____"
]
],
[
[
"import java.util.Scanner;\npublic class Main\n{\n\tpublic static void main(String[] args) {\n\t Scanner sc = new Scanner(System.in);\n\t\tString[][][] books = new String[2][2][3];\t\t//Declaring a 3D array called books\n\t\tfor (int shelf = 0; shelf < 2; shelf++){\n \t\tfor (int row = 0; row < 2; row++){\n \t\t for (int col = 0; col < 3; col++){\n \t\t System.out.print(\"Book at shelf\" + (shelf+1) + \" row\" + (row+1) + \" col\" + (col+1) + \": \");\n \t\t books[shelf][row][col] = sc.nextLine();\t//Getting input from the user\n \t\t }\n \t\t}\n\t\t}\n\t\tSystem.out.println();\n\t\tSystem.out.println(\"List of Books\");\n System.out.println();\n\t\tfor (int shelf = 0; shelf < 2; shelf++){\n\t\t System.out.println(\"Shelf \" + (shelf+1));\n System.out.println();\n \t\tfor (int row = 0; row < 2; row++){\n \t\t for (int col = 0; col < 3; col++){\n \t\t System.out.print(books[shelf][row][col] + \"\\t\");\t//Display the collection of books\n \t\t }\n \t\t System.out.println();\n System.out.println();\n \t\t}\n\t\t}\n\t}\n}",
"Book at shelf1 row1 col1: 100FunFacts\nBook at shelf1 row1 col2: MathPuzzles\nBook at shelf1 row1 col3: WordPuzzles\nBook at shelf1 row2 col1: AkbarAndBirbal\nBook at shelf1 row2 col2: TenaliRaman\nBook at shelf1 row2 col3: Panchatantra\nBook at shelf2 row1 col1: AliceInWonderLand\nBook at shelf2 row1 col2: TheThreeMusketeers\nBook at shelf2 row1 col3: TheThreeLittlePigs\nBook at shelf2 row2 col1: DataCommunication\nBook at shelf2 row2 col2: DatabaseSystems\nBook at shelf2 row2 col3: JavaTheCompleteReference\n\nList of Books\n\nShelf 1\n\n100FunFacts\tMathPuzzles\tWordPuzzles\t\n\nAkbarAndBirbal\tTenaliRaman\tPanchatantra\t\n\nShelf 2\n\nAliceInWonderLand\tTheThreeMusketeers\tTheThreeLittlePigs\t\n\nDataCommunication\tDatabaseSystems\tJavaTheCompleteReference\t\n\n"
]
],
[
[
"**REFERENCES**\n\n[geeksforgeeks](https://www.geeksforgeeks.org/multidimensional-arrays-in-java/)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4abfe4cf38a3e8be9fd1a763c212fac87c115192
| 14,272 |
ipynb
|
Jupyter Notebook
|
notebooks/w4_generative_spaces_simplified.ipynb
|
shervinazadi/spatial_computing_workshops
|
660a4cb4b0f2ed34f539708c6b87566a3e2dcf5e
|
[
"MIT"
] | 3 |
2021-01-26T21:37:51.000Z
|
2021-09-18T22:24:14.000Z
|
notebooks/w4_generative_spaces_simplified.ipynb
|
shervinazadi/spatial_computing_workshops
|
660a4cb4b0f2ed34f539708c6b87566a3e2dcf5e
|
[
"MIT"
] | null | null | null |
notebooks/w4_generative_spaces_simplified.ipynb
|
shervinazadi/spatial_computing_workshops
|
660a4cb4b0f2ed34f539708c6b87566a3e2dcf5e
|
[
"MIT"
] | 8 |
2021-01-30T11:42:11.000Z
|
2022-03-03T11:33:14.000Z
| 31.298246 | 292 | 0.560538 |
[
[
[
"# Generative Spaces (ABM)\n\nIn this workshop we will lwarn how to construct a ABM (Agent Based Model) with spatial behaviours, that is capable of configuring the space. This file is a simplified version of Generative Spatial Agent Based Models. For further information, you can find more advanced versions here:\n\n* [Object Oriented version](https://github.com/shervinazadi/spatial_computing_workshops/blob/master/notebooks/w3_generative_spaces.ipynb)\n* [Vectorized version](https://topogenesis.readthedocs.io/notebooks/random_walker)",
"_____no_output_____"
],
[
"## 0. Initialization\n\n### 0.1. Load required libraries",
"_____no_output_____"
]
],
[
[
"# !pip install pyvista==0.28.1 ipyvtklink",
"_____no_output_____"
],
[
"import os\nimport topogenesis as tg\nimport pyvista as pv\nimport trimesh as tm\nimport pandas as pd\nimport numpy as np\nnp.random.seed(0)",
"_____no_output_____"
]
],
[
[
"### 0.2. Define the Neighborhood (Stencil)",
"_____no_output_____"
]
],
[
[
"# creating neighborhood definition\nstencil = tg.create_stencil(\"von_neumann\", 1, 1)\n# setting the center to zero\nstencil.set_index([0,0,0], 0)\nprint(stencil)",
"_____no_output_____"
]
],
[
[
"### 0.3 Visualize the Stencil",
"_____no_output_____"
]
],
[
[
"# initiating the plotter\np = pv.Plotter(notebook=True)\n\n# Create the spatial reference\ngrid = pv.UniformGrid()\n\n# Set the grid dimensions: shape because we want to inject our values\ngrid.dimensions = np.array(stencil.shape) + 1\n# The bottom left corner of the data set\ngrid.origin = [0,0,0]\n# These are the cell sizes along each axis\ngrid.spacing = [1,1,1]\n\n# Add the data values to the cell data\ngrid.cell_arrays[\"values\"] = stencil.flatten(order=\"F\") # Flatten the stencil\nthreshed = grid.threshold([0.9, 1.1])\n\n# adding the voxels: light red\np.add_mesh(threshed, show_edges=True, color=\"#ff8fa3\", opacity=0.3)\n\n# plotting\n# p.show(use_ipyvtk=True)",
"_____no_output_____"
]
],
[
[
"## 1. Setup the Environment\n\n### 1.1. Load the envelope lattice as the avialbility lattice",
"_____no_output_____"
]
],
[
[
"# loading the lattice from csv\nlattice_path = os.path.relpath('../data/voxelized_envelope.csv')\navail_lattice = tg.lattice_from_csv(lattice_path)\ninit_avail_lattice = tg.to_lattice(np.copy(avail_lattice), avail_lattice)",
"_____no_output_____"
]
],
[
[
"### 1.2 Load Program",
"_____no_output_____"
]
],
[
[
"program_complete = pd.read_csv(\"../data/program_small.csv\")\nprogram_complete",
"_____no_output_____"
],
[
"program_prefs = program_complete.drop([\"space_name\",\"space_id\"], 1)\nprogram_prefs",
"_____no_output_____"
]
],
[
[
"### 1.2 Load the value fields",
"_____no_output_____"
]
],
[
[
"# loading the lattice from csv\nfields = {}\nfor f in program_prefs.columns:\n lattice_path = os.path.relpath('../data/' + f + '.csv')\n fields[f] = tg.lattice_from_csv(lattice_path)",
"_____no_output_____"
]
],
[
[
"### 1.3. Initialize the Agents",
"_____no_output_____"
]
],
[
[
"# initialize the occupation lattice\nocc_lattice = avail_lattice * 0 - 1\n\n# Finding the index of the available voxels in avail_lattice\navail_flat = avail_lattice.flatten()\navail_index = np.array(np.where(avail_lattice == 1)).T\n\n# Randomly choosing three available voxels\nagn_num = len(program_complete)\nselect_id = np.random.choice(len(avail_index), agn_num)\nagn_origins = avail_index[select_id]\n\n# adding the origins to the agents locations\nagn_locs = []\n# for each agent origin ... \nfor a_id, a_origin in enumerate(agn_origins):\n\n # add the origin to the list of agent locations\n agn_locs.append([a_origin])\n\n # set the origin in availablity lattice as 0 (UNavailable)\n avail_lattice[tuple(a_origin)] = 0\n\n # set the origin in occupation lattice as the agent id (a_id)\n occ_lattice[tuple(a_origin)] = a_id",
"_____no_output_____"
]
],
[
[
"### 1.4. Visualize the environment",
"_____no_output_____"
]
],
[
[
"p = pv.Plotter(notebook=True)\n\n# Set the grid dimensions: shape + 1 because we want to inject our values on the CELL data\ngrid = pv.UniformGrid()\ngrid.dimensions = np.array(occ_lattice.shape) + 1\n# The bottom left corner of the data set\ngrid.origin = occ_lattice.minbound - occ_lattice.unit * 0.5\n# These are the cell sizes along each axis\ngrid.spacing = occ_lattice.unit \n\n# adding the boundingbox wireframe\np.add_mesh(grid.outline(), color=\"grey\", label=\"Domain\")\n\n# adding axes\np.add_axes()\np.show_bounds(grid=\"back\", location=\"back\", color=\"#777777\")\n\n# Add the data values to the cell data\ngrid.cell_arrays[\"Agents\"] = occ_lattice.flatten(order=\"F\").astype(int) # Flatten the array!\n# filtering the voxels\nthreshed = grid.threshold([-0.1, agn_num - 0.9])\n# adding the voxels\np.add_mesh(threshed, show_edges=True, opacity=1.0, show_scalar_bar=False)\n\n# adding the availability lattice\ninit_avail_lattice.fast_vis(p)\n\n# p.show(use_ipyvtk=True)",
"_____no_output_____"
]
],
[
[
"## 2. ABM Simulation (Agent Based Space Occupation)\n\n### 2.1. Running the simulation",
"_____no_output_____"
]
],
[
[
"# make a deep copy of occupation lattice\ncur_occ_lattice = tg.to_lattice(np.copy(occ_lattice), occ_lattice)\n# initialzing the list of frames\nframes = [cur_occ_lattice]\n\n# setting the time variable to 0\nt = 0\nn_frames = 30\n# main feedback loop of the simulation (for each time step ...)\nwhile t<n_frames:\n # for each agent ... \n for a_id, a_prefs in program_complete.iterrows():\n # retrieve the list of the locations of the current agent\n a_locs = agn_locs[a_id]\n # initialize the list of free neighbours\n free_neighs = []\n # for each location of the agent\n for loc in a_locs:\n # retrieve the list of neighbours of the agent based on the stencil\n neighs = avail_lattice.find_neighbours_masked(stencil, loc = loc)\n \n # for each neighbour ... \n for n in neighs:\n # compute 3D index of neighbour\n neigh_3d_id = np.unravel_index(n, avail_lattice.shape)\n # if the neighbour is available... \n if avail_lattice[neigh_3d_id]:\n # add the neighbour to the list of free neighbours\n free_neighs.append(neigh_3d_id)\n # check if found any free neighbour\n if len(free_neighs)>0:\n # convert free neighbours to a numpy array\n fns = np.array(free_neighs)\n\n # find the value of neighbours\n # init the agent value array\n a_eval = np.ones(len(fns))\n # for each field...\n for f in program_prefs.columns:\n # find the raw value of free neighbours...\n vals = fields[f][fns[:,0], fns[:,1], fns[:,2]]\n # raise the the raw value to the power of preference weight of the agent\n a_weighted_vals = vals ** a_prefs[f]\n # multiply them to the previous weighted values\n a_eval *= a_weighted_vals\n\n # select the neighbour with highest evaluation\n selected_int = np.argmax(a_eval)\n # find 3D integer index of selected neighbour\n selected_neigh_3d_id = free_neighs[selected_int]\n # find the location of the newly selected neighbour\n selected_neigh_loc = np.array(selected_neigh_3d_id).flatten()\n # add the newly selected neighbour location to agent locations\n 
agn_locs[a_id].append(selected_neigh_loc)\n # set the newly selected neighbour as UNavailable (0) in the availability lattice\n avail_lattice[selected_neigh_3d_id] = 0\n # set the newly selected neighbour as OCCUPIED by current agent \n # (-1 means not-occupied so a_id)\n occ_lattice[selected_neigh_3d_id] = a_id\n\n # constructing the new lattice\n new_occ_lattice = tg.to_lattice(np.copy(occ_lattice), occ_lattice)\n # adding the new lattice to the list of frames\n frames.append(new_occ_lattice)\n # adding one to the time counter\n t += 1",
"_____no_output_____"
]
],
[
[
"### 2.2. Visualizing the simulation",
"_____no_output_____"
]
],
[
[
"p = pv.Plotter(notebook=True)\n\nbase_lattice = frames[0]\n\n# Set the grid dimensions: shape + 1 because we want to inject our values on the CELL data\ngrid = pv.UniformGrid()\ngrid.dimensions = np.array(base_lattice.shape) + 1\n# The bottom left corner of the data set\ngrid.origin = base_lattice.minbound - base_lattice.unit * 0.5\n# These are the cell sizes along each axis\ngrid.spacing = base_lattice.unit \n\n# adding the boundingbox wireframe\np.add_mesh(grid.outline(), color=\"grey\", label=\"Domain\")\n\n# adding the availability lattice\ninit_avail_lattice.fast_vis(p)\n\n# adding axes\np.add_axes()\np.show_bounds(grid=\"back\", location=\"back\", color=\"#aaaaaa\")\n\ndef create_mesh(value):\n f = int(value)\n lattice = frames[f]\n\n # Add the data values to the cell data\n grid.cell_arrays[\"Agents\"] = lattice.flatten(order=\"F\").astype(int) # Flatten the array!\n # filtering the voxels\n threshed = grid.threshold([-0.1, agn_num - 0.9])\n # adding the voxels\n p.add_mesh(threshed, name='sphere', show_edges=True, opacity=1.0, show_scalar_bar=False)\n\n return\n\np.add_slider_widget(create_mesh, [0, n_frames], title='Time', value=0, event_type=\"always\", style=\"classic\")\np.show(use_ipyvtk=True)",
"_____no_output_____"
]
],
[
[
"### 2.3. Saving lattice frames in CSV",
"_____no_output_____"
]
],
[
[
"for i, lattice in enumerate(frames):\n csv_path = os.path.relpath('../data/abm_animation/abm_f_'+ f'{i:03}' + '.csv')\n lattice.to_csv(csv_path)",
"_____no_output_____"
]
],
[
[
"### Credits",
"_____no_output_____"
]
],
[
[
"__author__ = \"Shervin Azadi \"\n__license__ = \"MIT\"\n__version__ = \"1.0\"\n__url__ = \"https://github.com/shervinazadi/spatial_computing_workshops\"\n__summary__ = \"Spatial Computing Design Studio Workshop on Agent Based Models for Generative Spaces\"",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4abfed271cae558eabfb7b140f32889dccc3071e
| 203,111 |
ipynb
|
Jupyter Notebook
|
14_complexity_comparison_cifar_mosaic/3_focus_6_6_classify_6_18/run1/focus_6_6_classify_6_18.ipynb
|
lnpandey/DL_explore_synth_data
|
0a5d8b417091897f4c7f358377d5198a155f3f24
|
[
"MIT"
] | 2 |
2019-08-24T07:20:35.000Z
|
2020-03-27T08:16:59.000Z
|
14_complexity_comparison_cifar_mosaic/3_focus_6_6_classify_6_18/run1/focus_6_6_classify_6_18.ipynb
|
lnpandey/DL_explore_synth_data
|
0a5d8b417091897f4c7f358377d5198a155f3f24
|
[
"MIT"
] | null | null | null |
14_complexity_comparison_cifar_mosaic/3_focus_6_6_classify_6_18/run1/focus_6_6_classify_6_18.ipynb
|
lnpandey/DL_explore_synth_data
|
0a5d8b417091897f4c7f358377d5198a155f3f24
|
[
"MIT"
] | 3 |
2019-06-21T09:34:32.000Z
|
2019-09-19T10:43:07.000Z
| 103.522426 | 35,510 | 0.760308 |
[
[
[
"# from google.colab import drive\n# drive.mount('/content/drive')",
"_____no_output_____"
],
[
"import torch.nn as nn\nimport torch.nn.functional as F\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport torch\nimport torchvision\nimport torchvision.transforms as transforms\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms, utils\n\nfrom matplotlib import pyplot as plt\n\nimport copy\n\n# Ignore warnings\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n",
"_____no_output_____"
],
[
"transform = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\ntrainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)\n\n\ntestset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)\n",
"Files already downloaded and verified\nFiles already downloaded and verified\n"
],
[
"trainloader = torch.utils.data.DataLoader(trainset, batch_size=10, shuffle=True)\ntestloader = torch.utils.data.DataLoader(testset, batch_size=10, shuffle=False)\n\n\nclasses = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n\nforeground_classes = {'plane', 'car', 'bird'}\n\nbackground_classes = {'cat', 'deer', 'dog', 'frog', 'horse','ship', 'truck'}\n\nfg1,fg2,fg3 = 0,1,2",
"_____no_output_____"
],
[
"dataiter = iter(trainloader)\nbackground_data=[]\nbackground_label=[]\nforeground_data=[]\nforeground_label=[]\nbatch_size=10\n\nfor i in range(5000):\n images, labels = dataiter.next()\n for j in range(batch_size):\n if(classes[labels[j]] in background_classes):\n img = images[j].tolist()\n background_data.append(img)\n background_label.append(labels[j])\n else:\n img = images[j].tolist()\n foreground_data.append(img)\n foreground_label.append(labels[j])\n \nforeground_data = torch.tensor(foreground_data)\nforeground_label = torch.tensor(foreground_label)\nbackground_data = torch.tensor(background_data)\nbackground_label = torch.tensor(background_label)",
"_____no_output_____"
],
[
"def create_mosaic_img(bg_idx,fg_idx,fg): \n \"\"\"\n bg_idx : list of indexes of background_data[] to be used as background images in mosaic\n fg_idx : index of image to be used as foreground image from foreground data\n fg : at what position/index foreground image has to be stored out of 0-8\n \"\"\"\n image_list=[]\n j=0\n for i in range(9):\n if i != fg:\n image_list.append(background_data[bg_idx[j]].type(\"torch.DoubleTensor\"))\n j+=1\n else: \n image_list.append(foreground_data[fg_idx].type(\"torch.DoubleTensor\"))\n label = foreground_label[fg_idx]- fg1 # minus 7 because our fore ground classes are 7,8,9 but we have to store it as 0,1,2\n #image_list = np.concatenate(image_list ,axis=0)\n image_list = torch.stack(image_list) \n return image_list,label",
"_____no_output_____"
],
[
"desired_num = 30000\nmosaic_list_of_images =[] # list of mosaic images, each mosaic image is saved as list of 9 images\nfore_idx =[] # list of indexes at which foreground image is present in a mosaic image i.e from 0 to 9 \nmosaic_label=[] # label of mosaic image = foreground class present in that mosaic\nfor i in range(desired_num):\n bg_idx = np.random.randint(0,35000,8)\n fg_idx = np.random.randint(0,15000)\n fg = np.random.randint(0,9)\n fore_idx.append(fg)\n image_list,label = create_mosaic_img(bg_idx,fg_idx,fg)\n mosaic_list_of_images.append(image_list)\n mosaic_label.append(label)",
"_____no_output_____"
],
[
"class MosaicDataset(Dataset):\n \"\"\"MosaicDataset dataset.\"\"\"\n\n def __init__(self, mosaic_list_of_images, mosaic_label, fore_idx):\n \"\"\"\n Args:\n csv_file (string): Path to the csv file with annotations.\n root_dir (string): Directory with all the images.\n transform (callable, optional): Optional transform to be applied\n on a sample.\n \"\"\"\n self.mosaic = mosaic_list_of_images\n self.label = mosaic_label\n self.fore_idx = fore_idx\n\n def __len__(self):\n return len(self.label)\n\n def __getitem__(self, idx):\n return self.mosaic[idx] , self.label[idx], self.fore_idx[idx]\n\nbatch = 250\nmsd = MosaicDataset(mosaic_list_of_images, mosaic_label , fore_idx)\ntrain_loader = DataLoader( msd,batch_size= batch ,shuffle=True)",
"_____no_output_____"
],
[
"class Focus(nn.Module):\n def __init__(self):\n super(Focus, self).__init__()\n self.conv1 = nn.Conv2d(in_channels=3, out_channels=6, kernel_size=3, padding=0)\n self.pool = nn.MaxPool2d(2, 2)\n self.conv2 = nn.Conv2d(in_channels=6, out_channels=6, kernel_size=3, padding=0)\n # self.conv3 = nn.Conv2d(in_channels=12, out_channels=32, kernel_size=3, padding=0)\n self.fc1 = nn.Linear(1014, 512)\n self.fc2 = nn.Linear(512, 64)\n # self.fc3 = nn.Linear(512, 64)\n # self.fc4 = nn.Linear(64, 10)\n self.fc3 = nn.Linear(64,1)\n\n def forward(self,z): #y is avg image #z batch of list of 9 images\n y = torch.zeros([batch,3, 32,32], dtype=torch.float64)\n x = torch.zeros([batch,9],dtype=torch.float64)\n y = y.to(\"cuda\")\n x = x.to(\"cuda\")\n \n for i in range(9):\n x[:,i] = self.helper(z[:,i])[:,0]\n\n x = F.softmax(x,dim=1)\n\n x1 = x[:,0]\n torch.mul(x1[:,None,None,None],z[:,0])\n\n for i in range(9): \n x1 = x[:,i] \n y = y + torch.mul(x1[:,None,None,None],z[:,i])\n\n return x, y\n \n def helper(self, x):\n x = self.pool(F.relu(self.conv1(x)))\n x = (F.relu(self.conv2(x)))\n # print(x.shape)\n # x = (F.relu(self.conv3(x)))\n x = x.view(x.size(0), -1)\n # print(x.shape)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n # x = F.relu(self.fc3(x))\n # x = F.relu(self.fc4(x))\n x = self.fc3(x)\n return x",
"_____no_output_____"
],
[
"focus_net = Focus().double()\nfocus_net = focus_net.to(\"cuda\")",
"_____no_output_____"
],
[
"class Classification(nn.Module):\n def __init__(self):\n super(Classification, self).__init__()\n self.conv1 = nn.Conv2d(in_channels=3, out_channels=6, kernel_size=3, padding=0)\n self.pool = nn.MaxPool2d(2, 2)\n self.conv2 = nn.Conv2d(in_channels=6, out_channels=18, kernel_size=3, padding=0)\n # self.conv3 = nn.Conv2d(in_channels=12, out_channels=20, kernel_size=3, padding=0)\n self.fc1 = nn.Linear(3042, 1024)\n self.fc2 = nn.Linear(1024, 64)\n # self.fc3 = nn.Linear(512, 64)\n # self.fc4 = nn.Linear(64, 10)\n self.fc3 = nn.Linear(64,3)\n\n def forward(self, x):\n x = self.pool(F.relu(self.conv1(x)))\n x = (F.relu(self.conv2(x)))\n # print(x.shape)\n # x = (F.relu(self.conv3(x)))\n x = x.view(x.size(0), -1)\n # print(x.shape)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n # x = F.relu(self.fc3(x))\n # x = F.relu(self.fc4(x))\n x = self.fc3(x)\n return x",
"_____no_output_____"
],
[
"classify = Classification().double()\nclassify = classify.to(\"cuda\")",
"_____no_output_____"
],
[
"test_images =[] #list of mosaic images, each mosaic image is saved as laist of 9 images\nfore_idx_test =[] #list of indexes at which foreground image is present in a mosaic image \ntest_label=[] # label of mosaic image = foreground class present in that mosaic\nfor i in range(10000):\n bg_idx = np.random.randint(0,35000,8)\n fg_idx = np.random.randint(0,15000)\n fg = np.random.randint(0,9)\n fore_idx_test.append(fg)\n image_list,label = create_mosaic_img(bg_idx,fg_idx,fg)\n test_images.append(image_list)\n test_label.append(label)",
"_____no_output_____"
],
[
"test_data = MosaicDataset(test_images,test_label,fore_idx_test)\ntest_loader = DataLoader( test_data,batch_size= batch ,shuffle=False)",
"_____no_output_____"
],
[
"import torch.optim as optim\ncriterion = nn.CrossEntropyLoss()\noptimizer_classify = optim.Adam(classify.parameters(), lr=0.001)#, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False)\noptimizer_focus = optim.Adam(focus_net.parameters(), lr=0.001)#, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False)",
"_____no_output_____"
],
[
"col1=[]\ncol2=[]\ncol3=[]\ncol4=[]\ncol5=[]\ncol6=[]\ncol7=[]\ncol8=[]\ncol9=[]\ncol10=[]\ncol11=[]\ncol12=[]\ncol13=[]",
"_____no_output_____"
],
[
"correct = 0\ntotal = 0\ncount = 0\nflag = 1\nfocus_true_pred_true =0\nfocus_false_pred_true =0\nfocus_true_pred_false =0\nfocus_false_pred_false =0\n\nargmax_more_than_half = 0\nargmax_less_than_half =0\n\nwith torch.no_grad():\n for data in train_loader:\n inputs, labels , fore_idx = data\n inputs, labels , fore_idx = inputs.to(\"cuda\"),labels.to(\"cuda\"), fore_idx.to(\"cuda\")\n alphas, avg_images = focus_net(inputs)\n outputs = classify(avg_images)\n\n _, predicted = torch.max(outputs.data, 1)\n\n for j in range(labels.size(0)):\n count += 1\n focus = torch.argmax(alphas[j])\n if alphas[j][focus] >= 0.5 :\n argmax_more_than_half += 1\n else:\n argmax_less_than_half += 1\n\n if(focus == fore_idx[j] and predicted[j] == labels[j]):\n focus_true_pred_true += 1\n elif(focus != fore_idx[j] and predicted[j] == labels[j]):\n focus_false_pred_true += 1\n elif(focus == fore_idx[j] and predicted[j] != labels[j]):\n focus_true_pred_false += 1\n elif(focus != fore_idx[j] and predicted[j] != labels[j]):\n focus_false_pred_false += 1\n\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\nprint('Accuracy of the network on the 30000 train images: %d %%' % ( 100 * correct / total))\nprint(\"total correct\", correct)\nprint(\"total train set images\", total)\n\nprint(\"focus_true_pred_true %d =============> FTPT : %d %%\" % (focus_true_pred_true , (100 * focus_true_pred_true / total) ) )\nprint(\"focus_false_pred_true %d =============> FFPT : %d %%\" % (focus_false_pred_true, (100 * focus_false_pred_true / total) ) )\nprint(\"focus_true_pred_false %d =============> FTPF : %d %%\" %( focus_true_pred_false , ( 100 * focus_true_pred_false / total) ) )\nprint(\"focus_false_pred_false %d =============> FFPF : %d %%\" % (focus_false_pred_false, ( 100 * focus_false_pred_false / total) ) )\n\nprint(\"argmax_more_than_half ==================> \",argmax_more_than_half)\nprint(\"argmax_less_than_half ==================> 
\",argmax_less_than_half)\nprint(count)\n\nprint(\"=\"*100)\n\ncol1.append(0)\ncol2.append(argmax_more_than_half)\ncol3.append(argmax_less_than_half)\ncol4.append(focus_true_pred_true)\ncol5.append(focus_false_pred_true)\ncol6.append(focus_true_pred_false)\ncol7.append(focus_false_pred_false)",
"Accuracy of the network on the 30000 train images: 33 %\ntotal correct 9910\ntotal train set images 30000\nfocus_true_pred_true 1022 =============> FTPT : 3 %\nfocus_false_pred_true 8888 =============> FFPT : 29 %\nfocus_true_pred_false 2570 =============> FTPF : 8 %\nfocus_false_pred_false 17520 =============> FFPF : 58 %\nargmax_more_than_half ==================> 0\nargmax_less_than_half ==================> 30000\n30000\n====================================================================================================\n"
],
[
"correct = 0\ntotal = 0\ncount = 0\nflag = 1\nfocus_true_pred_true =0\nfocus_false_pred_true =0\nfocus_true_pred_false =0\nfocus_false_pred_false =0\n\nargmax_more_than_half = 0\nargmax_less_than_half =0\n\nwith torch.no_grad():\n for data in test_loader:\n inputs, labels , fore_idx = data\n inputs, labels , fore_idx = inputs.to(\"cuda\"),labels.to(\"cuda\"), fore_idx.to(\"cuda\")\n alphas, avg_images = focus_net(inputs)\n outputs = classify(avg_images)\n\n _, predicted = torch.max(outputs.data, 1)\n\n for j in range(labels.size(0)):\n focus = torch.argmax(alphas[j])\n if alphas[j][focus] >= 0.5 :\n argmax_more_than_half += 1\n else:\n argmax_less_than_half += 1\n\n if(focus == fore_idx[j] and predicted[j] == labels[j]):\n focus_true_pred_true += 1\n elif(focus != fore_idx[j] and predicted[j] == labels[j]):\n focus_false_pred_true += 1\n elif(focus == fore_idx[j] and predicted[j] != labels[j]):\n focus_true_pred_false += 1\n elif(focus != fore_idx[j] and predicted[j] != labels[j]):\n focus_false_pred_false += 1\n\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\nprint('Accuracy of the network on the 10000 test images: %d %%' % (\n 100 * correct / total))\nprint(\"total correct\", correct)\nprint(\"total train set images\", total)\n\nprint(\"focus_true_pred_true %d =============> FTPT : %d %%\" % (focus_true_pred_true , (100 * focus_true_pred_true / total) ) )\nprint(\"focus_false_pred_true %d =============> FFPT : %d %%\" % (focus_false_pred_true, (100 * focus_false_pred_true / total) ) )\nprint(\"focus_true_pred_false %d =============> FTPF : %d %%\" %( focus_true_pred_false , ( 100 * focus_true_pred_false / total) ) )\nprint(\"focus_false_pred_false %d =============> FFPF : %d %%\" % (focus_false_pred_false, ( 100 * focus_false_pred_false / total) ) )\n\nprint(\"argmax_more_than_half ==================> \",argmax_more_than_half)\nprint(\"argmax_less_than_half ==================> 
\",argmax_less_than_half)\ncol8.append(argmax_more_than_half)\ncol9.append(argmax_less_than_half)\ncol10.append(focus_true_pred_true)\ncol11.append(focus_false_pred_true)\ncol12.append(focus_true_pred_false)\ncol13.append(focus_false_pred_false)",
"Accuracy of the network on the 10000 test images: 33 %\ntotal correct 3356\ntotal train set images 10000\nfocus_true_pred_true 357 =============> FTPT : 3 %\nfocus_false_pred_true 2999 =============> FFPT : 29 %\nfocus_true_pred_false 853 =============> FTPF : 8 %\nfocus_false_pred_false 5791 =============> FFPF : 57 %\nargmax_more_than_half ==================> 0\nargmax_less_than_half ==================> 10000\n"
],
[
"nos_epochs = 200\nfocus_true_pred_true =0\nfocus_false_pred_true =0\nfocus_true_pred_false =0\nfocus_false_pred_false =0\n\nargmax_more_than_half = 0\nargmax_less_than_half =0\n\nfor epoch in range(nos_epochs): # loop over the dataset multiple times\n\n focus_true_pred_true =0\n focus_false_pred_true =0\n focus_true_pred_false =0\n focus_false_pred_false =0\n \n argmax_more_than_half = 0\n argmax_less_than_half =0\n \n running_loss = 0.0\n epoch_loss = []\n cnt=0\n\n iteration = desired_num // batch\n \n #training data set\n \n for i, data in enumerate(train_loader):\n inputs , labels , fore_idx = data\n inputs, labels = inputs.to(\"cuda\"), labels.to(\"cuda\")\n # zero the parameter gradients\n \n optimizer_focus.zero_grad()\n optimizer_classify.zero_grad()\n \n alphas, avg_images = focus_net(inputs)\n outputs = classify(avg_images)\n\n _, predicted = torch.max(outputs.data, 1)\n# print(outputs)\n# print(outputs.shape,labels.shape , torch.argmax(outputs, dim=1))\n\n loss = criterion(outputs, labels) \n loss.backward()\n optimizer_focus.step()\n optimizer_classify.step()\n\n running_loss += loss.item()\n mini = 60\n if cnt % mini == mini-1: # print every 40 mini-batches\n print('[%d, %5d] loss: %.3f' %(epoch + 1, cnt + 1, running_loss / mini))\n epoch_loss.append(running_loss/mini)\n running_loss = 0.0\n cnt=cnt+1\n \n if epoch % 5 == 0:\n for j in range (batch):\n focus = torch.argmax(alphas[j])\n\n if(alphas[j][focus] >= 0.5):\n argmax_more_than_half +=1\n else:\n argmax_less_than_half +=1\n\n if(focus == fore_idx[j] and predicted[j] == labels[j]):\n focus_true_pred_true += 1\n\n elif(focus != fore_idx[j] and predicted[j] == labels[j]):\n focus_false_pred_true +=1\n\n elif(focus == fore_idx[j] and predicted[j] != labels[j]):\n focus_true_pred_false +=1\n\n elif(focus != fore_idx[j] and predicted[j] != labels[j]):\n focus_false_pred_false +=1\n\n if(np.mean(epoch_loss) <= 0.005):\n break;\n\n if epoch % 5 == 0:\n # focus_net.eval()\n # classify.eval()\n 
col1.append(epoch+1)\n col2.append(argmax_more_than_half)\n col3.append(argmax_less_than_half)\n col4.append(focus_true_pred_true)\n col5.append(focus_false_pred_true)\n col6.append(focus_true_pred_false)\n col7.append(focus_false_pred_false)\n \n #************************************************************************\n #testing data set \n with torch.no_grad():\n focus_true_pred_true =0\n focus_false_pred_true =0\n focus_true_pred_false =0\n focus_false_pred_false =0\n\n argmax_more_than_half = 0\n argmax_less_than_half =0\n for data in test_loader:\n inputs, labels , fore_idx = data\n inputs, labels = inputs.to(\"cuda\"), labels.to(\"cuda\")\n alphas, avg_images = focus_net(inputs)\n outputs = classify(avg_images)\n _, predicted = torch.max(outputs.data, 1)\n\n for j in range (batch):\n focus = torch.argmax(alphas[j])\n\n if(alphas[j][focus] >= 0.5):\n argmax_more_than_half +=1\n else:\n argmax_less_than_half +=1\n\n if(focus == fore_idx[j] and predicted[j] == labels[j]):\n focus_true_pred_true += 1\n\n elif(focus != fore_idx[j] and predicted[j] == labels[j]):\n focus_false_pred_true +=1\n\n elif(focus == fore_idx[j] and predicted[j] != labels[j]):\n focus_true_pred_false +=1\n\n elif(focus != fore_idx[j] and predicted[j] != labels[j]):\n focus_false_pred_false +=1\n \n col8.append(argmax_more_than_half)\n col9.append(argmax_less_than_half)\n col10.append(focus_true_pred_true)\n col11.append(focus_false_pred_true)\n col12.append(focus_true_pred_false)\n col13.append(focus_false_pred_false)\n \n \nprint('Finished Training')",
"[1, 60] loss: 1.067\n[1, 120] loss: 1.028\n[2, 60] loss: 0.971\n[2, 120] loss: 0.941\n[3, 60] loss: 0.883\n[3, 120] loss: 0.875\n[4, 60] loss: 0.832\n[4, 120] loss: 0.822\n[5, 60] loss: 0.783\n[5, 120] loss: 0.767\n[6, 60] loss: 0.723\n[6, 120] loss: 0.717\n[7, 60] loss: 0.664\n[7, 120] loss: 0.652\n[8, 60] loss: 0.602\n[8, 120] loss: 0.599\n[9, 60] loss: 0.546\n[9, 120] loss: 0.544\n[10, 60] loss: 0.467\n[10, 120] loss: 0.487\n[11, 60] loss: 0.414\n[11, 120] loss: 0.434\n[12, 60] loss: 0.356\n[12, 120] loss: 0.367\n[13, 60] loss: 0.292\n[13, 120] loss: 0.301\n[14, 60] loss: 0.233\n[14, 120] loss: 0.263\n[15, 60] loss: 0.181\n[15, 120] loss: 0.196\n[16, 60] loss: 0.139\n[16, 120] loss: 0.140\n[17, 60] loss: 0.102\n[17, 120] loss: 0.115\n[18, 60] loss: 0.072\n[18, 120] loss: 0.086\n[19, 60] loss: 0.073\n[19, 120] loss: 0.064\n[20, 60] loss: 0.038\n[20, 120] loss: 0.049\n[21, 60] loss: 0.043\n[21, 120] loss: 0.054\n[22, 60] loss: 0.049\n[22, 120] loss: 0.054\n[23, 60] loss: 0.030\n[23, 120] loss: 0.038\n[24, 60] loss: 0.031\n[24, 120] loss: 0.031\n[25, 60] loss: 0.048\n[25, 120] loss: 0.036\n[26, 60] loss: 0.021\n[26, 120] loss: 0.026\n[27, 60] loss: 0.024\n[27, 120] loss: 0.035\n[28, 60] loss: 0.035\n[28, 120] loss: 0.032\n[29, 60] loss: 0.016\n[29, 120] loss: 0.025\n[30, 60] loss: 0.031\n[30, 120] loss: 0.029\n[31, 60] loss: 0.017\n[31, 120] loss: 0.020\n[32, 60] loss: 0.030\n[32, 120] loss: 0.032\n[33, 60] loss: 0.031\n[33, 120] loss: 0.018\n[34, 60] loss: 0.015\n[34, 120] loss: 0.023\n[35, 60] loss: 0.030\n[35, 120] loss: 0.020\n[36, 60] loss: 0.018\n[36, 120] loss: 0.017\n[37, 60] loss: 0.012\n[37, 120] loss: 0.008\n[38, 60] loss: 0.014\n[38, 120] loss: 0.044\n[39, 60] loss: 0.019\n[39, 120] loss: 0.019\n[40, 60] loss: 0.019\n[40, 120] loss: 0.015\n[41, 60] loss: 0.014\n[41, 120] loss: 0.021\n[42, 60] loss: 0.014\n[42, 120] loss: 0.011\n[43, 60] loss: 0.010\n[43, 120] loss: 0.013\n[44, 60] loss: 0.011\n[44, 120] loss: 0.029\n[45, 60] loss: 0.029\n[45, 120] 
loss: 0.035\n[46, 60] loss: 0.015\n[46, 120] loss: 0.026\n[47, 60] loss: 0.020\n[47, 120] loss: 0.009\n[48, 60] loss: 0.009\n[48, 120] loss: 0.025\n[49, 60] loss: 0.023\n[49, 120] loss: 0.013\n[50, 60] loss: 0.022\n[50, 120] loss: 0.015\n[51, 60] loss: 0.016\n[51, 120] loss: 0.032\n[52, 60] loss: 0.020\n[52, 120] loss: 0.014\n[53, 60] loss: 0.016\n[53, 120] loss: 0.010\n[54, 60] loss: 0.007\n[54, 120] loss: 0.009\n[55, 60] loss: 0.008\n[55, 120] loss: 0.010\n[56, 60] loss: 0.013\n[56, 120] loss: 0.023\n[57, 60] loss: 0.014\n[57, 120] loss: 0.014\n[58, 60] loss: 0.030\n[58, 120] loss: 0.012\n[59, 60] loss: 0.011\n[59, 120] loss: 0.019\n[60, 60] loss: 0.018\n[60, 120] loss: 0.023\n[61, 60] loss: 0.014\n[61, 120] loss: 0.022\n[62, 60] loss: 0.005\n[62, 120] loss: 0.006\n[63, 60] loss: 0.009\n[63, 120] loss: 0.010\n[64, 60] loss: 0.018\n[64, 120] loss: 0.017\n[65, 60] loss: 0.007\n[65, 120] loss: 0.011\n[66, 60] loss: 0.015\n[66, 120] loss: 0.016\n[67, 60] loss: 0.008\n[67, 120] loss: 0.005\n[68, 60] loss: 0.007\n[68, 120] loss: 0.019\n[69, 60] loss: 0.021\n[69, 120] loss: 0.019\n[70, 60] loss: 0.018\n[70, 120] loss: 0.016\n[71, 60] loss: 0.015\n[71, 120] loss: 0.006\n[72, 60] loss: 0.006\n[72, 120] loss: 0.006\n[73, 60] loss: 0.003\n[73, 120] loss: 0.003\nFinished Training\n"
],
[
"# torch.save(focus_net.state_dict(),\"/content/drive/My Drive/Research/Cheating_data/16_experiments_on_cnn_3layers/\"+name+\"_focus_net.pt\")",
"_____no_output_____"
],
[
"# torch.save(classify.state_dict(),\"/content/drive/My Drive/Research/Cheating_data/16_experiments_on_cnn_3layers/\"+name+\"_classify.pt\")",
"_____no_output_____"
],
[
"columns = [\"epochs\", \"argmax > 0.5\" ,\"argmax < 0.5\", \"focus_true_pred_true\", \"focus_false_pred_true\", \"focus_true_pred_false\", \"focus_false_pred_false\" ]",
"_____no_output_____"
],
[
"df_train = pd.DataFrame()\ndf_test = pd.DataFrame()",
"_____no_output_____"
],
[
"df_train[columns[0]] = col1\ndf_train[columns[1]] = col2\ndf_train[columns[2]] = col3\ndf_train[columns[3]] = col4\ndf_train[columns[4]] = col5\ndf_train[columns[5]] = col6\ndf_train[columns[6]] = col7\n\ndf_test[columns[0]] = col1\ndf_test[columns[1]] = col8\ndf_test[columns[2]] = col9\ndf_test[columns[3]] = col10\ndf_test[columns[4]] = col11\ndf_test[columns[5]] = col12\ndf_test[columns[6]] = col13",
"_____no_output_____"
],
[
"df_train",
"_____no_output_____"
],
[
"# plt.figure(12,12)\nplt.plot(col1,col2, label='argmax > 0.5')\nplt.plot(col1,col3, label='argmax < 0.5')\n\nplt.legend(loc='center left', bbox_to_anchor=(1, 0.5))\nplt.xlabel(\"epochs\")\nplt.ylabel(\"training data\")\nplt.title(\"On Training set\")\nplt.show()\n\nplt.plot(col1,col4, label =\"focus_true_pred_true \")\nplt.plot(col1,col5, label =\"focus_false_pred_true \")\nplt.plot(col1,col6, label =\"focus_true_pred_false \")\nplt.plot(col1,col7, label =\"focus_false_pred_false \")\nplt.title(\"On Training set\")\nplt.legend(loc='center left', bbox_to_anchor=(1, 0.5))\nplt.xlabel(\"epochs\")\nplt.ylabel(\"training data\")\nplt.savefig(\"train_ftpt.pdf\", bbox_inches='tight')\nplt.show()",
"_____no_output_____"
],
[
"df_test",
"_____no_output_____"
],
[
"# plt.figure(12,12)\nplt.plot(col1,col8, label='argmax > 0.5')\nplt.plot(col1,col9, label='argmax < 0.5')\n\nplt.legend(loc='center left', bbox_to_anchor=(1, 0.5))\nplt.xlabel(\"epochs\")\nplt.ylabel(\"Testing data\")\nplt.title(\"On Testing set\")\nplt.show()\n\nplt.plot(col1,col10, label =\"focus_true_pred_true \")\nplt.plot(col1,col11, label =\"focus_false_pred_true \")\nplt.plot(col1,col12, label =\"focus_true_pred_false \")\nplt.plot(col1,col13, label =\"focus_false_pred_false \")\nplt.title(\"On Testing set\")\nplt.legend(loc='center left', bbox_to_anchor=(1, 0.5))\nplt.xlabel(\"epochs\")\nplt.ylabel(\"Testing data\")\nplt.savefig(\"test_ftpt.pdf\", bbox_inches='tight')\nplt.show()",
"_____no_output_____"
],
[
"correct = 0\ntotal = 0\ncount = 0\nflag = 1\nfocus_true_pred_true =0\nfocus_false_pred_true =0\nfocus_true_pred_false =0\nfocus_false_pred_false =0\n\nargmax_more_than_half = 0\nargmax_less_than_half =0\n\nwith torch.no_grad():\n for data in train_loader:\n inputs, labels , fore_idx = data\n inputs, labels , fore_idx = inputs.to(\"cuda\"),labels.to(\"cuda\"), fore_idx.to(\"cuda\")\n alphas, avg_images = focus_net(inputs)\n outputs = classify(avg_images)\n\n _, predicted = torch.max(outputs.data, 1)\n\n for j in range(labels.size(0)):\n focus = torch.argmax(alphas[j])\n if alphas[j][focus] >= 0.5 :\n argmax_more_than_half += 1\n else:\n argmax_less_than_half += 1\n\n if(focus == fore_idx[j] and predicted[j] == labels[j]):\n focus_true_pred_true += 1\n elif(focus != fore_idx[j] and predicted[j] == labels[j]):\n focus_false_pred_true += 1\n elif(focus == fore_idx[j] and predicted[j] != labels[j]):\n focus_true_pred_false += 1\n elif(focus != fore_idx[j] and predicted[j] != labels[j]):\n focus_false_pred_false += 1\n\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\nprint('Accuracy of the network on the 30000 train images: %d %%' % (\n 100 * correct / total))\nprint(\"total correct\", correct)\nprint(\"total train set images\", total)\n\nprint(\"focus_true_pred_true %d =============> FTPT : %d %%\" % (focus_true_pred_true , (100 * focus_true_pred_true / total) ) )\nprint(\"focus_false_pred_true %d =============> FFPT : %d %%\" % (focus_false_pred_true, (100 * focus_false_pred_true / total) ) )\nprint(\"focus_true_pred_false %d =============> FTPF : %d %%\" %( focus_true_pred_false , ( 100 * focus_true_pred_false / total) ) )\nprint(\"focus_false_pred_false %d =============> FFPF : %d %%\" % (focus_false_pred_false, ( 100 * focus_false_pred_false / total) ) )\n\nprint(\"argmax_more_than_half ==================> \",argmax_more_than_half)\nprint(\"argmax_less_than_half ==================> \",argmax_less_than_half)",
"Accuracy of the network on the 30000 train images: 99 %\ntotal correct 29989\ntotal train set images 30000\nfocus_true_pred_true 17852 =============> FTPT : 59 %\nfocus_false_pred_true 12137 =============> FFPT : 40 %\nfocus_true_pred_false 4 =============> FTPF : 0 %\nfocus_false_pred_false 7 =============> FFPF : 0 %\nargmax_more_than_half ==================> 10031\nargmax_less_than_half ==================> 19969\n"
],
[
"correct = 0\ntotal = 0\ncount = 0\nflag = 1\nfocus_true_pred_true =0\nfocus_false_pred_true =0\nfocus_true_pred_false =0\nfocus_false_pred_false =0\n\nargmax_more_than_half = 0\nargmax_less_than_half =0\n\nwith torch.no_grad():\n for data in test_loader:\n inputs, labels , fore_idx = data\n inputs, labels , fore_idx = inputs.to(\"cuda\"),labels.to(\"cuda\"), fore_idx.to(\"cuda\")\n alphas, avg_images = focus_net(inputs)\n outputs = classify(avg_images)\n\n _, predicted = torch.max(outputs.data, 1)\n\n for j in range(labels.size(0)):\n focus = torch.argmax(alphas[j])\n if alphas[j][focus] >= 0.5 :\n argmax_more_than_half += 1\n else:\n argmax_less_than_half += 1\n\n if(focus == fore_idx[j] and predicted[j] == labels[j]):\n focus_true_pred_true += 1\n elif(focus != fore_idx[j] and predicted[j] == labels[j]):\n focus_false_pred_true += 1\n elif(focus == fore_idx[j] and predicted[j] != labels[j]):\n focus_true_pred_false += 1\n elif(focus != fore_idx[j] and predicted[j] != labels[j]):\n focus_false_pred_false += 1\n\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\nprint('Accuracy of the network on the 10000 test images: %d %%' % (\n 100 * correct / total))\nprint(\"total correct\", correct)\nprint(\"total train set images\", total)\n\nprint(\"focus_true_pred_true %d =============> FTPT : %d %%\" % (focus_true_pred_true , (100 * focus_true_pred_true / total) ) )\nprint(\"focus_false_pred_true %d =============> FFPT : %d %%\" % (focus_false_pred_true, (100 * focus_false_pred_true / total) ) )\nprint(\"focus_true_pred_false %d =============> FTPF : %d %%\" %( focus_true_pred_false , ( 100 * focus_true_pred_false / total) ) )\nprint(\"focus_false_pred_false %d =============> FFPF : %d %%\" % (focus_false_pred_false, ( 100 * focus_false_pred_false / total) ) )\n\nprint(\"argmax_more_than_half ==================> \",argmax_more_than_half)\nprint(\"argmax_less_than_half ==================> \",argmax_less_than_half)",
"Accuracy of the network on the 10000 test images: 72 %\ntotal correct 7222\ntotal train set images 10000\nfocus_true_pred_true 4993 =============> FTPT : 49 %\nfocus_false_pred_true 2229 =============> FFPT : 22 %\nfocus_true_pred_false 785 =============> FTPF : 7 %\nfocus_false_pred_false 1993 =============> FFPF : 19 %\nargmax_more_than_half ==================> 3277\nargmax_less_than_half ==================> 6723\n"
],
[
"correct = 0\ntotal = 0\n\nwith torch.no_grad():\n for data in train_loader:\n inputs, labels , fore_idx = data\n inputs, labels = inputs.to(\"cuda\"), labels.to(\"cuda\")\n alphas, avg_images = focus_net(inputs)\n outputs = classify(avg_images)\n\n _, predicted = torch.max(outputs.data, 1)\n\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\nprint('Accuracy of the network on the 30000 train images: %d %%' % ( 100 * correct / total))\nprint(\"total correct\", correct)\nprint(\"total train set images\", total)",
"Accuracy of the network on the 30000 train images: 99 %\ntotal correct 29989\ntotal train set images 30000\n"
],
[
"correct = 0\ntotal = 0\n\nwith torch.no_grad():\n for data in test_loader:\n inputs, labels , fore_idx = data\n inputs, labels = inputs.to(\"cuda\"), labels.to(\"cuda\")\n alphas, avg_images = focus_net(inputs)\n outputs = classify(avg_images)\n\n _, predicted = torch.max(outputs.data, 1)\n\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\nprint('Accuracy of the network on the 10000 test images: %d %%' % ( 100 * correct / total))\nprint(\"total correct\", correct)\nprint(\"total train set images\", total)",
"Accuracy of the network on the 10000 test images: 72 %\ntotal correct 7222\ntotal train set images 10000\n"
],
[
"max_alpha =[]\r\nalpha_ftpt=[]\r\nargmax_more_than_half=0\r\nargmax_less_than_half=0\r\nfor i, data in enumerate(test_loader):\r\n inputs, labels,_ = data\r\n inputs = inputs.double()\r\n inputs, labels = inputs.to(\"cuda\"),labels.to(\"cuda\")\r\n alphas, avg = focus_net(inputs)\r\n outputs = classify(avg)\r\n mx,_ = torch.max(alphas,1)\r\n max_alpha.append(mx.cpu().detach().numpy())\r\n\r\n for j in range(labels.size(0)):\r\n focus = torch.argmax(alphas[j])\r\n if alphas[j][focus] >= 0.5 :\r\n argmax_more_than_half += 1\r\n else:\r\n argmax_less_than_half += 1\r\n\r\n if (focus == fore_idx[j] and predicted[j] == labels[j]):\r\n alpha_ftpt.append(alphas[j][focus].item())\r\n\r\nmax_alpha = np.concatenate(max_alpha,axis=0)\r\nprint(max_alpha.shape)",
"(10000,)\n"
],
[
"plt.figure(figsize=(6,6))\r\n_,bins,_ = plt.hist(max_alpha,bins=50,color =\"c\")\r\nplt.title(\"alpha values histogram\")\r\nplt.savefig(\"alpha_hist.pdf\")",
"_____no_output_____"
],
[
"plt.figure(figsize=(6,6))\r\n_,bins,_ = plt.hist(np.array(alpha_ftpt),bins=50,color =\"c\")\r\nplt.title(\"alpha values in ftpt\")\r\nplt.savefig(\"alpha_hist_ftpt.pdf\")",
"_____no_output_____"
],
[
"\r\n",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4abff8d5f313205540cef8e8e68648cee331bec2
| 19,224 |
ipynb
|
Jupyter Notebook
|
ann/Neural Networks.ipynb
|
juliocnsouzadev/deep-learning
|
f557b18c31bb69468e9f33f578be7513f57d9712
|
[
"MIT"
] | null | null | null |
ann/Neural Networks.ipynb
|
juliocnsouzadev/deep-learning
|
f557b18c31bb69468e9f33f578be7513f57d9712
|
[
"MIT"
] | null | null | null |
ann/Neural Networks.ipynb
|
juliocnsouzadev/deep-learning
|
f557b18c31bb69468e9f33f578be7513f57d9712
|
[
"MIT"
] | null | null | null | 38.294821 | 225 | 0.445329 |
[
[
[
"# Neural Network",
"_____no_output_____"
]
],
[
[
"# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd",
"_____no_output_____"
],
[
"# Importing the dataset\ndataset = pd.read_csv('Churn_Modelling.csv')\nX = dataset.iloc[:, 3:13].values\ny = dataset.iloc[:, 13].values",
"_____no_output_____"
],
[
"# Encoding categorical data\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nlabelencoder_X_1 = LabelEncoder()\nX[:, 1] = labelencoder_X_1.fit_transform(X[:, 1])\nlabelencoder_X_2 = LabelEncoder()\nX[:, 2] = labelencoder_X_2.fit_transform(X[:, 2])\nonehotencoder = OneHotEncoder(categorical_features = [1])\nX = onehotencoder.fit_transform(X).toarray()\nX = X[:, 1:]",
"_____no_output_____"
],
[
"# Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)",
"_____no_output_____"
],
[
"# Feature Scaling\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)",
"_____no_output_____"
],
[
"# Importing the Keras libraries and packages\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense",
"Using TensorFlow backend.\n"
],
[
"# Initialising the ANN\nclassifier = Sequential()",
"_____no_output_____"
],
[
"# Adding the input layer and the first hidden layer\nclassifier.add(Dense(output_dim = 6, init = 'uniform', activation = 'relu', input_dim = 11))",
"C:\\ProgramData\\Anaconda3\\lib\\site-packages\\ipykernel_launcher.py:2: UserWarning: Update your `Dense` call to the Keras 2 API: `Dense(activation=\"relu\", input_dim=11, units=6, kernel_initializer=\"uniform\")`\n \n"
],
[
"# Adding the second hidden layer\nclassifier.add(Dense(output_dim = 6, init = 'uniform', activation = 'relu'))",
"C:\\ProgramData\\Anaconda3\\lib\\site-packages\\ipykernel_launcher.py:2: UserWarning: Update your `Dense` call to the Keras 2 API: `Dense(activation=\"relu\", units=6, kernel_initializer=\"uniform\")`\n \n"
],
[
"# Adding the output layer\nclassifier.add(Dense(output_dim = 1, init = 'uniform', activation = 'sigmoid'))",
"C:\\ProgramData\\Anaconda3\\lib\\site-packages\\ipykernel_launcher.py:2: UserWarning: Update your `Dense` call to the Keras 2 API: `Dense(activation=\"sigmoid\", units=1, kernel_initializer=\"uniform\")`\n \n"
],
[
"# Compiling the ANN\nclassifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])",
"_____no_output_____"
],
[
"# Fitting the ANN to the Training set\nclassifier.fit(X_train, y_train, batch_size = 10, nb_epoch = 100)",
"C:\\ProgramData\\Anaconda3\\lib\\site-packages\\keras\\models.py:942: UserWarning: The `nb_epoch` argument in `fit` has been renamed `epochs`.\n warnings.warn('The `nb_epoch` argument in `fit` '\n"
],
[
"# Predicting the Test set results\ny_pred = classifier.predict(X_test)\ny_pred = (y_pred > 0.5)",
"_____no_output_____"
],
[
"# Making the Confusion Matrix\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, y_pred)",
"_____no_output_____"
],
[
"cm",
"_____no_output_____"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4abff952a230a8f6ee7e20de686b310a06feac97
| 81,662 |
ipynb
|
Jupyter Notebook
|
HW1.ipynb
|
cohenjt/climate-dynamics-homework
|
87df12ed8c4de956255722c585503248e87683f2
|
[
"MIT"
] | null | null | null |
HW1.ipynb
|
cohenjt/climate-dynamics-homework
|
87df12ed8c4de956255722c585503248e87683f2
|
[
"MIT"
] | null | null | null |
HW1.ipynb
|
cohenjt/climate-dynamics-homework
|
87df12ed8c4de956255722c585503248e87683f2
|
[
"MIT"
] | null | null | null | 110.056604 | 17,560 | 0.873405 |
[
[
[
"# 1. Terrestrial vs solar origins of radiation in Earth's atmosphere",
"_____no_output_____"
]
],
[
[
"import math\nimport numpy as np\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"# Define Constants\nTs = 5785 # K\nTe = 255 # K\n\ndes = 150e9 # m\nre = 6.371e6 # m\nrs = 6.96e8 # m\n\nh = 6.62e-34 # m^2 kg/s\nc = 299792458 # m/s\nk = 1.38e-23 # J/K (kg m2 s-2 K-1)",
"_____no_output_____"
]
],
[
[
"### (a)",
"_____no_output_____"
]
],
[
[
"def I_lambda(T, lam):\n intensity = 2 * h * c**2 * (lam**-5)/(np.expm1((h*c)/(lam*k*T)))\n return intensity",
"_____no_output_____"
],
[
"lambda_vals_s = np.linspace(0, 5e-6, 201)[1:]\nlambda_vals_e = np.linspace(0, 5e-5, 201)[1:]\nIs = I_lambda(Ts, lambda_vals_s)\nIe = I_lambda(Te, lambda_vals_e)\n\nplt.plot(lambda_vals_s, Is)\nplt.title('Blackbody Intensity of the Sun')\nplt.show()\n\nplt.plot(lambda_vals_e, Ie)\nplt.title('Blackbody Intensity of the Earth')\nplt.show()",
"_____no_output_____"
],
[
"max_s = lambda_vals_s[np.argmax(Is)]*10**9\nmax_e = lambda_vals_e[np.argmax(Ie)]*10**6\n\nprint(f\"The peak wavelength of the Sun's radiation is at {max_s:.0f} nm.\")\nprint()\nprint(f\"The peak wavelength of the Earth's radiation is at {max_e:.0f} \\u03BCm.\")",
"The peak wavelength of the Sun's radiation is at 500 nm.\n\nThe peak wavelength of the Earth's radiation is at 11 μm.\n"
]
],
[
[
"### (b)",
"_____no_output_____"
]
],
[
[
"Is_smax = I_lambda(Ts, max_s)\nIe_smax = I_lambda(Te, max_s)\nIs_emax = I_lambda(Ts, max_e)\nIe_emax = I_lambda(Te, max_e)\n\nratio_smax = Is_smax / Ie_smax\nratio_emax = Is_emax / Ie_emax\n\nprint(f\"The ratio of the spectra at {max_s:.0f} nm is {ratio_smax}.\")\nprint(f\"The ratio of the spectra at {max_e:.0f} \\u03BCm is {ratio_emax}.\")",
"The ratio of the spectra at 500 nm is 22.68627573285443.\nThe ratio of the spectra at 11 μm is 22.686328867691113.\n"
]
],
[
[
"### (c)",
"_____no_output_____"
]
],
[
[
"s_emit_area = 4 * np.pi * des**2 # emits radiation as a shell with radius des\ne_absorb_area = np.pi * re**2 # absorbs radiation as a disk with radius re\nfrac_earth = e_absorb_area / s_emit_area\n\nIs_earth = Is * frac_earth\nplt.plot(lambda_vals_s, Is_earth)\nplt.title('Intensity at Earth')\nplt.show()",
"_____no_output_____"
],
[
"Is_smax_earth = Is_smax * frac_earth\nIs_emax_earth = Is_emax * frac_earth\n\nratio_smax_earth = Is_smax_earth / Ie_smax\nratio_emax_earth = Is_emax_earth / Ie_emax\n\nprint(f\"The ratio of the spectra at Earth's atmosphere at {max_s:.0f} nm is {ratio_smax_earth}.\")\nprint(f\"The ratio of the spectra at Earth's atmosphere at {max_e:.0f} \\u03BCm is {ratio_emax_earth}.\")",
"The ratio of the spectra at Earth's atmosphere at 500 nm is 1.0231419862484147e-08.\nThe ratio of the spectra at Earth's atmosphere at 11 μm is 1.0231443826083542e-08.\n"
]
],
[
[
"### (d)",
"_____no_output_____"
]
],
[
[
"Is_earth_full = I_lambda(Ts, lambda_vals_e) * frac_earth\nplt.plot(lambda_vals_e, Is_earth_full, lambda_vals_e, Ie)\nplt.xlim([0, 0.4e-5])\nplt.ylim([0, 7e3])\nplt.title('Intensity ')\nplt.show()",
"_____no_output_____"
]
],
[
[
"The spectra overlap at a wavelength of about 2.5 micrometers. ",
"_____no_output_____"
]
],
[
[
"import scipy.integrate as integrate\n\ndef intens_ratio(lam):\n ratio = (I_lambda(Ts, lam)*frac_earth) / I_lambda(Te, lam)\n return ratio\n \nrad = integrate.quad(intens_ratio, 2.5e-6, 100e-6)\nprint(rad[0])",
"2.2075852465595215e-07\n"
]
],
[
[
"The ratio from lambda_overlap to 100 um tells us the relative fraction of the radiation at the top of the atmosphere between 2.5 and 100 um that is coming from the Sun. The amount of longwave radiation at the top of the atmosphere that originates from the sun is a tiny amount when compared to the amount of longwave radiation that comes from the Earth.",
"_____no_output_____"
],
[
"### (e)",
"_____no_output_____"
],
[
"The 4th power in the Stefan-Boltzmann equation is a result of the energy spectrum of photons. The photon spectrum, which is the energy density per unit photon energy, depends on the third power of the photon energy (1 for each spatial dimension) that is proportional to T. To find the total energy density, we integrate over all the photon energies, which gives us another factor of T so that we end up with a 4th power. The 4 comes from integrating the 3 spatial dimensions.",
"_____no_output_____"
],
[
"# 2. Climate of Flatland",
"_____no_output_____"
],
[
"I did most of Question 2 on paper.",
"_____no_output_____"
],
[
"### (b)",
"_____no_output_____"
]
],
[
[
"import scipy.optimize as so",
"_____no_output_____"
],
[
"def eq(x):\n return (x*np.exp(x)) / (np.expm1(x)) - 4\n\nx_init = 4 # initial guess based on 3d version\n\nx = so.fsolve(eq, x_init)[0]\n\nwein_const = (h*c) / (x*k) # m K\n\nprint(f\"Wein's Law in 2D: \\u03BBT = {(wein_const*10**6):.0f} \\u03BCm K\")",
"Wein's Law in 2D: λT = 3668 μm K\n"
],
[
"T = 5785 # K\nl_max = wein_const / T * 10**9\nprint(f\"The solar intensity peaks at \\u03BB = {l_max:.0f} nm\")",
"The solar intensity peaks at λ = 634 nm\n"
]
],
[
[
"### (c)",
"_____no_output_____"
]
],
[
[
"A = 2.404\nsig_2d = (k**3 * A) / (h**2 * c)\nprint(f\"\\u03C3 = {sig_2d:.2e} W/m/K^3\")\nprint(f\"The 2D Stefan-Boltzmann equation is \\u03C3T^3\")",
"σ = 4.81e-11 W/m/K^3\nThe 2D Stefan-Boltzmann equation is σT^3\n"
]
],
[
[
"### (d)",
"_____no_output_____"
]
],
[
[
"S0 = sig_2d * T**3\nrad_earth = S0 * re / 2\nprint(f\"The radiation that reaches Earth averaged over its 1D surface is {rad_earth:.2e} W/m\")",
"The radiation that reaches Earth averaged over its 1D surface is 2.97e+07 W/m\n"
],
[
"alpha = 0.3\nT_earth = (((1-alpha)*S0*re) / (2*sig_2d)) ** (1/3)\nprint(f\"The temperature of the 2D Earth is {T_earth:.2f} K.\")",
"The temperature of the 2D Earth is 755779.74 K.\n"
]
],
[
[
"# 3. Radiative forcing and global warming in a two-layer atmosphere model",
"_____no_output_____"
]
],
[
[
"sig = 5.67e-8 # W/m^2 K^4\nso = 1370 # W/m^2\nalpha = 0.3",
"_____no_output_____"
]
],
[
[
"### (a)",
"_____no_output_____"
]
],
[
[
"eps1 = 0.65\neps2 = 0.25",
"_____no_output_____"
],
[
"Tsurf4 = ((1-alpha)*(so/4)*(4-eps1*eps2)) / (sig*(2-eps1)*(2-eps2))\nT14 = Tsurf4 * ((2+eps2-eps1*eps2) / (4-eps1*eps2))\nT24 = Tsurf4 * ((2-eps1) / (4-eps1*eps2))\n\nTsurf = Tsurf4**(1/4)\nT1 = T14**(1/4)\nT2 = T24**(1/4)\n\nprint(f'Ts: {Tsurf:.2f} K')\nprint(f'T1: {T1:.2f} K')\nprint(f'T2: {T2:.2f} K')",
"Ts: 287.88 K\nT1: 247.23 K\nT2: 221.71 K\n"
]
],
[
[
"### (b)",
"_____no_output_____"
]
],
[
[
"eps2_prime = 0.29",
"_____no_output_____"
],
[
"def TOA(e1, e2):\n return (1-e1)*(1-e2)*sig*Tsurf4 + (1-e2)*sig*T14 + e2*sig*T24",
"_____no_output_____"
],
[
"delta_TOA = TOA(eps1, eps2_prime) - TOA(eps1, eps2)\nprint(f'The change in net TOA radiation flux is {delta_TOA:0.2f} W/m^2.')",
"The change in net TOA radiation flux is -8.45 W/m^2.\n"
]
],
[
[
"This is roughly double the amount that we calculated in class for a doubling of CO2 (-3.9 W/m^2).",
"_____no_output_____"
],
[
"### (c)",
"_____no_output_____"
]
],
[
[
"def surf_flux(e1, e2):\n return (1-alpha)*so/4 + e1*sig*T14 + (1-e1)*e2*sig*T24 - sig*Tsurf4",
"_____no_output_____"
],
[
"delta_surf_flux = surf_flux(eps1, eps2_prime) - surf_flux(eps1, eps2)\nprint(f'The change in net surface radiation flux is {delta_surf_flux:0.2f} W/m^2.')",
"The change in net surface radiation flux is 1.92 W/m^2.\n"
]
],
[
[
"Because the TOA radiation flux decreases and the surface radiation flux increases, I expect Ts, T1, and T2 to increase once they are allowed to adjust.",
"_____no_output_____"
],
[
"### (d)",
"_____no_output_____"
]
],
[
[
"T14_new = Tsurf4 * ((2+eps2_prime-eps1*eps2_prime) / (4-eps1*eps2_prime))\nT24_new = Tsurf4 * ((2-eps1) / (4-eps1*eps2_prime))\n\nT1_new = T14_new**(1/4)\nT2_new = T24_new**(1/4)\n\nprint(f'Adjusted T1: {T1_new:.2f} K')\nprint(f'Adjusted T2: {T2_new:.2f} K')",
"Adjusted T1: 248.07 K\nAdjusted T2: 222.09 K\n"
],
[
"def TOA_new(e1, e2):\n return (1-e1)*(1-e2)*sig*Tsurf4 + (1-e2)*sig*T14_new + e2*sig*T24_new\n\ndef surf_flux_new(e1, e2):\n return (1-alpha)*so/4 + e1*sig*T14_new + (1-e1)*e2*sig*T24_new - sig*Tsurf4",
"_____no_output_____"
],
[
"delta_TOA_new = TOA_new(eps1, eps2_prime) - TOA_new(eps1, eps2)\nprint(f'The adjusted change in net TOA radiation flux is {delta_TOA_new:0.2f} W/m^2.')\n\ndelta_surf_flux_new = surf_flux_new(eps1, eps2_prime) - surf_flux_new(eps1, eps2)\nprint(f'The adjusted change in net surface radiation flux is {delta_surf_flux_new:0.2f} W/m^2.')",
"The adjusted change in net TOA radiation flux is -8.52 W/m^2.\nThe adjusted change in net surface radiation flux is 1.93 W/m^2.\n"
]
],
[
[
"The effective radiative forcing is larger than the instantaneous radiative forcing.",
"_____no_output_____"
],
[
"### (e)",
"_____no_output_____"
]
],
[
[
"Tsurf_new = (((1-alpha)*(so/4)*(4-eps1*eps2_prime)) / (sig*(2-eps1)*(2-eps2_prime)))**(1/4)\nprint(f'The Equilibrium Climate Sensitivity is {(Tsurf_new-Tsurf):.2f} K.')",
"The Equilibrium Climate Sensitivity is 1.18 K.\n"
]
],
[
[
"This ECS value is below the canonical ECS range of 2-5 K. Possible climate processes not in this model that could explain this difference include changes in surface albedo, changes in cloud cover, and ocean dynamics. These are all sensitive to changes in radiative forcing and could influence the ECS.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4abffac5d6c8ad420c9a33028059a4fa160d62a9
| 135,088 |
ipynb
|
Jupyter Notebook
|
Data_Visualization_1.ipynb
|
ParthibHW19/Data-Analysis-with-Python
|
cbba07db445bd8ff4e6c146cb488ff60104b0595
|
[
"MIT"
] | null | null | null |
Data_Visualization_1.ipynb
|
ParthibHW19/Data-Analysis-with-Python
|
cbba07db445bd8ff4e6c146cb488ff60104b0595
|
[
"MIT"
] | null | null | null |
Data_Visualization_1.ipynb
|
ParthibHW19/Data-Analysis-with-Python
|
cbba07db445bd8ff4e6c146cb488ff60104b0595
|
[
"MIT"
] | null | null | null | 170.350567 | 110,294 | 0.837617 |
[
[
[
"# **Data Visulaization: Matplotlib, Seaborn**\n\n# **Project: Analyze \"Auto mpg data\" and draw a pair plot using seaborn library for mpg,weight,origin**\n\n# **Author: Parthib**",
"_____no_output_____"
]
],
[
[
"#import the required libraries\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns",
"_____no_output_____"
],
[
"#To view the matplotlib in notebook\n%matplotlib inline",
"_____no_output_____"
],
[
"#import the auto dataset\nAuto_DF = pd.read_csv('auto_data.csv')",
"_____no_output_____"
],
[
"#view first 5 records\nAuto_DF.head()",
"_____no_output_____"
],
[
"#write a user defined function for origin\ndef origin(num):\n if num ==1:\n return 'USA'\n elif num==2:\n return 'Europe'\n else:\n return 'Asia'",
"_____no_output_____"
],
[
"#Use apply function on data set\nAuto_DF['origin']=Auto_DF['origin'].apply(origin)",
"_____no_output_____"
],
[
"#view first 30 records\nAuto_DF.head(30)",
"_____no_output_____"
],
[
"#Draw the pairplot using seaborn for mpg, weight and origin\nsns.pairplot(Auto_DF[['mpg','weight','origin']],hue='origin',size=8)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4ac0468f8d0d59dcfa54f7287a5e222d9f0e5e2a
| 14,989 |
ipynb
|
Jupyter Notebook
|
5. Databases_SQL/Lab - Connecting DB and querying .ipynb
|
naquech/IBM_Watson_Studio
|
9bc831b1448a60b8720b232e9a74d40665ef2cbf
|
[
"MIT"
] | null | null | null |
5. Databases_SQL/Lab - Connecting DB and querying .ipynb
|
naquech/IBM_Watson_Studio
|
9bc831b1448a60b8720b232e9a74d40665ef2cbf
|
[
"MIT"
] | null | null | null |
5. Databases_SQL/Lab - Connecting DB and querying .ipynb
|
naquech/IBM_Watson_Studio
|
9bc831b1448a60b8720b232e9a74d40665ef2cbf
|
[
"MIT"
] | null | null | null | 14,989 | 14,989 | 0.676429 |
[
[
[
"# Introduction\n\nThis notebook illustrates how to access your database instance using Python by following the steps below:\n1. Import the `ibm_db` Python library\n1. Identify and enter the database connection credentials\n1. Create the database connection\n1. Create a table\n1. Insert data into the table\n1. Query data from the table\n1. Retrieve the result set into a pandas dataframe\n1. Close the database connection\n\n\n__Notice:__ Please follow the instructions given in the first Lab of this course to Create a database service instance of Db2 on Cloud.\n\n## Task 1: Import the `ibm_db` Python library\n\nThe `ibm_db` [API ](https://pypi.python.org/pypi/ibm_db/) provides a variety of useful Python functions for accessing and manipulating data in an IBM® data server database, including functions for connecting to a database, preparing and issuing SQL statements, fetching rows from result sets, calling stored procedures, committing and rolling back transactions, handling errors, and retrieving metadata.\n\n\nWe import the ibm_db library into our Python Application\n",
"_____no_output_____"
]
],
[
[
"import ibm_db",
"_____no_output_____"
]
],
[
[
"When the command above completes, the `ibm_db` library is loaded in your notebook. \n\n\n## Task 2: Identify the database connection credentials\n\nConnecting to dashDB or DB2 database requires the following information:\n* Driver Name\n* Database name \n* Host DNS name or IP address \n* Host port\n* Connection protocol\n* User ID\n* User Password\n\n\n\n__Notice:__ To obtain credentials please refer to the instructions given in the first Lab of this course\n\nNow enter your database credentials below\n\nReplace the placeholder values in angular brackets <> below with your actual database credentials \n\ne.g. replace \"database\" with \"BLUDB\"\n\n",
"_____no_output_____"
]
],
[
[
"#Replace the placeholder values with the actuals for your Db2 Service Credentials\ndsn_driver = \"{IBM DB2 ODBC DRIVER}\"\ndsn_database = \"database\" # e.g. \"BLUDB\"\ndsn_hostname = \"hostname\" # e.g.: \"dashdb-txn-sbox-yp-dal09-04.services.dal.bluemix.net\"\ndsn_port = \"port\" # e.g. \"50000\" \ndsn_protocol = \"protocol\" # i.e. \"TCPIP\"\ndsn_uid = \"username\" # e.g. \"abc12345\"\ndsn_pwd = \"password\" # e.g. \"7dBZ3wWt9XN6$o0J\"",
"_____no_output_____"
],
[
"# @hidden_cell\n\ndsn_driver = \"DATABASE=BLUDB;HOSTNAME=dashdb-txn-sbox-yp-dal09-03.services.dal.bluemix.net;PORT=50000;PROTOCOL=TCPIP;UID=wvb91528;PWD=tm^1nlbn4dj3j04b;\"\ndsn_database = \"BLUDB\" \ndsn_hostname = \"dashdb-txn-sbox-yp-dal09-03.services.dal.bluemix.net\" \ndsn_port = \"50000\" \ndsn_protocol = \"TCPIP\" \n\ndsn_uid = \"wvb91528\" \ndsn_pwd = \"tm^1nlbn4dj3j04b\" \n",
"_____no_output_____"
]
],
[
[
"## Task 3: Create the database connection\n\nIbm_db API uses the IBM Data Server Driver for ODBC and CLI APIs to connect to IBM DB2 and Informix.\n\n\nCreate the database connection\n",
"_____no_output_____"
]
],
[
[
"#Create database connection\n#DO NOT MODIFY THIS CELL. Just RUN it with Shift + Enter\ndsn = (\n \"DRIVER={0};\"\n \"DATABASE={1};\"\n \"HOSTNAME={2};\"\n \"PORT={3};\"\n \"PROTOCOL={4};\"\n \"UID={5};\"\n \"PWD={6};\").format(dsn_driver, dsn_database, dsn_hostname, dsn_port, dsn_protocol, dsn_uid, dsn_pwd)\n\ntry:\n conn = ibm_db.connect(dsn, \"\", \"\")\n print (\"Connected to database: \", dsn_database)\n #print (\"Connected to database: \", dsn_database, \"as user: \", dsn_uid, \"on host: \", dsn_hostname)\n\nexcept:\n print (\"Unable to connect: \", ibm_db.conn_errormsg() )\n",
"Connected to database: BLUDB\n"
]
],
[
[
"## Task 4: Create a table in the database\n\nIn this step we will create a table in the database with following details:\n\n<img src = \"https://ibm.box.com/shared/static/ztd2cn4xkdoj5erlk4hhng39kbp63s1h.jpg\" align=\"center\">\n",
"_____no_output_____"
]
],
[
[
"#Lets first drop the table INSTRUCTOR in case it exists from a previous attempt\ndropQuery = \"drop table INSTRUCTOR\"\n\n#Now execute the drop statment\ndropStmt = ibm_db.exec_immediate(conn, dropQuery)",
"_____no_output_____"
]
],
[
[
"## Dont worry if you get this error:\nIf you see an exception/error similar to the following, indicating that INSTRUCTOR is an undefined name, that's okay. It just implies that the INSTRUCTOR table does not exist in the table - which would be the case if you had not created it previously.\n\nException: [IBM][CLI Driver][DB2/LINUXX8664] SQL0204N \"ABC12345.INSTRUCTOR\" is an undefined name. SQLSTATE=42704 SQLCODE=-204",
"_____no_output_____"
]
],
[
[
"#Construct the Create Table DDL statement - replace the ... with rest of the statement\ncreateQuery = \"create table INSTRUCTOR(id INTEGER PRIMARY KEY NOT NULL, fname varchar(15), lname varchar(15), city varchar(15), ccode char(3))\"\n\n#Now fill in the name of the method and execute the statement\ncreateStmt = ibm_db.exec_immediate(conn, createQuery)",
"_____no_output_____"
]
],
[
[
"## Task 5: Insert data into the table\n\nIn this step we will insert some rows of data into the table. \n\nThe INSTRUCTOR table we created in the previous step contains 3 rows of data:\n\n<img src=\"https://ibm.box.com/shared/static/j5yjassxefrjknivfpekj7698dqe4d8i.jpg\" align=\"center\">\n\nWe will start by inserting just the first row of data, i.e. for instructor Rav Ahuja \n",
"_____no_output_____"
]
],
[
[
"#Construct the query - replace ... with the insert statement\ninsertQuery = \"insert into INSTRUCTOR (id, fname, lname, city, ccode) values (1, 'Rav', 'Ahuja', 'Toronto', 'CA');\"\n\n#execute the insert statement\ninsertStmt = ibm_db.exec_immediate(conn, insertQuery)",
"_____no_output_____"
]
],
[
[
"Now use a single query to insert the remaining two rows of data",
"_____no_output_____"
]
],
[
[
"#replace ... with the insert statement that inerts the remaining two rows of data\ninsertQuery2 = \"insert into INSTRUCTOR values (2, 'Raul', 'Chong', 'Markham', 'CA'), \\\n (3, 'Hima', 'Vasudevan', 'Chicago', 'US')\"\n\n#execute the statement\ninsertStmt2 = ibm_db.exec_immediate(conn, insertQuery2)",
"_____no_output_____"
]
],
[
[
"## Task 6: Query data in the table\n\nIn this step we will retrieve data we inserted into the INSTRUCTOR table. \n",
"_____no_output_____"
]
],
[
[
"#Construct the query that retrieves all rows from the INSTRUCTOR table\nselectQuery = \"select * from INSTRUCTOR\"\n\n#Execute the statement\nselectStmt = ibm_db.exec_immediate(conn, selectQuery)\n\n#Fetch the Dictionary (for the first row only) - replace ... with your code\nibm_db.fetch_both(selectStmt)",
"_____no_output_____"
],
[
"#Fetch the rest of the rows and print the ID and FNAME for those rows\nwhile ibm_db.fetch_row(selectStmt) != False:\n print (\" ID:\", ibm_db.result(selectStmt, 0), \" FNAME:\", ibm_db.result(selectStmt, \"FNAME\"))",
" ID: 2 FNAME: Raul\n ID: 3 FNAME: Hima\n"
]
],
[
[
"Double-click __here__ for the solution.\n\n<!-- Hint:\n\n#Fetch the rest of the rows and print the ID and FNAME for those rows\nwhile ibm_db.fetch_row(selectStmt) != False:\n print (\" ID:\", ibm_db.result(selectStmt, 0), \" FNAME:\", ibm_db.result(selectStmt, \"FNAME\"))\n\n-->",
"_____no_output_____"
],
[
"Bonus: now write and execute an update statement that changes the Rav's CITY to MOOSETOWN ",
"_____no_output_____"
]
],
[
[
"#Enter your code below\nupdateQuery = \"update INSTRUCTOR set city='Moosetown' where fname='Rav' \"\nupdateStm = ibm_db.exec_immediate(conn, updateQuery)",
"_____no_output_____"
]
],
[
[
"## Task 7: Retrieve data into Pandas \n\nIn this step we will retrieve the contents of the INSTRUCTOR table into a Pandas dataframe",
"_____no_output_____"
]
],
[
[
"import pandas\nimport ibm_db_dbi",
"_____no_output_____"
],
[
"#connection for pandas\npconn = ibm_db_dbi.Connection(conn)",
"_____no_output_____"
],
[
"#query statement to retrieve all rows in INSTRUCTOR table\nselectQuery = \"select * from INSTRUCTOR\"\n\n#retrieve the query results into a pandas dataframe\npdf = pandas.read_sql(selectQuery, pconn)\n\n#print just the LNAME for first row in the pandas data frame\npdf.LNAME[0]",
"_____no_output_____"
],
[
"#print the entire data frame\npdf",
"_____no_output_____"
]
],
[
[
"Once the data is in a Pandas dataframe, you can do the typical pandas operations on it. \n\nFor example you can use the shape method to see how many rows and columns are in the dataframe",
"_____no_output_____"
]
],
[
[
"pdf.shape",
"_____no_output_____"
]
],
[
[
"## Task 8: Close the Connection\nWe free all resources by closing the connection. Remember that it is always important to close connections so that we can avoid unused connections taking up resources.\n",
"_____no_output_____"
]
],
[
[
"ibm_db.close(conn)",
"_____no_output_____"
]
],
[
[
"## Summary\n\nIn this tutorial you established a connection to a database instance of DB2 Warehouse on Cloud from a Python notebook using ibm_db API. Then created a table and insert a few rows of data into it. Then queried the data. You also retrieved the data into a pandas dataframe.",
"_____no_output_____"
],
[
"Copyright © 2017-2018 [cognitiveclass.ai](cognitiveclass.ai?utm_source=bducopyrightlink&utm_medium=dswb&utm_campaign=bdu). This notebook and its source code are released under the terms of the [MIT License](https://bigdatauniversity.com/mit-license/).\n",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
4ac046e6e314a3032ea518b150a6fb36ef203a0c
| 274,803 |
ipynb
|
Jupyter Notebook
|
conditional/.ipynb_checkpoints/main_conditional_disentangle_cifar_bs900_sratio_0_5_drop_0_5_rl_stdscale_15_annealing_run3-checkpoint.ipynb
|
minhtannguyen/ffjord
|
f3418249eaa4647f4339aea8d814cf2ce33be141
|
[
"MIT"
] | null | null | null |
conditional/.ipynb_checkpoints/main_conditional_disentangle_cifar_bs900_sratio_0_5_drop_0_5_rl_stdscale_15_annealing_run3-checkpoint.ipynb
|
minhtannguyen/ffjord
|
f3418249eaa4647f4339aea8d814cf2ce33be141
|
[
"MIT"
] | null | null | null |
conditional/.ipynb_checkpoints/main_conditional_disentangle_cifar_bs900_sratio_0_5_drop_0_5_rl_stdscale_15_annealing_run3-checkpoint.ipynb
|
minhtannguyen/ffjord
|
f3418249eaa4647f4339aea8d814cf2ce33be141
|
[
"MIT"
] | null | null | null | 104.567352 | 1,147 | 0.578829 |
[
[
[
"import os\nos.environ['CUDA_VISIBLE_DEVICES']='4,5,6,7'",
"_____no_output_____"
],
[
"%run -p ../train_cnf_disentangle_rl.py --data cifar10 --dims 64,64,64 --strides 1,1,1,1 --num_blocks 2 --layer_type concat --multiscale True --rademacher True --batch_size 900 --test_batch_size 500 --save ../experiments_published/cnf_conditional_disentangle_cifar10_bs900_sratio_0_5_drop_0_5_rl_stdscale_15_annealing_run3 --seed 3 --lr 0.001 --conditional True --controlled_tol False --train_mode semisup --log_freq 10 --weight_y 0.5 --condition_ratio 0.5 --dropout_rate 0.5 --scale_fac 1.0 --scale_std 15.0 --annealing_std True\n#",
"/tancode/repos/tan-ffjord/train_cnf_disentangle_rl.py\nimport argparse\nimport os\nimport time\nimport numpy as np\n\nimport torch\nimport torch.optim as optim\nimport torchvision.datasets as dset\nimport torchvision.transforms as tforms\nfrom torchvision.utils import save_image\n\nimport lib.layers as layers\nimport lib.utils as utils\nimport lib.multiscale_parallel as multiscale_parallel\nimport lib.modules as modules\nimport lib.thops as thops\n\nfrom train_misc import standard_normal_logprob\nfrom train_misc import set_cnf_options, count_nfe, count_parameters, count_total_time, count_nfe_gate\nfrom train_misc import add_spectral_norm, spectral_norm_power_iteration\nfrom train_misc import create_regularization_fns, get_regularization, append_regularization_to_log\n\nfrom tensorboardX import SummaryWriter\n\n# go fast boi!!\ntorch.backends.cudnn.benchmark = True\nSOLVERS = [\"dopri5\", \"bdf\", \"rk4\", \"midpoint\", 'adams', 'explicit_adams']\nGATES = [\"cnn1\", \"cnn2\", \"rnn\"]\n\nparser = argparse.ArgumentParser(\"Continuous Normalizing Flow\")\nparser.add_argument(\"--data\", choices=[\"mnist\", \"svhn\", \"cifar10\", 'lsun_church'], type=str, default=\"mnist\")\nparser.add_argument(\"--dims\", type=str, default=\"8,32,32,8\")\nparser.add_argument(\"--strides\", type=str, default=\"2,2,1,-2,-2\")\nparser.add_argument(\"--num_blocks\", type=int, default=1, help='Number of stacked CNFs.')\n\nparser.add_argument(\"--conv\", type=eval, default=True, choices=[True, False])\nparser.add_argument(\n \"--layer_type\", type=str, default=\"ignore\",\n choices=[\"ignore\", \"concat\", \"concat_v2\", \"squash\", \"concatsquash\", \"concatcoord\", \"hyper\", \"blend\"]\n)\nparser.add_argument(\"--divergence_fn\", type=str, default=\"approximate\", choices=[\"brute_force\", \"approximate\"])\nparser.add_argument(\n \"--nonlinearity\", type=str, default=\"softplus\", choices=[\"tanh\", \"relu\", \"softplus\", \"elu\", \"swish\"]\n)\n\nparser.add_argument(\"--seed\", 
type=int, default=0)\n\nparser.add_argument('--solver', type=str, default='dopri5', choices=SOLVERS)\nparser.add_argument('--atol', type=float, default=1e-5)\nparser.add_argument('--rtol', type=float, default=1e-5)\nparser.add_argument(\"--step_size\", type=float, default=None, help=\"Optional fixed step size.\")\n\nparser.add_argument('--gate', type=str, default='cnn1', choices=GATES)\nparser.add_argument('--scale', type=float, default=1.0)\nparser.add_argument('--scale_fac', type=float, default=1.0)\nparser.add_argument('--scale_std', type=float, default=1.0)\nparser.add_argument('--eta', default=0.1, type=float,\n help='tuning parameter that allows us to trade-off the competing goals of' \n 'minimizing the prediction loss and maximizing the gate rewards ')\nparser.add_argument('--rl-weight', default=0.01, type=float,\n help='rl weight')\n\nparser.add_argument('--gamma', default=0.99, type=float,\n help='discount factor, default: (0.99)')\n\nparser.add_argument('--test_solver', type=str, default=None, choices=SOLVERS + [None])\nparser.add_argument('--test_atol', type=float, default=None)\nparser.add_argument('--test_rtol', type=float, default=None)\n\nparser.add_argument(\"--imagesize\", type=int, default=None)\nparser.add_argument(\"--alpha\", type=float, default=1e-6)\nparser.add_argument('--time_length', type=float, default=1.0)\nparser.add_argument('--train_T', type=eval, default=True)\n\nparser.add_argument(\"--num_epochs\", type=int, default=1000)\nparser.add_argument(\"--batch_size\", type=int, default=200)\nparser.add_argument(\n \"--batch_size_schedule\", type=str, default=\"\", help=\"Increases the batchsize at every given epoch, dash separated.\"\n)\nparser.add_argument(\"--test_batch_size\", type=int, default=200)\nparser.add_argument(\"--lr\", type=float, default=1e-3)\nparser.add_argument(\"--warmup_iters\", type=float, default=1000)\nparser.add_argument(\"--weight_decay\", type=float, default=0.0)\nparser.add_argument(\"--spectral_norm_niter\", 
type=int, default=10)\nparser.add_argument(\"--weight_y\", type=float, default=0.5)\n\nparser.add_argument(\"--add_noise\", type=eval, default=True, choices=[True, False])\nparser.add_argument(\"--batch_norm\", type=eval, default=False, choices=[True, False])\nparser.add_argument('--residual', type=eval, default=False, choices=[True, False])\nparser.add_argument('--autoencode', type=eval, default=False, choices=[True, False])\nparser.add_argument('--rademacher', type=eval, default=True, choices=[True, False])\nparser.add_argument('--spectral_norm', type=eval, default=False, choices=[True, False])\nparser.add_argument('--multiscale', type=eval, default=False, choices=[True, False])\nparser.add_argument('--parallel', type=eval, default=False, choices=[True, False])\nparser.add_argument('--conditional', type=eval, default=False, choices=[True, False])\nparser.add_argument('--controlled_tol', type=eval, default=False, choices=[True, False])\nparser.add_argument(\"--train_mode\", choices=[\"semisup\", \"sup\", \"unsup\"], type=str, default=\"semisup\")\nparser.add_argument(\"--condition_ratio\", type=float, default=0.5)\nparser.add_argument(\"--dropout_rate\", type=float, default=0.0)\n\n\n# Regularizations\nparser.add_argument('--l1int', type=float, default=None, help=\"int_t ||f||_1\")\nparser.add_argument('--l2int', type=float, default=None, help=\"int_t ||f||_2\")\nparser.add_argument('--dl2int', type=float, default=None, help=\"int_t ||f^T df/dt||_2\")\nparser.add_argument('--JFrobint', type=float, default=None, help=\"int_t ||df/dx||_F\")\nparser.add_argument('--JdiagFrobint', type=float, default=None, help=\"int_t ||df_i/dx_i||_F\")\nparser.add_argument('--JoffdiagFrobint', type=float, default=None, help=\"int_t ||df/dx - df_i/dx_i||_F\")\n\nparser.add_argument(\"--time_penalty\", type=float, default=0, help=\"Regularization on the end_time.\")\nparser.add_argument(\n \"--max_grad_norm\", type=float, default=1e10,\n help=\"Max norm of graidents (default is just 
stupidly high to avoid any clipping)\"\n)\n\nparser.add_argument(\"--begin_epoch\", type=int, default=1)\nparser.add_argument(\"--resume\", type=str, default=None)\nparser.add_argument(\"--save\", type=str, default=\"experiments/cnf\")\nparser.add_argument(\"--val_freq\", type=int, default=1)\nparser.add_argument(\"--log_freq\", type=int, default=1)\n\nargs = parser.parse_args()\n\nimport lib.odenvp_conditional_rl as odenvp\n \n# set seed\ntorch.manual_seed(args.seed)\nnp.random.seed(args.seed)\n\n# logger\nutils.makedirs(args.save)\nlogger = utils.get_logger(logpath=os.path.join(args.save, 'logs'), filepath=os.path.abspath(__file__)) # write to log file\nwriter = SummaryWriter(os.path.join(args.save, 'tensorboard')) # write to tensorboard\n\nif args.layer_type == \"blend\":\n logger.info(\"!! Setting time_length from None to 1.0 due to use of Blend layers.\")\n args.time_length = 1.0\n\nlogger.info(args)\n\n\ndef add_noise(x):\n \"\"\"\n [0, 1] -> [0, 255] -> add noise -> [0, 1]\n \"\"\"\n if args.add_noise:\n noise = x.new().resize_as_(x).uniform_()\n x = x * 255 + noise\n x = x / 256\n return x\n\n\ndef update_lr(optimizer, itr):\n iter_frac = min(float(itr + 1) / max(args.warmup_iters, 1), 1.0)\n lr = args.lr * iter_frac\n for param_group in optimizer.param_groups:\n param_group[\"lr\"] = lr\n\n\ndef get_train_loader(train_set, epoch):\n if args.batch_size_schedule != \"\":\n epochs = [0] + list(map(int, args.batch_size_schedule.split(\"-\")))\n n_passed = sum(np.array(epochs) <= epoch)\n current_batch_size = int(args.batch_size * n_passed)\n else:\n current_batch_size = args.batch_size\n train_loader = torch.utils.data.DataLoader(\n dataset=train_set, batch_size=current_batch_size, shuffle=True, drop_last=True, pin_memory=True\n )\n logger.info(\"===> Using batch size {}. 
Total {} iterations/epoch.\".format(current_batch_size, len(train_loader)))\n return train_loader\n\n\ndef get_dataset(args):\n trans = lambda im_size: tforms.Compose([tforms.Resize(im_size), tforms.ToTensor(), add_noise])\n\n if args.data == \"mnist\":\n im_dim = 1\n im_size = 28 if args.imagesize is None else args.imagesize\n train_set = dset.MNIST(root=\"../data\", train=True, transform=trans(im_size), download=True)\n test_set = dset.MNIST(root=\"../data\", train=False, transform=trans(im_size), download=True)\n elif args.data == \"svhn\":\n im_dim = 3\n im_size = 32 if args.imagesize is None else args.imagesize\n train_set = dset.SVHN(root=\"../data\", split=\"train\", transform=trans(im_size), download=True)\n test_set = dset.SVHN(root=\"../data\", split=\"test\", transform=trans(im_size), download=True)\n elif args.data == \"cifar10\":\n im_dim = 3\n im_size = 32 if args.imagesize is None else args.imagesize\n train_set = dset.CIFAR10(\n root=\"../data\", train=True, transform=tforms.Compose([\n tforms.Resize(im_size),\n tforms.RandomHorizontalFlip(),\n tforms.ToTensor(),\n add_noise,\n ]), download=True\n )\n test_set = dset.CIFAR10(root=\"../data\", train=False, transform=trans(im_size), download=True)\n elif args.data == 'celeba':\n im_dim = 3\n im_size = 64 if args.imagesize is None else args.imagesize\n train_set = dset.CelebA(\n train=True, transform=tforms.Compose([\n tforms.ToPILImage(),\n tforms.Resize(im_size),\n tforms.RandomHorizontalFlip(),\n tforms.ToTensor(),\n add_noise,\n ])\n )\n test_set = dset.CelebA(\n train=False, transform=tforms.Compose([\n tforms.ToPILImage(),\n tforms.Resize(im_size),\n tforms.ToTensor(),\n add_noise,\n ])\n )\n elif args.data == 'lsun_church':\n im_dim = 3\n im_size = 64 if args.imagesize is None else args.imagesize\n train_set = dset.LSUN(\n '../data', ['church_outdoor_train'], transform=tforms.Compose([\n tforms.Resize(96),\n tforms.RandomCrop(64),\n tforms.Resize(im_size),\n tforms.ToTensor(),\n add_noise,\n 
])\n )\n test_set = dset.LSUN(\n '../data', ['church_outdoor_val'], transform=tforms.Compose([\n tforms.Resize(96),\n tforms.RandomCrop(64),\n tforms.Resize(im_size),\n tforms.ToTensor(),\n add_noise,\n ])\n ) \n elif args.data == 'imagenet_64':\n im_dim = 3\n im_size = 64 if args.imagesize is None else args.imagesize\n train_set = dset.ImageFolder(\n train=True, transform=tforms.Compose([\n tforms.ToPILImage(),\n tforms.Resize(im_size),\n tforms.RandomHorizontalFlip(),\n tforms.ToTensor(),\n add_noise,\n ])\n )\n test_set = dset.ImageFolder(\n train=False, transform=tforms.Compose([\n tforms.ToPILImage(),\n tforms.Resize(im_size),\n tforms.ToTensor(),\n add_noise,\n ])\n )\n \n data_shape = (im_dim, im_size, im_size)\n if not args.conv:\n data_shape = (im_dim * im_size * im_size,)\n\n test_loader = torch.utils.data.DataLoader(\n dataset=test_set, batch_size=args.test_batch_size, shuffle=False, drop_last=True\n )\n return train_set, test_loader, data_shape\n\n\ndef compute_bits_per_dim(x, model):\n zero = torch.zeros(x.shape[0], 1).to(x)\n\n # Don't use data parallelize if batch size is small.\n # if x.shape[0] < 200:\n # model = model.module\n \n z, delta_logp, atol, rtol, logp_actions, nfe = model(x, zero) # run model forward\n\n logpz = standard_normal_logprob(z).view(z.shape[0], -1).sum(1, keepdim=True) # logp(z)\n logpx = logpz - delta_logp\n\n logpx_per_dim = torch.sum(logpx) / x.nelement() # averaged over batches\n bits_per_dim = -(logpx_per_dim - np.log(256)) / np.log(2)\n\n return bits_per_dim, atol, rtol, logp_actions, nfe\n\ndef compute_bits_per_dim_conditional(x, y, model):\n zero = torch.zeros(x.shape[0], 1).to(x)\n y_onehot = thops.onehot(y, num_classes=model.module.y_class).to(x)\n\n # Don't use data parallelize if batch size is small.\n # if x.shape[0] < 200:\n # model = model.module\n \n z, delta_logp, atol, rtol, logp_actions, nfe = model(x, zero) # run model forward\n \n dim_sup = int(args.condition_ratio * np.prod(z.size()[1:]))\n \n # prior\n 
mean, logs = model.module._prior(y_onehot)\n\n logpz_sup = modules.GaussianDiag.logp(mean, logs, z[:, 0:dim_sup]).view(-1,1) # logp(z)_sup\n logpz_unsup = standard_normal_logprob(z[:, dim_sup:]).view(z.shape[0], -1).sum(1, keepdim=True)\n logpz = logpz_sup + logpz_unsup\n logpx = logpz - delta_logp\n\n logpx_per_dim = torch.sum(logpx) / x.nelement() # averaged over batches\n bits_per_dim = -(logpx_per_dim - np.log(256)) / np.log(2)\n \n # dropout\n if args.dropout_rate > 0:\n zsup = model.module.dropout(z[:, 0:dim_sup])\n else:\n zsup = z[:, 0:dim_sup]\n \n # compute xentropy loss\n y_logits = model.module.project_class(zsup)\n loss_xent = model.module.loss_class(y_logits, y.to(x.get_device()))\n y_predicted = np.argmax(y_logits.cpu().detach().numpy(), axis=1)\n\n return bits_per_dim, loss_xent, y_predicted, atol, rtol, logp_actions, nfe\n\ndef create_model(args, data_shape, regularization_fns):\n hidden_dims = tuple(map(int, args.dims.split(\",\")))\n strides = tuple(map(int, args.strides.split(\",\")))\n\n if args.multiscale:\n model = odenvp.ODENVP(\n (args.batch_size, *data_shape),\n n_blocks=args.num_blocks,\n intermediate_dims=hidden_dims,\n nonlinearity=args.nonlinearity,\n alpha=args.alpha,\n cnf_kwargs={\"T\": args.time_length, \"train_T\": args.train_T, \"regularization_fns\": regularization_fns, \"solver\": args.solver, \"atol\": args.atol, \"rtol\": args.rtol, \"scale\": args.scale, \"scale_fac\": args.scale_fac, \"scale_std\": args.scale_std, \"gate\": args.gate},\n condition_ratio=args.condition_ratio,\n dropout_rate=args.dropout_rate,)\n elif args.parallel:\n model = multiscale_parallel.MultiscaleParallelCNF(\n (args.batch_size, *data_shape),\n n_blocks=args.num_blocks,\n intermediate_dims=hidden_dims,\n alpha=args.alpha,\n time_length=args.time_length,\n )\n else:\n if args.autoencode:\n\n def build_cnf():\n autoencoder_diffeq = layers.AutoencoderDiffEqNet(\n hidden_dims=hidden_dims,\n input_shape=data_shape,\n strides=strides,\n conv=args.conv,\n 
layer_type=args.layer_type,\n nonlinearity=args.nonlinearity,\n )\n odefunc = layers.AutoencoderODEfunc(\n autoencoder_diffeq=autoencoder_diffeq,\n divergence_fn=args.divergence_fn,\n residual=args.residual,\n rademacher=args.rademacher,\n )\n cnf = layers.CNF(\n odefunc=odefunc,\n T=args.time_length,\n regularization_fns=regularization_fns,\n solver=args.solver,\n )\n return cnf\n else:\n\n def build_cnf():\n diffeq = layers.ODEnet(\n hidden_dims=hidden_dims,\n input_shape=data_shape,\n strides=strides,\n conv=args.conv,\n layer_type=args.layer_type,\n nonlinearity=args.nonlinearity,\n )\n odefunc = layers.ODEfunc(\n diffeq=diffeq,\n divergence_fn=args.divergence_fn,\n residual=args.residual,\n rademacher=args.rademacher,\n )\n cnf = layers.CNF(\n odefunc=odefunc,\n T=args.time_length,\n train_T=args.train_T,\n regularization_fns=regularization_fns,\n solver=args.solver,\n )\n return cnf\n\n chain = [layers.LogitTransform(alpha=args.alpha)] if args.alpha > 0 else [layers.ZeroMeanTransform()]\n chain = chain + [build_cnf() for _ in range(args.num_blocks)]\n if args.batch_norm:\n chain.append(layers.MovingBatchNorm2d(data_shape[0]))\n model = layers.SequentialFlow(chain)\n return model\n\n\nif __name__ == \"__main__\":\n\n # get deivce\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n cvt = lambda x: x.type(torch.float32).to(device, non_blocking=True)\n\n # load dataset\n train_set, test_loader, data_shape = get_dataset(args)\n\n # build model\n regularization_fns, regularization_coeffs = create_regularization_fns(args)\n model = create_model(args, data_shape, regularization_fns)\n\n if args.spectral_norm: add_spectral_norm(model, logger)\n set_cnf_options(args, model)\n\n logger.info(model)\n logger.info(\"Number of trainable parameters: {}\".format(count_parameters(model)))\n \n writer.add_text('info', \"Number of trainable parameters: {}\".format(count_parameters(model)))\n\n # optimizer\n optimizer = optim.Adam(model.parameters(), 
lr=args.lr, weight_decay=args.weight_decay)\n \n # set initial iter\n itr = 1\n \n # set the meters\n time_epoch_meter = utils.RunningAverageMeter(0.97)\n time_meter = utils.RunningAverageMeter(0.97)\n loss_meter = utils.RunningAverageMeter(0.97) # track total loss\n nll_meter = utils.RunningAverageMeter(0.97) # track negative log-likelihood\n xent_meter = utils.RunningAverageMeter(0.97) # track xentropy score\n error_meter = utils.RunningAverageMeter(0.97) # track error score\n steps_meter = utils.RunningAverageMeter(0.97)\n grad_meter = utils.RunningAverageMeter(0.97)\n tt_meter = utils.RunningAverageMeter(0.97)\n\n # restore parameters\n if args.resume is not None:\n checkpt = torch.load(args.resume, map_location=lambda storage, loc: storage)\n model.load_state_dict(checkpt[\"state_dict\"])\n if \"optim_state_dict\" in checkpt.keys():\n optimizer.load_state_dict(checkpt[\"optim_state_dict\"])\n # Manually move optimizer state to device.\n for state in optimizer.state.values():\n for k, v in state.items():\n if torch.is_tensor(v):\n state[k] = cvt(v)\n args.begin_epoch = checkpt['epoch'] + 1\n itr = checkpt['iter'] + 1\n time_epoch_meter.set(checkpt['epoch_time_avg'])\n time_meter.set(checkpt['time_train'])\n loss_meter.set(checkpt['loss_train'])\n nll_meter.set(checkpt['bits_per_dim_train'])\n xent_meter.set(checkpt['xent_train'])\n error_meter.set(checkpt['error_train'])\n steps_meter.set(checkpt['nfe_train'])\n grad_meter.set(checkpt['grad_train'])\n tt_meter.set(checkpt['total_time_train'])\n\n if torch.cuda.is_available():\n model = torch.nn.DataParallel(model).cuda()\n\n # For visualization.\n if args.conditional:\n dim_unsup = int((1.0 - args.condition_ratio) * np.prod(data_shape))\n fixed_y = torch.from_numpy(np.arange(model.module.y_class)).repeat(model.module.y_class).type(torch.long).to(device, non_blocking=True)\n fixed_y_onehot = thops.onehot(fixed_y, num_classes=model.module.y_class)\n with torch.no_grad():\n mean, logs = 
model.module._prior(fixed_y_onehot)\n fixed_z_sup = modules.GaussianDiag.sample(mean, logs)\n fixed_z_unsup = cvt(torch.randn(model.module.y_class**2, dim_unsup))\n fixed_z = torch.cat((fixed_z_sup, fixed_z_unsup),1)\n else:\n fixed_z = cvt(torch.randn(100, *data_shape))\n \n\n if args.spectral_norm and not args.resume: spectral_norm_power_iteration(model, 500)\n\n best_loss_nll = float(\"inf\")\n best_error_score = float(\"inf\")\n \n for epoch in range(args.begin_epoch, args.num_epochs + 1):\n start_epoch = time.time()\n model.train()\n train_loader = get_train_loader(train_set, epoch)\n for _, (x, y) in enumerate(train_loader):\n start = time.time()\n update_lr(optimizer, itr)\n optimizer.zero_grad()\n\n if not args.conv:\n x = x.view(x.shape[0], -1)\n\n # cast data and move to device\n x = cvt(x)\n \n # compute loss\n if args.conditional:\n loss_nll, loss_xent, y_predicted, atol, rtol, logp_actions, nfe = compute_bits_per_dim_conditional(x, y, model)\n if args.train_mode == \"semisup\":\n loss = loss_nll + args.weight_y * loss_xent\n elif args.train_mode == \"sup\":\n loss = loss_xent\n elif args.train_mode == \"unsup\":\n loss = loss_nll\n else:\n raise ValueError('Choose supported train_mode: semisup, sup, unsup')\n error_score = 1. 
- np.mean(y_predicted.astype(int) == y.numpy()) \n \n else:\n loss, atol, rtol, logp_actions, nfe = compute_bits_per_dim(x, model)\n loss_nll, loss_xent, error_score = loss, 0., 0.\n \n if regularization_coeffs:\n reg_states = get_regularization(model, regularization_coeffs)\n reg_loss = sum(\n reg_state * coeff for reg_state, coeff in zip(reg_states, regularization_coeffs) if coeff != 0\n )\n loss = loss + reg_loss\n total_time = count_total_time(model)\n loss = loss + total_time * args.time_penalty\n\n # re-weight the gate rewards\n normalized_eta = args.eta / len(logp_actions)\n \n # collect cumulative future rewards\n R = - loss\n cum_rewards = []\n for r in nfe[::-1]:\n R = -normalized_eta * r.view(-1,1) + args.gamma * R\n cum_rewards.insert(0,R)\n \n # apply REINFORCE\n rl_loss = 0\n for lpa, r in zip(logp_actions, cum_rewards):\n rl_loss = rl_loss - lpa.view(-1,1) * args.rl_weight * r\n \n loss = loss + rl_loss.mean()\n \n loss.backward()\n \n grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n\n optimizer.step()\n\n if args.spectral_norm: spectral_norm_power_iteration(model, args.spectral_norm_niter)\n \n time_meter.update(time.time() - start)\n loss_meter.update(loss.item())\n nll_meter.update(loss_nll.item())\n if args.conditional:\n xent_meter.update(loss_xent.item())\n else:\n xent_meter.update(loss_xent)\n error_meter.update(error_score)\n steps_meter.update(count_nfe_gate(model))\n grad_meter.update(grad_norm)\n tt_meter.update(total_time)\n \n for idx in range(len(model.module.transforms)):\n for layer in model.module.transforms[idx].chain:\n if hasattr(layer, 'atol'):\n layer.odefunc.after_odeint()\n \n # write to tensorboard\n writer.add_scalars('time', {'train_iter': time_meter.val}, itr)\n writer.add_scalars('loss', {'train_iter': loss_meter.val}, itr)\n writer.add_scalars('bits_per_dim', {'train_iter': nll_meter.val}, itr)\n writer.add_scalars('xent', {'train_iter': xent_meter.val}, itr)\n 
writer.add_scalars('error', {'train_iter': error_meter.val}, itr)\n writer.add_scalars('nfe', {'train_iter': steps_meter.val}, itr)\n writer.add_scalars('grad', {'train_iter': grad_meter.val}, itr)\n writer.add_scalars('total_time', {'train_iter': tt_meter.val}, itr)\n\n if itr % args.log_freq == 0:\n for tol_indx in range(len(atol)):\n writer.add_scalars('atol_%i'%tol_indx, {'train': atol[tol_indx].mean()}, itr)\n writer.add_scalars('rtol_%i'%tol_indx, {'train': rtol[tol_indx].mean()}, itr)\n \n log_message = (\n \"Iter {:04d} | Time {:.4f}({:.4f}) | Bit/dim {:.4f}({:.4f}) | Xent {:.4f}({:.4f}) | Loss {:.4f}({:.4f}) | Error {:.4f}({:.4f}) \"\n \"Steps {:.0f}({:.2f}) | Grad Norm {:.4f}({:.4f}) | Total Time {:.2f}({:.2f})\".format(\n itr, time_meter.val, time_meter.avg, nll_meter.val, nll_meter.avg, xent_meter.val, xent_meter.avg, loss_meter.val, loss_meter.avg, error_meter.val, error_meter.avg, steps_meter.val, steps_meter.avg, grad_meter.val, grad_meter.avg, tt_meter.val, tt_meter.avg\n )\n )\n if regularization_coeffs:\n log_message = append_regularization_to_log(log_message, regularization_fns, reg_states)\n logger.info(log_message)\n writer.add_text('info', log_message, itr)\n\n itr += 1\n \n # compute test loss\n model.eval()\n if epoch % args.val_freq == 0:\n with torch.no_grad():\n # write to tensorboard\n writer.add_scalars('time', {'train_epoch': time_meter.avg}, epoch)\n writer.add_scalars('loss', {'train_epoch': loss_meter.avg}, epoch)\n writer.add_scalars('bits_per_dim', {'train_epoch': nll_meter.avg}, epoch)\n writer.add_scalars('xent', {'train_epoch': xent_meter.avg}, epoch)\n writer.add_scalars('error', {'train_epoch': error_meter.avg}, epoch)\n writer.add_scalars('nfe', {'train_epoch': steps_meter.avg}, epoch)\n writer.add_scalars('grad', {'train_epoch': grad_meter.avg}, epoch)\n writer.add_scalars('total_time', {'train_epoch': tt_meter.avg}, epoch)\n \n start = time.time()\n logger.info(\"validating...\")\n writer.add_text('info', 
\"validating...\", epoch)\n losses_nll = []; losses_xent = []; losses = []\n total_correct = 0\n \n for (x, y) in test_loader:\n if not args.conv:\n x = x.view(x.shape[0], -1)\n x = cvt(x)\n if args.conditional:\n loss_nll, loss_xent, y_predicted, atol, rtol, logp_actions, nfe = compute_bits_per_dim_conditional(x, y, model)\n if args.train_mode == \"semisup\":\n loss = loss_nll + args.weight_y * loss_xent\n elif args.train_mode == \"sup\":\n loss = loss_xent\n elif args.train_mode == \"unsup\":\n loss = loss_nll\n else:\n raise ValueError('Choose supported train_mode: semisup, sup, unsup')\n total_correct += np.sum(y_predicted.astype(int) == y.numpy())\n else:\n loss, atol, rtol, logp_actions, nfe = compute_bits_per_dim(x, model)\n loss_nll, loss_xent = loss, 0.\n losses_nll.append(loss_nll.cpu().numpy()); losses.append(loss.cpu().numpy())\n if args.conditional: \n losses_xent.append(loss_xent.cpu().numpy())\n else:\n losses_xent.append(loss_xent)\n \n loss_nll = np.mean(losses_nll); loss_xent = np.mean(losses_xent); loss = np.mean(losses)\n error_score = 1. 
- total_correct / len(test_loader.dataset)\n time_epoch_meter.update(time.time() - start_epoch)\n \n # write to tensorboard\n test_time_spent = time.time() - start\n writer.add_scalars('time', {'validation': test_time_spent}, epoch)\n writer.add_scalars('epoch_time', {'validation': time_epoch_meter.val}, epoch)\n writer.add_scalars('bits_per_dim', {'validation': loss_nll}, epoch)\n writer.add_scalars('xent', {'validation': loss_xent}, epoch)\n writer.add_scalars('loss', {'validation': loss}, epoch)\n writer.add_scalars('error', {'validation': error_score}, epoch)\n \n for tol_indx in range(len(atol)):\n writer.add_scalars('atol_%i'%tol_indx, {'validation': atol[tol_indx].mean()}, epoch)\n writer.add_scalars('rtol_%i'%tol_indx, {'validation': rtol[tol_indx].mean()}, epoch)\n \n log_message = \"Epoch {:04d} | Time {:.4f}, Epoch Time {:.4f}({:.4f}), Bit/dim {:.4f}(best: {:.4f}), Xent {:.4f}, Loss {:.4f}, Error {:.4f}(best: {:.4f})\".format(epoch, time.time() - start, time_epoch_meter.val, time_epoch_meter.avg, loss_nll, best_loss_nll, loss_xent, loss, error_score, best_error_score)\n logger.info(log_message)\n writer.add_text('info', log_message, epoch)\n \n for name, param in model.named_parameters():\n writer.add_histogram(name, param.clone().cpu().data.numpy(), epoch)\n \n \n utils.makedirs(args.save)\n torch.save({\n \"args\": args,\n \"state_dict\": model.module.state_dict() if torch.cuda.is_available() else model.state_dict(),\n \"optim_state_dict\": optimizer.state_dict(),\n \"epoch\": epoch,\n \"iter\": itr-1,\n \"error\": error_score,\n \"loss\": loss,\n \"xent\": loss_xent,\n \"bits_per_dim\": loss_nll,\n \"best_bits_per_dim\": best_loss_nll,\n \"best_error_score\": best_error_score,\n \"epoch_time\": time_epoch_meter.val,\n \"epoch_time_avg\": time_epoch_meter.avg,\n \"time\": test_time_spent,\n \"error_train\": error_meter.avg,\n \"loss_train\": loss_meter.avg,\n \"xent_train\": xent_meter.avg,\n \"bits_per_dim_train\": nll_meter.avg,\n 
\"total_time_train\": tt_meter.avg,\n \"time_train\": time_meter.avg,\n \"nfe_train\": steps_meter.avg,\n \"grad_train\": grad_meter.avg,\n }, os.path.join(args.save, \"epoch_%i_checkpt.pth\"%epoch))\n \n torch.save({\n \"args\": args,\n \"state_dict\": model.module.state_dict() if torch.cuda.is_available() else model.state_dict(),\n \"optim_state_dict\": optimizer.state_dict(),\n \"epoch\": epoch,\n \"iter\": itr-1,\n \"error\": error_score,\n \"loss\": loss,\n \"xent\": loss_xent,\n \"bits_per_dim\": loss_nll,\n \"best_bits_per_dim\": best_loss_nll,\n \"best_error_score\": best_error_score,\n \"epoch_time\": time_epoch_meter.val,\n \"epoch_time_avg\": time_epoch_meter.avg,\n \"time\": test_time_spent,\n \"error_train\": error_meter.avg,\n \"loss_train\": loss_meter.avg,\n \"xent_train\": xent_meter.avg,\n \"bits_per_dim_train\": nll_meter.avg,\n \"total_time_train\": tt_meter.avg,\n \"time_train\": time_meter.avg,\n \"nfe_train\": steps_meter.avg,\n \"grad_train\": grad_meter.avg,\n }, os.path.join(args.save, \"current_checkpt.pth\"))\n \n if loss_nll < best_loss_nll:\n best_loss_nll = loss_nll\n utils.makedirs(args.save)\n torch.save({\n \"args\": args,\n \"state_dict\": model.module.state_dict() if torch.cuda.is_available() else model.state_dict(),\n \"optim_state_dict\": optimizer.state_dict(),\n \"epoch\": epoch,\n \"iter\": itr-1,\n \"error\": error_score,\n \"loss\": loss,\n \"xent\": loss_xent,\n \"bits_per_dim\": loss_nll,\n \"best_bits_per_dim\": best_loss_nll,\n \"best_error_score\": best_error_score,\n \"epoch_time\": time_epoch_meter.val,\n \"epoch_time_avg\": time_epoch_meter.avg,\n \"time\": test_time_spent,\n \"error_train\": error_meter.avg,\n \"loss_train\": loss_meter.avg,\n \"xent_train\": xent_meter.avg,\n \"bits_per_dim_train\": nll_meter.avg,\n \"total_time_train\": tt_meter.avg,\n \"time_train\": time_meter.avg,\n \"nfe_train\": steps_meter.avg,\n \"grad_train\": grad_meter.avg,\n }, os.path.join(args.save, \"best_nll_checkpt.pth\"))\n \n 
if args.conditional:\n if error_score < best_error_score:\n best_error_score = error_score\n utils.makedirs(args.save)\n torch.save({\n \"args\": args,\n \"state_dict\": model.module.state_dict() if torch.cuda.is_available() else model.state_dict(),\n \"optim_state_dict\": optimizer.state_dict(),\n \"epoch\": epoch,\n \"iter\": itr-1,\n \"error\": error_score,\n \"loss\": loss,\n \"xent\": loss_xent,\n \"bits_per_dim\": loss_nll,\n \"best_bits_per_dim\": best_loss_nll,\n \"best_error_score\": best_error_score,\n \"epoch_time\": time_epoch_meter.val,\n \"epoch_time_avg\": time_epoch_meter.avg,\n \"time\": test_time_spent,\n \"error_train\": error_meter.avg,\n \"loss_train\": loss_meter.avg,\n \"xent_train\": xent_meter.avg,\n \"bits_per_dim_train\": nll_meter.avg,\n \"total_time_train\": tt_meter.avg,\n \"time_train\": time_meter.avg,\n \"nfe_train\": steps_meter.avg,\n \"grad_train\": grad_meter.avg,\n }, os.path.join(args.save, \"best_error_checkpt.pth\"))\n \n\n # visualize samples and density\n with torch.no_grad():\n fig_filename = os.path.join(args.save, \"figs\", \"{:04d}.jpg\".format(epoch))\n utils.makedirs(os.path.dirname(fig_filename))\n generated_samples, atol, rtol, logp_actions, nfe = model(fixed_z, reverse=True)\n generated_samples = generated_samples.view(-1, *data_shape)\n for tol_indx in range(len(atol)):\n writer.add_scalars('atol_gen_%i'%tol_indx, {'validation': atol[tol_indx].mean()}, epoch)\n writer.add_scalars('rtol_gen_%i'%tol_indx, {'validation': rtol[tol_indx].mean()}, epoch)\n save_image(generated_samples, fig_filename, nrow=10)\n if args.data == \"mnist\":\n writer.add_images('generated_images', generated_samples.repeat(1,3,1,1), epoch)\n else:\n writer.add_images('generated_images', generated_samples.repeat(1,1,1,1), epoch)\nNamespace(JFrobint=None, JdiagFrobint=None, JoffdiagFrobint=None, add_noise=True, alpha=1e-06, atol=1e-05, autoencode=False, batch_norm=False, batch_size=900, batch_size_schedule='', begin_epoch=1, 
condition_ratio=0.5, conditional=True, controlled_tol=False, conv=True, data='cifar10', dims='64,64,64', divergence_fn='approximate', dl2int=None, dropout_rate=0.5, eta=0.1, gamma=0.99, gate='cnn1', imagesize=None, l1int=None, l2int=None, layer_type='concat', log_freq=10, lr=0.001, max_grad_norm=10000000000.0, multiscale=True, nonlinearity='softplus', num_blocks=2, num_epochs=1000, parallel=False, rademacher=True, residual=False, resume=None, rl_weight=0.01, rtol=1e-05, save='../experiments_published/cnf_conditional_disentangle_cifar10_bs900_sratio_0_5_drop_0_5_rl_stdscale_15_run3', scale=1.0, scale_fac=1.0, scale_std=15.0, seed=3, solver='dopri5', spectral_norm=False, spectral_norm_niter=10, step_size=None, strides='1,1,1,1', test_atol=None, test_batch_size=500, test_rtol=None, test_solver=None, time_length=1.0, time_penalty=0, train_T=True, train_mode='semisup', val_freq=1, warmup_iters=1000, weight_decay=0.0, weight_y=0.5)\n"
]
]
] |
[
"code"
] |
[
[
"code",
"code"
]
] |
4ac04b159000d7e9e9d8c3ac642f080ca3f347eb
| 160,834 |
ipynb
|
Jupyter Notebook
|
m04-perception/lab.ipynb
|
yy/dviz_course
|
5c00da15c6f92f991f13db87a2bd4c6854d55a71
|
[
"MIT"
] | null | null | null |
m04-perception/lab.ipynb
|
yy/dviz_course
|
5c00da15c6f92f991f13db87a2bd4c6854d55a71
|
[
"MIT"
] | null | null | null |
m04-perception/lab.ipynb
|
yy/dviz_course
|
5c00da15c6f92f991f13db87a2bd4c6854d55a71
|
[
"MIT"
] | null | null | null | 119.224611 | 47,948 | 0.822724 |
[
[
[
"# W3 Lab: Perception\n\nIn this lab, we will learn basic usage of `pandas` library and then perform a small experiment to test the perception of length and area. ",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport math\nimport matplotlib.pyplot as plt\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"## Vega datasets \n\nBefore going into the perception experiment, let's first talk about some handy datasets that you can play with. \n\nIt's nice to have clean datasets handy to practice data visualization. There is a nice small package called [`vega-datasets`](https://github.com/altair-viz/vega_datasets), from the [altair project](https://github.com/altair-viz). \n\nYou can install the package by running\n\n $ pip install vega-datasets\n \nor \n\n $ pip3 install vega-datasets\n \nOnce you install the package, you can import and see the list of datasets:",
"_____no_output_____"
]
],
[
[
"from vega_datasets import data\n\ndata.list_datasets()",
"_____no_output_____"
]
],
[
[
"or you can work with only smaller, local datasets. ",
"_____no_output_____"
]
],
[
[
"from vega_datasets import local_data\nlocal_data.list_datasets()",
"_____no_output_____"
]
],
[
[
"Ah, we have the `anscombe` data here! Let's see the description of the dataset. ",
"_____no_output_____"
]
],
[
[
"local_data.anscombe.description",
"_____no_output_____"
]
],
[
[
"## Anscombe's quartet dataset\n\nHow does the actual data look like? Very conveniently, calling the dataset returns a Pandas dataframe for you. ",
"_____no_output_____"
]
],
[
[
"df = local_data.anscombe()\ndf.head()",
"_____no_output_____"
]
],
[
[
"**Q1: can you draw a scatterplot of the dataset \"I\"?** You can filter the dataframe based on the `Series` column and use `plot` function that you used for the Snow's map. ",
"_____no_output_____"
]
],
[
[
"# TODO: put your code here\n",
"_____no_output_____"
]
],
[
[
"## Some histograms with pandas ",
"_____no_output_____"
],
[
"Let's look at a slightly more complicated dataset.",
"_____no_output_____"
]
],
[
[
"car_df = local_data.cars()\ncar_df.head()",
"_____no_output_____"
]
],
[
[
"Pandas provides useful summary functions. It identifies numerical data columns and provides you with a table of summary statistics. ",
"_____no_output_____"
]
],
[
[
"car_df.describe()",
"_____no_output_____"
]
],
[
[
"If you ask to draw a histogram, you get all of them. :)",
"_____no_output_____"
]
],
[
[
"car_df.hist()",
"_____no_output_____"
]
],
[
[
"Well this is too small. You can check out [the documentation](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.hist.html) and change the size of the figure. \n\n**Q2: by consulting the documentation, can you make the figure larger so that we can see all the labels clearly? And then make the layout 2 x 3 not 3 x 2, then change the number of bins to 20?**",
"_____no_output_____"
]
],
[
[
"# TODO: put your code here\n",
"_____no_output_____"
]
],
[
[
"## Your own psychophysics experiment!",
"_____no_output_____"
],
[
"Let's do an experiment! The procedure is as follows:\n\n1. Generate a random number between \\[1, 10\\];\n1. Use a horizontal bar to represent the number, i.e., the length of the bar is equal to the number;\n1. Guess the length of the bar by comparing it to two other bars with length 1 and 10 respectively;\n1. Store your guess (perceived length) and actual length to two separate lists;\n1. Repeat the above steps many times;\n1. How does the perception of length differ from that of area?.\n\nFirst, let's define the length of a short and a long bar. We also create two empty lists to store perceived and actual length.",
"_____no_output_____"
]
],
[
[
"import random\nimport time\nimport numpy as np\n\nl_short_bar = 1\nl_long_bar = 10\n\nperceived_length_list = []\nactual_length_list = []",
"_____no_output_____"
]
],
[
[
"### Perception of length\n\nLet's run the experiment.\n\nThe [**`random`**](https://docs.python.org/3.6/library/random.html) module in Python provides various random number generators, and the [**`random.uniform(a,b)`**](https://docs.python.org/3.6/library/random.html#random.uniform) function returns a floating point number in \\[a,b\\]. \n\nWe can plot horizontal bars using the [**`pyplot.barh()`**](http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.barh) function. Using this function, we can produce a bar graph that looks like this:",
"_____no_output_____"
]
],
[
[
"mystery_length = random.uniform(1, 10) # generate a number between 1 and 10. this is the *actual* length.\n\nplt.barh(np.arange(3), [l_short_bar, mystery_length, l_long_bar], align='center')\nplt.yticks(np.arange(3), ('1', '?', '10'))\nplt.xticks([]) # no hint!",
"_____no_output_____"
]
],
[
[
"Btw, `np.arange` is used to create a simple integer list `[0, 1, 2]`. ",
"_____no_output_____"
]
],
[
[
"np.arange(3)",
"_____no_output_____"
]
],
[
[
"Now let's define a function to perform the experiment once. When you run this function, it picks a random number between 1.0 and 10.0 and show the bar chart. Then it asks you to input your estimate of the length of the middle bar. It then saves that number to the `perceived_length_list` and the actual answer to the `actual_length_list`. ",
"_____no_output_____"
]
],
[
[
"def run_exp_once():\n mystery_length = random.uniform(1, 10) # generate a number between 1 and 10. \n\n plt.barh(np.arange(3), [l_short_bar, mystery_length, l_long_bar], height=0.5, align='center')\n plt.yticks(np.arange(3), ('1', '?', '10'))\n plt.xticks([]) # no hint!\n plt.show()\n \n try:\n perceived_length_list.append( float(input()) )\n except:\n print(\"This should only fail in workflow. If you are running this in browser, this won't fail.\")\n pass\n actual_length_list.append(mystery_length)",
"_____no_output_____"
],
[
"run_exp_once()",
"_____no_output_____"
]
],
[
[
"Now, run the experiment many times to gather your data. Check the two lists to make sure that you have the proper dataset. The length of the two lists should be the same. ",
"_____no_output_____"
]
],
[
[
"# TODO: Run your experiment many times here\n",
"_____no_output_____"
]
],
[
[
"### Plotting the result\n\nNow we can draw the scatter plot of perceived and actual length. The `matplotlib`'s [**`scatter()`**](http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.scatter) function will do this. This is the backend of the pandas' scatterplot. Here is an example of how to use `scatter`:",
"_____no_output_____"
]
],
[
[
"plt.scatter(x=[1,5,10], y=[1,10, 5])",
"_____no_output_____"
]
],
[
[
"**Q3: Now plot your result using the `scatter()` function. You should also use `plt.title()`, `plt.xlabel()`, and `plt.ylabel()` to label your axes and the plot itself.**",
"_____no_output_____"
]
],
[
[
"# TODO: put your code here\n",
"_____no_output_____"
]
],
[
[
"After plotting, let's fit the relation between actual and perceived lengths using a polynomial function. We can easily do it using [**`curve_fit(f, x, y)`**](http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.curve_fit.html) in Scipy, which is to fit $x$ and $y$ using the function `f`. In our case, $f = a*x^b +c$. For instance, we can check whether this works by creating a fake dataset that follows the exact form:",
"_____no_output_____"
]
],
[
[
"from scipy.optimize import curve_fit\n\ndef func(x, a, b, c):\n return a * np.power(x, b) + c\n\nx = np.arange(20) # [0,1,2,3, ..., 19]\ny = np.power(x, 2) # [0,1,4,9, ... ]\n\npopt, pcov = curve_fit(func, x, y)\nprint('{:.2f} x^{:.2f} + {:.2f}'.format(*popt))",
"1.00 x^2.00 + 0.00\n"
]
],
[
[
"In order to plot the function to check the relationship between the actual and perceived lengths, you can use two variables `x` and `y` to plot the relationship where `x` equals a series of continuous numbers. For example, if your x axis ranges from 1 to 9 then the variable `x` could be equal to `np.linspace(1, 10, 50)`. The variable `y` will contain the equation that you get from `popt`. For example, if you get equation `1.00 x^2.00 + 0.00` then the variable `y` would be equal to `1.0 * x**2.0 + 0`. \n\nAfter assigning `x` and `y` variables you will plot them in combination with the scatter plot of actual and perceived values to check if you get a linear relationship or not.",
"_____no_output_____"
],
[
"**Q4: Now fit your data!** Do you see roughly linear relationship between the actual and the perceived lengths? It's ok if you don't!",
"_____no_output_____"
]
],
[
[
"# TODO: your code here\n",
"_____no_output_____"
]
],
[
[
"### Perception of area\n\nSimilar to the above experiment, we now represent a random number as a circle, and the area of the circle is equal to the number.\n\nFirst, calculate the radius of a circle from its area and then plot using the **`Circle()`** function. `plt.Circle((0,0), r)` will plot a circle centered at (0,0) with radius `r`.",
"_____no_output_____"
]
],
[
[
"n1 = 0.005\nn2 = 0.05\n\nradius1 = np.sqrt(n1/np.pi) # area = pi * r * r\nradius2 = np.sqrt(n2/np.pi)\nrandom_radius = np.sqrt(n1*random.uniform(1,10)/np.pi)\n\nplt.axis('equal')\nplt.axis('off')\ncirc1 = plt.Circle( (0,0), radius1, clip_on=False )\ncirc2 = plt.Circle( (4*radius2,0), radius2, clip_on=False )\nrand_circ = plt.Circle((2*radius2,0), random_radius, clip_on=False )\n\nplt.gca().add_artist(circ1)\nplt.gca().add_artist(circ2)\nplt.gca().add_artist(rand_circ)",
"_____no_output_____"
]
],
[
[
"Let's have two lists for this experiment. ",
"_____no_output_____"
]
],
[
[
"perceived_area_list = []\nactual_area_list = []",
"_____no_output_____"
]
],
[
[
"And define a function for the experiment. ",
"_____no_output_____"
]
],
[
[
"def run_area_exp_once(n1=0.005, n2=0.05): \n radius1 = np.sqrt(n1/np.pi) # area = pi * r * r\n radius2 = np.sqrt(n2/np.pi)\n \n mystery_number = random.uniform(1,10)\n random_radius = np.sqrt(n1*mystery_number/math.pi)\n\n plt.axis('equal')\n plt.axis('off')\n circ1 = plt.Circle( (0,0), radius1, clip_on=False )\n circ2 = plt.Circle( (4*radius2,0), radius2, clip_on=False )\n rand_circ = plt.Circle((2*radius2,0), random_radius, clip_on=False )\n plt.gca().add_artist(circ1)\n plt.gca().add_artist(circ2)\n plt.gca().add_artist(rand_circ) \n plt.show()\n \n perceived_area_list.append( float(input()) )\n actual_area_list.append(mystery_number)",
"_____no_output_____"
]
],
[
[
"**Q5: Now you can run the experiment many times, plot the result, and fit a power-law curve!** ",
"_____no_output_____"
]
],
[
[
"# TODO: put your code here. You can use multiple cells. ",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
],
[
[
"What is your result? How are the exponents different from each other? ",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
4ac05a070371b437eae3b9698a89e3cbee69a056
| 48,949 |
ipynb
|
Jupyter Notebook
|
tensorflow/models/samples/core/get_started/eager.ipynb
|
Sioxas/python
|
2ac94bd31f39219f4a8698286dfe8453a8a67947
|
[
"MIT"
] | 1 |
2018-06-20T09:07:07.000Z
|
2018-06-20T09:07:07.000Z
|
tensorflow/models/samples/core/get_started/eager.ipynb
|
Sioxas/python
|
2ac94bd31f39219f4a8698286dfe8453a8a67947
|
[
"MIT"
] | null | null | null |
tensorflow/models/samples/core/get_started/eager.ipynb
|
Sioxas/python
|
2ac94bd31f39219f4a8698286dfe8453a8a67947
|
[
"MIT"
] | null | null | null | 42.825022 | 1,053 | 0.579399 |
[
[
[
"##### Copyright 2018 The TensorFlow Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");",
"_____no_output_____"
]
],
[
[
"#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"_____no_output_____"
]
],
[
[
"# Custom Training Walkthrough\n\n\n<table align=\"left\"><td>\n<a target=\"_blank\" href=\"https://colab.sandbox.google.com/github/tensorflow/models/blob/master/samples/core/get_started/eager.ipynb\">\n <img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a> \n</td><td>\n<a target=\"_blank\" href=\"https://github.com/tensorflow/models/blob/master/samples/core/get_started/eager.ipynb\"><img width=32px src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on Github</a></td></table>\n\n",
"_____no_output_____"
],
[
"This guide uses machine learning to *categorize* Iris flowers by species. It uses [TensorFlow](https://www.tensorflow.org)'s eager execution to:\n1. Build a model,\n2. Train this model on example data, and\n3. Use the model to make predictions about unknown data.\n\nMachine learning experience isn't required, but you'll need to read some Python code. For more eager execution guides and examples, see [these notebooks](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/eager/python/examples/notebooks).\n\n## TensorFlow programming\n\nThere are many [TensorFlow APIs](https://www.tensorflow.org/api_docs/python/) available, but start with these high-level TensorFlow concepts:\n\n* Enable an [eager execution](https://www.tensorflow.org/programmers_guide/eager) development environment,\n* Import data with the [Datasets API](https://www.tensorflow.org/programmers_guide/datasets),\n* Build models and layers with TensorFlow's [Keras API](https://keras.io/getting-started/sequential-model-guide/).\n\nThis tutorial is structured like many TensorFlow programs:\n\n1. Import and parse the data sets.\n2. Select the type of model.\n3. Train the model.\n4. Evaluate the model's effectiveness.\n5. Use the trained model to make predictions.\n\nFor more TensorFlow examples, see the [Get Started](https://www.tensorflow.org/get_started/) and [Tutorials](https://www.tensorflow.org/tutorials/) sections. To learn machine learning basics, consider taking the [Machine Learning Crash Course](https://developers.google.com/machine-learning/crash-course/).\n\n## Run the notebook\n\nThis tutorial is available as an interactive [Colab notebook](https://colab.research.google.com) that can execute and modify Python code directly in the browser. The notebook handles setup and dependencies while you \"play\" cells to run the code blocks. 
This is a fun way to explore the program and test ideas.\n\nIf you are unfamiliar with Python notebook environments, there are a couple of things to keep in mind:\n\n1. Executing code requires connecting to a runtime environment. In the Colab notebook menu, select *Runtime > Connect to runtime...*\n2. Notebook cells are arranged sequentially to gradually build the program. Typically, later code cells depend on prior code cells, though you can always rerun a code block. To execute the entire notebook in order, select *Runtime > Run all*. To rerun a code cell, select the cell and click the *play icon* on the left.",
"_____no_output_____"
],
[
"## Setup program",
"_____no_output_____"
],
[
"### Install the latest version of TensorFlow\n\nThis tutorial uses eager execution, which is available in [TensorFlow 1.8](https://www.tensorflow.org/install/). (You may need to restart the runtime after upgrading.)",
"_____no_output_____"
]
],
[
[
"!pip install --upgrade tensorflow",
"_____no_output_____"
]
],
[
[
"### Configure imports and eager execution\n\nImport the required Python modules—including TensorFlow—and enable eager execution for this program. Eager execution makes TensorFlow evaluate operations immediately, returning concrete values instead of creating a [computational graph](https://www.tensorflow.org/programmers_guide/graphs) that is executed later. If you are used to a REPL or the `python` interactive console, this feels familiar.\n\nOnce eager execution is enabled, it *cannot* be disabled within the same program. See the [eager execution guide](https://www.tensorflow.org/programmers_guide/eager) for more details.",
"_____no_output_____"
]
],
[
[
"from __future__ import absolute_import, division, print_function\n\nimport os\nimport matplotlib.pyplot as plt\n\nimport tensorflow as tf\nimport tensorflow.contrib.eager as tfe\n\ntf.enable_eager_execution()\n\nprint(\"TensorFlow version: {}\".format(tf.VERSION))\nprint(\"Eager execution: {}\".format(tf.executing_eagerly()))",
"_____no_output_____"
]
],
[
[
"## The Iris classification problem\n\nImagine you are a botanist seeking an automated way to categorize each Iris flower you find. Machine learning provides many algorithms to statistically classify flowers. For instance, a sophisticated machine learning program could classify flowers based on photographs. Our ambitions are more modest—we're going to classify Iris flowers based on the length and width measurements of their [sepals](https://en.wikipedia.org/wiki/Sepal) and [petals](https://en.wikipedia.org/wiki/Petal).\n\nThe Iris genus entails about 300 species, but our program will only classify the following three:\n\n* Iris setosa\n* Iris virginica\n* Iris versicolor\n\n<table>\n <tr><td>\n <img src=\"https://www.tensorflow.org/images/iris_three_species.jpg\"\n alt=\"Petal geometry compared for three iris species: Iris setosa, Iris virginica, and Iris versicolor\">\n </td></tr>\n <tr><td align=\"center\">\n <b>Figure 1.</b> <a href=\"https://commons.wikimedia.org/w/index.php?curid=170298\">Iris setosa</a> (by <a href=\"https://commons.wikimedia.org/wiki/User:Radomil\">Radomil</a>, CC BY-SA 3.0), <a href=\"https://commons.wikimedia.org/w/index.php?curid=248095\">Iris versicolor</a>, (by <a href=\"https://commons.wikimedia.org/wiki/User:Dlanglois\">Dlanglois</a>, CC BY-SA 3.0), and <a href=\"https://www.flickr.com/photos/33397993@N05/3352169862\">Iris virginica</a> (by <a href=\"https://www.flickr.com/photos/33397993@N05\">Frank Mayfield</a>, CC BY-SA 2.0).<br/> \n </td></tr>\n</table>\n\nFortunately, someone has already created a [data set of 120 Iris flowers](https://en.wikipedia.org/wiki/Iris_flower_data_set) with the sepal and petal measurements. This is a classic dataset that is popular for beginner machine learning classification problems.",
"_____no_output_____"
],
[
"## Import and parse the training dataset\n\nDownload the dataset file and convert it to a structure that can be used by this Python program.\n\n### Download the dataset\n\nDownload the training dataset file using the [tf.keras.utils.get_file](https://www.tensorflow.org/api_docs/python/tf/keras/utils/get_file) function. This returns the file path of the downloaded file.",
"_____no_output_____"
]
],
[
[
"train_dataset_url = \"http://download.tensorflow.org/data/iris_training.csv\"\n\ntrain_dataset_fp = tf.keras.utils.get_file(fname=os.path.basename(train_dataset_url),\n origin=train_dataset_url)\n\nprint(\"Local copy of the dataset file: {}\".format(train_dataset_fp))",
"_____no_output_____"
]
],
[
[
"### Inspect the data\n\nThis dataset, `iris_training.csv`, is a plain text file that stores tabular data formatted as comma-separated values (CSV). Use the `head -n5` command to take a peek at the first five entries:",
"_____no_output_____"
]
],
[
[
"!head -n5 {train_dataset_fp}",
"_____no_output_____"
]
],
[
[
"From this view of the dataset, notice the following:\n\n1. The first line is a header containing information about the dataset:\n * There are 120 total examples. Each example has four features and one of three possible label names. \n2. Subsequent rows are data records, one *[example](https://developers.google.com/machine-learning/glossary/#example)* per line, where:\n * The first four fields are *[features](https://developers.google.com/machine-learning/glossary/#feature)*: these are characteristics of an example. Here, the fields hold float numbers representing flower measurements.\n * The last column is the *[label](https://developers.google.com/machine-learning/glossary/#label)*: this is the value we want to predict. For this dataset, it's an integer value of 0, 1, or 2 that corresponds to a flower name.\n\nLet's write that out in code:",
"_____no_output_____"
]
],
[
[
"# column order in CSV file\ncolumn_names = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'species']\n\nfeature_names = column_names[:-1]\nlabel_name = column_names[-1]\n\nprint(\"Features: {}\".format(feature_names))\nprint(\"Label: {}\".format(label_name))",
"_____no_output_____"
]
],
[
[
"Each label is associated with a string name (for example, \"setosa\"), but machine learning typically relies on numeric values. The label numbers are mapped to a named representation, such as:\n\n* `0`: Iris setosa\n* `1`: Iris versicolor\n* `2`: Iris virginica\n\nFor more information about features and labels, see the [ML Terminology section of the Machine Learning Crash Course](https://developers.google.com/machine-learning/crash-course/framing/ml-terminology).",
"_____no_output_____"
]
],
[
[
"class_names = ['Iris setosa', 'Iris versicolor', 'Iris virginica']",
"_____no_output_____"
]
],
[
[
"### Create a `tf.data.Dataset`\n\nTensorFlow's [Dataset API](https://www.tensorflow.org/programmers_guide/datasets) handles many common cases for loading data into a model. This is a high-level API for reading data and transforming it into a form used for training. See the [Datasets Quick Start guide](https://www.tensorflow.org/get_started/datasets_quickstart) for more information.\n\n\nSince the dataset is a CSV-formatted text file, use the the [make_csv_dataset](https://www.tensorflow.org/api_docs/python/tf/contrib/data/make_csv_dataset) function to parse the data into a suitable format. Since this function generates data for training models, the default behavior is to shuffle the data (`shuffle=True, shuffle_buffer_size=10000`), and repeat the dataset forever (`num_epochs=None`). We also set the [batch_size](https://developers.google.com/machine-learning/glossary/#batch_size) parameter.",
"_____no_output_____"
]
],
[
[
"batch_size = 32\n\ntrain_dataset = tf.contrib.data.make_csv_dataset(\n train_dataset_fp,\n batch_size, \n column_names=column_names,\n label_name=label_name,\n num_epochs=1)",
"_____no_output_____"
]
],
[
[
"The `make_csv_dataset` function returns a `tf.data.Dataset` of `(features, label)` pairs, where `features` is a dictionary: `{'feature_name': value}`\n\nWith eager execution enabled, these `Dataset` objects are iterable. Let's look at a batch of features:",
"_____no_output_____"
]
],
[
[
"features, labels = next(iter(train_dataset))\n\nfeatures",
"_____no_output_____"
]
],
[
[
"Notice that like-features are grouped together, or *batched*. Each example row's fields are appended to the corresponding feature array. Change the `batch_size` to set the number of examples stored in these feature arrays.\n\nYou can start to see some clusters by plotting a few features from the batch:",
"_____no_output_____"
]
],
[
[
"plt.scatter(features['petal_length'],\n features['sepal_length'],\n c=labels,\n cmap='viridis')\n\nplt.xlabel(\"Petal length\")\nplt.ylabel(\"Sepal length\");",
"_____no_output_____"
]
],
[
[
"To simplify the model building step, create a function to repackage the features dictionary into a single array with shape: `(batch_size, num_features)`.\n\nThis function uses the [tf.stack](https://www.tensorflow.org/api_docs/python/tf/stack) method which takes values from a list of tensors and creates a combined tensor at the specified dimension.",
"_____no_output_____"
]
],
[
[
"def pack_features_vector(features, labels):\n \"\"\"Pack the features into a single array.\"\"\"\n features = tf.stack(list(features.values()), axis=1)\n return features, labels",
"_____no_output_____"
]
],
[
[
"Then use the [tf.data.Dataset.map](https://www.tensorflow.org/api_docs/python/tf/data/dataset/map) method to pack the `features` of each `(features,label)` pair into the training dataset:",
"_____no_output_____"
]
],
[
[
"train_dataset = train_dataset.map(pack_features_vector)",
"_____no_output_____"
]
],
[
[
"The features element of the `Dataset` are now arrays with shape `(batch_size, num_features)`. Let's look at the first few examples:",
"_____no_output_____"
]
],
[
[
"features, labels = next(iter(train_dataset))\n\nprint(features[:5])",
"_____no_output_____"
]
],
[
[
"## Select the type of model\n\n### Why model?\n\nA *[model](https://developers.google.com/machine-learning/crash-course/glossary#model)* is the relationship between features and the label. For the Iris classification problem, the model defines the relationship between the sepal and petal measurements and the predicted Iris species. Some simple models can be described with a few lines of algebra, but complex machine learning models have a large number of parameters that are difficult to summarize.\n\nCould you determine the relationship between the four features and the Iris species *without* using machine learning? That is, could you use traditional programming techniques (for example, a lot of conditional statements) to create a model? Perhaps—if you analyzed the dataset long enough to determine the relationships between petal and sepal measurements to a particular species. And this becomes difficult—maybe impossible—on more complicated datasets. A good machine learning approach *determines the model for you*. If you feed enough representative examples into the right machine learning model type, the program will figure out the relationships for you.\n\n### Select the model\n\nWe need to select the kind of model to train. There are many types of models and picking a good one takes experience. This tutorial uses a neural network to solve the Iris classification problem. *[Neural networks](https://developers.google.com/machine-learning/glossary/#neural_network)* can find complex relationships between features and the label. It is a highly-structured graph, organized into one or more *[hidden layers](https://developers.google.com/machine-learning/glossary/#hidden_layer)*. Each hidden layer consists of one or more *[neurons](https://developers.google.com/machine-learning/glossary/#neuron)*. 
There are several categories of neural networks and this program uses a dense, or *[fully-connected neural network](https://developers.google.com/machine-learning/glossary/#fully_connected_layer)*: the neurons in one layer receive input connections from *every* neuron in the previous layer. For example, Figure 2 illustrates a dense neural network consisting of an input layer, two hidden layers, and an output layer:\n\n<table>\n <tr><td>\n <img src=\"https://www.tensorflow.org/images/custom_estimators/full_network.png\"\n alt=\"A diagram of the network architecture: Inputs, 2 hidden layers, and outputs\">\n </td></tr>\n <tr><td align=\"center\">\n <b>Figure 2.</b> A neural network with features, hidden layers, and predictions.<br/> \n </td></tr>\n</table>\n\nWhen the model from Figure 2 is trained and fed an unlabeled example, it yields three predictions: the likelihood that this flower is the given Iris species. This prediction is called *[inference](https://developers.google.com/machine-learning/crash-course/glossary#inference)*. For this example, the sum of the output predictions is 1.0. In Figure 2, this prediction breaks down as: `0.03` for *Iris setosa*, `0.95` for *Iris versicolor*, and `0.02` for *Iris virginica*. This means that the model predicts—with 95% probability—that an unlabeled example flower is an *Iris versicolor*.",
"_____no_output_____"
],
[
"### Create a model using Keras\n\nThe TensorFlow [tf.keras](https://www.tensorflow.org/api_docs/python/tf/keras) API is the preferred way to create models and layers. This makes it easy to build models and experiment while Keras handles the complexity of connecting everything together.\n\nThe [tf.keras.Sequential](https://www.tensorflow.org/api_docs/python/tf/keras/Sequential) model is a linear stack of layers. Its constructor takes a list of layer instances, in this case, two [Dense](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dense) layers with 10 nodes each, and an output layer with 3 nodes representing our label predictions. The first layer's `input_shape` parameter corresponds to the number of features from the dataset, and is required.",
"_____no_output_____"
]
],
[
[
"model = tf.keras.Sequential([\n tf.keras.layers.Dense(10, activation=tf.nn.relu, input_shape=(4,)), # input shape required\n tf.keras.layers.Dense(10, activation=tf.nn.relu),\n tf.keras.layers.Dense(3)\n])",
"_____no_output_____"
]
],
[
[
"The *[activation function](https://developers.google.com/machine-learning/crash-course/glossary#activation_function)* determines the output shape of each node in the layer. These non-linearities are important—without them the model would be equivalent to a single layer. There are many [available activations](https://www.tensorflow.org/api_docs/python/tf/keras/activations), but [ReLU](https://developers.google.com/machine-learning/crash-course/glossary#ReLU) is common for hidden layers.\n\nThe ideal number of hidden layers and neurons depends on the problem and the dataset. Like many aspects of machine learning, picking the best shape of the neural network requires a mixture of knowledge and experimentation. As a rule of thumb, increasing the number of hidden layers and neurons typically creates a more powerful model, which requires more data to train effectively.",
"_____no_output_____"
],
[
"### Using the model\n\nLet's have a quick look at what this model does to a batch of features:",
"_____no_output_____"
]
],
[
[
"predictions = model(features)\npredictions[:5]",
"_____no_output_____"
]
],
[
[
"Here, each example returns a [logit](https://developers.google.com/machine-learning/crash-course/glossary#logit) for each class. \n\nTo convert these logits to a probability for each class, use the [softmax](https://developers.google.com/machine-learning/crash-course/glossary#softmax) function:",
"_____no_output_____"
]
],
[
[
"tf.nn.softmax(predictions[:5])",
"_____no_output_____"
]
],
[
[
"Taking the `tf.argmax` across classes gives us the predicted class index. But, the model hasn't been trained yet, so these aren't good predictions.",
"_____no_output_____"
]
],
[
[
"print(\"Prediction: {}\".format(tf.argmax(predictions, axis=1)))\nprint(\" Labels: {}\".format(labels))",
"_____no_output_____"
]
],
[
[
"## Train the model\n\n*[Training](https://developers.google.com/machine-learning/crash-course/glossary#training)* is the stage of machine learning when the model is gradually optimized, or the model *learns* the dataset. The goal is to learn enough about the structure of the training dataset to make predictions about unseen data. If you learn *too much* about the training dataset, then the predictions only work for the data it has seen and will not be generalizable. This problem is called *[overfitting](https://developers.google.com/machine-learning/crash-course/glossary#overfitting)*—it's like memorizing the answers instead of understanding how to solve a problem.\n\nThe Iris classification problem is an example of *[supervised machine learning](https://developers.google.com/machine-learning/glossary/#supervised_machine_learning)*: the model is trained from examples that contain labels. In *[unsupervised machine learning](https://developers.google.com/machine-learning/glossary/#unsupervised_machine_learning)*, the examples don't contain labels. Instead, the model typically finds patterns among the features.",
"_____no_output_____"
],
[
"### Define the loss and gradient function\n\nBoth training and evaluation stages need to calculate the model's *[loss](https://developers.google.com/machine-learning/crash-course/glossary#loss)*. This measures how off a model's predictions are from the desired label, in other words, how bad the model is performing. We want to minimize, or optimize, this value.\n\nOur model will calculate its loss using the [tf.keras.losses.categorical_crossentropy](https://www.tensorflow.org/api_docs/python/tf/losses/sparse_softmax_cross_entropy) function which takes the model's class probability predictions and the desired label, and returns the average loss across the examples.",
"_____no_output_____"
]
],
[
[
"def loss(model, x, y):\n y_ = model(x)\n return tf.losses.sparse_softmax_cross_entropy(labels=y, logits=y_)\n\n\nl = loss(model, features, labels)\nprint(\"Loss test: {}\".format(l))",
"_____no_output_____"
]
],
[
[
"Use the [tf.GradientTape](https://www.tensorflow.org/api_docs/python/tf/GradientTape) context to calculate the *[gradients](https://developers.google.com/machine-learning/crash-course/glossary#gradient)* used to optimize our model. For more examples of this, see the [eager execution guide](https://www.tensorflow.org/programmers_guide/eager).",
"_____no_output_____"
]
],
[
[
"def grad(model, inputs, targets):\n with tf.GradientTape() as tape:\n loss_value = loss(model, inputs, targets)\n return loss_value, tape.gradient(loss_value, model.trainable_variables)",
"_____no_output_____"
]
],
[
[
"### Create an optimizer\n\nAn *[optimizer](https://developers.google.com/machine-learning/crash-course/glossary#optimizer)* applies the computed gradients to the model's variables to minimize the `loss` function. You can think of the loss function as a curved surface (see Figure 3) and we want to find its lowest point by walking around. The gradients point in the direction of steepest ascent—so we'll travel the opposite way and move down the hill. By iteratively calculating the loss and gradient for each batch, we'll adjust the model during training. Gradually, the model will find the best combination of weights and bias to minimize loss. And the lower the loss, the better the model's predictions.\n\n<table>\n <tr><td>\n <img src=\"https://cs231n.github.io/assets/nn3/opt1.gif\" width=\"70%\"\n alt=\"Optimization algorthims visualized over time in 3D space.\">\n </td></tr>\n <tr><td align=\"center\">\n <b>Figure 3.</b> Optimization algorithms visualized over time in 3D space. (Source: <a href=\"http://cs231n.github.io/neural-networks-3/\">Stanford class CS231n</a>, MIT License)<br/> \n </td></tr>\n</table>\n\nTensorFlow has many [optimization algorithms](https://www.tensorflow.org/api_guides/python/train) available for training. This model uses the [tf.train.GradientDescentOptimizer](https://www.tensorflow.org/api_docs/python/tf/train/GradientDescentOptimizer) that implements the *[stochastic gradient descent](https://developers.google.com/machine-learning/crash-course/glossary#gradient_descent)* (SGD) algorithm. The `learning_rate` sets the step size to take for each iteration down the hill. This is a *hyperparameter* that you'll commonly adjust to achieve better results.",
"_____no_output_____"
],
[
"Let's setup the optimizer and the `global_step` counter:",
"_____no_output_____"
]
],
[
[
"optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)\n\nglobal_step = tf.train.get_or_create_global_step()",
"_____no_output_____"
]
],
[
[
"We'll use this to calculate a single optimization step:",
"_____no_output_____"
]
],
[
[
"loss_value, grads = grad(model, features, labels)\n\nprint(\"Step: {}, Initial Loss: {}\".format(global_step.numpy(),\n loss_value.numpy()))\n\noptimizer.apply_gradients(zip(grads, model.variables), global_step)\n\nprint(\"Step: {}, Loss: {}\".format(global_step.numpy(),\n loss(model, features, labels).numpy()))",
"_____no_output_____"
]
],
[
[
"### Training loop\n\nWith all the pieces in place, the model is ready for training! A training loop feeds the dataset examples into the model to help it make better predictions. The following code block sets up these training steps:\n\n1. Iterate each *epoch*. An epoch is one pass through the dataset.\n2. Within an epoch, iterate over each example in the training `Dataset` grabbing its *features* (`x`) and *label* (`y`).\n3. Using the example's features, make a prediction and compare it with the label. Measure the inaccuracy of the prediction and use that to calculate the model's loss and gradients.\n4. Use an `optimizer` to update the model's variables.\n5. Keep track of some stats for visualization.\n6. Repeat for each epoch.\n\nThe `num_epochs` variable is the amount of times to loop over the dataset collection. Counter-intuitively, training a model longer does not guarantee a better model. `num_epochs` is a *[hyperparameter](https://developers.google.com/machine-learning/glossary/#hyperparameter)* that you can tune. Choosing the right number usually requires both experience and experimentation.",
"_____no_output_____"
]
],
[
[
"## Note: Rerunning this cell uses the same model variables\n\n# keep results for plotting\ntrain_loss_results = []\ntrain_accuracy_results = []\n\nnum_epochs = 201\n\nfor epoch in range(num_epochs):\n epoch_loss_avg = tfe.metrics.Mean()\n epoch_accuracy = tfe.metrics.Accuracy()\n\n # Training loop - using batches of 32\n for x, y in train_dataset:\n # Optimize the model\n loss_value, grads = grad(model, x, y)\n optimizer.apply_gradients(zip(grads, model.variables),\n global_step)\n\n # Track progress\n epoch_loss_avg(loss_value) # add current batch loss\n # compare predicted label to actual label\n epoch_accuracy(tf.argmax(model(x), axis=1, output_type=tf.int32), y)\n\n # end epoch\n train_loss_results.append(epoch_loss_avg.result())\n train_accuracy_results.append(epoch_accuracy.result())\n \n if epoch % 50 == 0:\n print(\"Epoch {:03d}: Loss: {:.3f}, Accuracy: {:.3%}\".format(epoch,\n epoch_loss_avg.result(),\n epoch_accuracy.result()))",
"_____no_output_____"
]
],
[
[
"### Visualize the loss function over time",
"_____no_output_____"
],
[
"While it's helpful to print out the model's training progress, it's often *more* helpful to see this progress. [TensorBoard](https://www.tensorflow.org/programmers_guide/summaries_and_tensorboard) is a nice visualization tool that is packaged with TensorFlow, but we can create basic charts using the `matplotlib` module.\n\nInterpreting these charts takes some experience, but you really want to see the *loss* go down and the *accuracy* go up.",
"_____no_output_____"
]
],
[
[
"fig, axes = plt.subplots(2, sharex=True, figsize=(12, 8))\nfig.suptitle('Training Metrics')\n\naxes[0].set_ylabel(\"Loss\", fontsize=14)\naxes[0].plot(train_loss_results)\n\naxes[1].set_ylabel(\"Accuracy\", fontsize=14)\naxes[1].set_xlabel(\"Epoch\", fontsize=14)\naxes[1].plot(train_accuracy_results);",
"_____no_output_____"
]
],
[
[
"## Evaluate the model's effectiveness\n\nNow that the model is trained, we can get some statistics on its performance.\n\n*Evaluating* means determining how effectively the model makes predictions. To determine the model's effectiveness at Iris classification, pass some sepal and petal measurements to the model and ask the model to predict what Iris species they represent. Then compare the model's prediction against the actual label. For example, a model that picked the correct species on half the input examples has an *[accuracy](https://developers.google.com/machine-learning/glossary/#accuracy)* of `0.5`. Figure 4 shows a slightly more effective model, getting 4 out of 5 predictions correct at 80% accuracy:\n\n<table cellpadding=\"8\" border=\"0\">\n <colgroup>\n <col span=\"4\" >\n <col span=\"1\" bgcolor=\"lightblue\">\n <col span=\"1\" bgcolor=\"lightgreen\">\n </colgroup>\n <tr bgcolor=\"lightgray\">\n <th colspan=\"4\">Example features</th>\n <th colspan=\"1\">Label</th>\n <th colspan=\"1\" >Model prediction</th>\n </tr>\n <tr>\n <td>5.9</td><td>3.0</td><td>4.3</td><td>1.5</td><td align=\"center\">1</td><td align=\"center\">1</td>\n </tr>\n <tr>\n <td>6.9</td><td>3.1</td><td>5.4</td><td>2.1</td><td align=\"center\">2</td><td align=\"center\">2</td>\n </tr>\n <tr>\n <td>5.1</td><td>3.3</td><td>1.7</td><td>0.5</td><td align=\"center\">0</td><td align=\"center\">0</td>\n </tr>\n <tr>\n <td>6.0</td> <td>3.4</td> <td>4.5</td> <td>1.6</td> <td align=\"center\">1</td><td align=\"center\" bgcolor=\"red\">2</td>\n </tr>\n <tr>\n <td>5.5</td><td>2.5</td><td>4.0</td><td>1.3</td><td align=\"center\">1</td><td align=\"center\">1</td>\n </tr>\n <tr><td align=\"center\" colspan=\"6\">\n <b>Figure 4.</b> An Iris classifier that is 80% accurate.<br/> \n </td></tr>\n</table>",
"_____no_output_____"
],
[
"### Setup the test dataset\n\nEvaluating the model is similar to training the model. The biggest difference is the examples come from a separate *[test set](https://developers.google.com/machine-learning/crash-course/glossary#test_set)* rather than the training set. To fairly assess a model's effectiveness, the examples used to evaluate a model must be different from the examples used to train the model.\n\nThe setup for the test `Dataset` is similar to the setup for training `Dataset`. Download the CSV text file and parse that values, then give it a little shuffle:",
"_____no_output_____"
]
],
[
[
"test_url = \"http://download.tensorflow.org/data/iris_test.csv\"\n\ntest_fp = tf.keras.utils.get_file(fname=os.path.basename(test_url),\n origin=test_url)",
"_____no_output_____"
],
[
"test_dataset = tf.contrib.data.make_csv_dataset(\n train_dataset_fp,\n batch_size, \n column_names=column_names,\n label_name='species',\n num_epochs=1,\n shuffle=False)\n\ntest_dataset = test_dataset.map(pack_features_vector)",
"_____no_output_____"
]
],
[
[
"### Evaluate the model on the test dataset\n\nUnlike the training stage, the model only evaluates a single [epoch](https://developers.google.com/machine-learning/glossary/#epoch) of the test data. In the following code cell, we iterate over each example in the test set and compare the model's prediction against the actual label. This is used to measure the model's accuracy across the entire test set.",
"_____no_output_____"
]
],
[
[
"test_accuracy = tfe.metrics.Accuracy()\n\nfor (x, y) in test_dataset:\n logits = model(x)\n prediction = tf.argmax(logits, axis=1, output_type=tf.int32)\n test_accuracy(prediction, y)\n\nprint(\"Test set accuracy: {:.3%}\".format(test_accuracy.result()))",
"_____no_output_____"
]
],
[
[
"We can see on the last batch, for example, the model is usually correct:",
"_____no_output_____"
]
],
[
[
"tf.stack([y,prediction],axis=1)",
"_____no_output_____"
]
],
[
[
"## Use the trained model to make predictions\n\nWe've trained a model and \"proven\" that it's good—but not perfect—at classifying Iris species. Now let's use the trained model to make some predictions on [unlabeled examples](https://developers.google.com/machine-learning/glossary/#unlabeled_example); that is, on examples that contain features but not a label.\n\nIn real-life, the unlabeled examples could come from lots of different sources including apps, CSV files, and data feeds. For now, we're going to manually provide three unlabeled examples to predict their labels. Recall, the label numbers are mapped to a named representation as:\n\n* `0`: Iris setosa\n* `1`: Iris versicolor\n* `2`: Iris virginica",
"_____no_output_____"
]
],
[
[
"predict_dataset = tf.convert_to_tensor([\n [5.1, 3.3, 1.7, 0.5,],\n [5.9, 3.0, 4.2, 1.5,],\n [6.9, 3.1, 5.4, 2.1]\n])\n\npredictions = model(predict_dataset)\n\nfor i, logits in enumerate(predictions):\n class_idx = tf.argmax(logits).numpy()\n p = tf.nn.softmax(logits)[class_idx]\n name = class_names[class_idx]\n print(\"Example {} prediction: {} ({:4.1f}%)\".format(i, name, 100*p))",
"_____no_output_____"
]
],
[
[
"These predictions look good!\n\nTo dig deeper into machine learning models, take a look at the TensorFlow [Programmer's Guide](https://www.tensorflow.org/programmers_guide/) and check out the [community](https://www.tensorflow.org/community/).",
"_____no_output_____"
],
[
"## Next steps\n\nFor more eager execution guides and examples, see [these notebooks](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/eager/python/examples/notebooks).",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
4ac05d71377c246db669093c4155d101b477ae4b
| 5,316 |
ipynb
|
Jupyter Notebook
|
naics.ipynb
|
MAPC/lrrp-business-search
|
a8f278fd786d013eb144c4f575c9bc6decd17357
|
[
"MIT"
] | null | null | null |
naics.ipynb
|
MAPC/lrrp-business-search
|
a8f278fd786d013eb144c4f575c9bc6decd17357
|
[
"MIT"
] | null | null | null |
naics.ipynb
|
MAPC/lrrp-business-search
|
a8f278fd786d013eb144c4f575c9bc6decd17357
|
[
"MIT"
] | null | null | null | 31.455621 | 114 | 0.590482 |
[
[
[
"import pandas as pd \nimport numpy as np",
"_____no_output_____"
],
[
"name = 'Revere' #name of the town\ntown = pd.read_csv(name+'-google.csv') #file name, in this case they all followed format \"town-google.csv\"",
"_____no_output_____"
],
[
"#strip all white space and split the types into a list for easier searching\ntown['type_list'] = town['types'].str.replace(' ','').str.split(',')",
"_____no_output_____"
],
[
"#code_dict maps tags to 4 digit NAICS codes\n#sometimes >4 digits if the more specigic naics code is trivially easy to find\n#these are basically all assgined by hand\ncode_dict = {}\ncode_dict['bar'] = 7224\ncode_dict['liquor_store'] = 4248\ncode_dict['grocery_or_supermarket'] = 4244\ncode_dict['secondary_school'] = 6111\ncode_dict['school'] = 6111\ncode_dict['lodging'] = 7211\ncode_dict['car_dealer'] = 4411\ncode_dict['bakery'] = 4452\ncode_dict['car_repair'] = 8111\ncode_dict['jewelry_store'] = 4239\ncode_dict['bank'] = 5221\ncode_dict['department_store'] = 4521\ncode_dict['gym'] = 7139\ncode_dict['dentist'] = 6212\ncode_dict['hardware_store'] = 4237\ncode_dict['furniture_store'] = 4232\ncode_dict['pharmacy'] = 4461\ncode_dict['drugstore'] = 4461\ncode_dict['clothing_store'] = 4481\ncode_dict['pet_store'] = 4539\ncode_dict['electronics_store'] = 4431\ncode_dict['local_government_office'] = 9211\ncode_dict['city_hall'] = 9211\ncode_dict['place_of_worship'] = 8131\ncode_dict['electrician'] = 2382\ncode_dict['restaurant'] = 7225\ncode_dict['convenience_store'] = 44512\ncode_dict['shoe_store'] = 4482\ncode_dict['hair_care'] = 81211\ncode_dict['doctor'] = 6211\ncode_dict['insurance_agency'] = 5242\ncode_dict['lawyer'] = 5411\ncode_dict['veterinary_care'] = 54194\ncode_dict['book_store'] = 451211\ncode_dict['university'] = 6113\ncode_dict['funeral_home'] = 8122\ncode_dict['post_office'] = 4911\ncode_dict['library'] = 51912\ncode_dict['roofing_contractor'] = 2381\ncode_dict['storage'] = 4931\ncode_dict['atm'] = 5221 #used for credit union\ncode_dict['movie_theater'] = 5121\ncode_dict['florist'] = 4531\ncode_dict['beauty_salon'] = 8121\ncode_dict['spa'] = 8121\ncode_dict['real_estate_agency'] = 5312\ncode_dict['home_goods_store'] = 4422\ncode_dict['movie_rental'] = 5322\ncode_dict['hospital'] = 6221\ncode_dict['moving_company'] = 4842\ncode_dict['police'] = 9221",
"_____no_output_____"
],
[
"#iterate through, assigning NAICS codes based on the dictionary above\n#the remaining set will keep track of tags for any business that still does not yet have a code asssigned\n#use the remaining set to add more keys to the dictionary\nremaining = set()\nfor row in range(len(town)):\n types = town['type_list'][row]\n for elem in types:\n if elem in code_dict:\n town.at[row, 'naics'] = code_dict[elem]\n break\n else:\n #if the loop finishes, then we didn't find any match\n remaining |= set(town['type_list'][row]) \nprint(remaining) #show what tags are still remaining",
"_____no_output_____"
],
[
"#show all of the rows that were not assigned a NAICS code\n#these typically will be tags like \"point_of_interest\" or \"establishment\"\ntown.loc[town['naics'] != town['naics']]",
"_____no_output_____"
],
[
"#delete the extra column\ndel town['type_list']\n\n#save the file, name formatting can be changed to liking\ntown.to_csv(name+'-naics.csv', index = False)",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4ac061839e262d7fed95c9ab00c5b3b7e74363da
| 514,470 |
ipynb
|
Jupyter Notebook
|
PyCon_US.ipynb
|
cecivieira/cotas-genero-e-proposicoes-legislativas
|
925127a80633ac846d709f54cb1ccb4063c1d9d5
|
[
"MIT"
] | 6 |
2021-11-08T18:51:53.000Z
|
2021-11-10T17:09:07.000Z
|
PyCon_US.ipynb
|
cecivieira/cotas-genero-e-proposicoes-legislativas
|
925127a80633ac846d709f54cb1ccb4063c1d9d5
|
[
"MIT"
] | null | null | null |
PyCon_US.ipynb
|
cecivieira/cotas-genero-e-proposicoes-legislativas
|
925127a80633ac846d709f54cb1ccb4063c1d9d5
|
[
"MIT"
] | null | null | null | 76.79803 | 54,784 | 0.766223 |
[
[
[
"# Pre-procesamiento de datos",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"## Candidaturas elegidas\n\nPrincipales transformaciones: \n- Selección de atributos\n- Tratamiento de valores faltantes",
"_____no_output_____"
]
],
[
[
"import glob\nimport nltk\nimport re\n\nimport pandas as pd\n\nfrom string import punctuation",
"_____no_output_____"
],
[
"df_deputadas_1934_2023 = pd.read_csv('dados/deputadas_1934_2023.csv')",
"_____no_output_____"
],
[
"df_deputadas_1934_2023.shape",
"_____no_output_____"
],
[
"df_deputadas_1934_2023.head(5)",
"_____no_output_____"
]
],
[
[
"<div class=\"alert-warning\"> Candidaturas elegidas: Selección de atributos para análisis </div>",
"_____no_output_____"
]
],
[
[
"df_deputadas = df_deputadas_1934_2023[['id', 'siglaPartido', 'siglaUf',\n 'idLegislatura', 'sexo']]",
"_____no_output_____"
],
[
"df_deputadas.head(5)",
"_____no_output_____"
]
],
[
[
"<div class=\"alert-warning\"> Candidaturas elegidas: Ajuste de los valores faltantes </div>",
"_____no_output_____"
]
],
[
[
"df_deputadas.isnull().sum(axis = 0)",
"_____no_output_____"
],
[
"df_deputadas['siglaPartido'].fillna('sem partido', inplace=True)",
"/home/cecivieira/Projetos/cotas-genero-e-proposicoes-legislativas/.venv/lib/python3.8/site-packages/pandas/core/generic.py:6392: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n return self._update_inplace(result)\n"
],
[
"df_deputadas.isnull().sum(axis = 0)",
"_____no_output_____"
],
[
"df_deputadas.to_csv('dados/candidaturas_eleitas(1).csv', index=False)",
"_____no_output_____"
]
],
[
[
"## Legislaturas\n\nPrincipales tranformaciones:\n- Convertir fecha completa en año",
"_____no_output_____"
]
],
[
[
"tipo_data = ['dataInicio', 'dataFim']\ndf_legislaturas = pd.read_csv('dados/legislaturas_1934_2023.csv', parse_dates=tipo_data)",
"_____no_output_____"
],
[
"df_legislaturas.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 21 entries, 0 to 20\nData columns (total 4 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 id 21 non-null int64 \n 1 uri 21 non-null object \n 2 dataInicio 21 non-null datetime64[ns]\n 3 dataFim 21 non-null datetime64[ns]\ndtypes: datetime64[ns](2), int64(1), object(1)\nmemory usage: 800.0+ bytes\n"
],
[
"df_legislaturas.head()",
"_____no_output_____"
]
],
[
[
"<div class=\"alert-warning\"> Legislaturas: extracción de año </div>",
"_____no_output_____"
]
],
[
[
"df_legislaturas['dataInicio'] = df_legislaturas['dataInicio'].dt.year",
"_____no_output_____"
],
[
"df_legislaturas['dataFim'] = df_legislaturas['dataFim'].dt.year",
"_____no_output_____"
],
[
"df_legislaturas.head()",
"_____no_output_____"
],
[
"df_legislaturas.to_csv('dados/legislaturas_1934_2023_limpas(1).csv', index=False)",
"_____no_output_____"
]
],
[
[
"## Proposiciones legislativas\n\nPrincipales transformaciones:\n- Selección de los tipos de propuestas legislativas deseadas\n- Selección de atributos\n- Ajustes de valores faltantes\n- Extracción de palabras claves de las ementas\n - Remoción de stopwords, meses, puntuación, números\n - Remoción de palabras con menos de 3 caracteres y semanticamente irrelevantes\n - Remoción de bigramas semanticamente irrelevantes",
"_____no_output_____"
]
],
[
[
"lista_proposicoes = glob.glob('dados/proposicoes/propo*')",
"_____no_output_____"
],
[
"tipos_dados = {\n 'id': object,\n 'uri': object,\n 'siglaTipo': object,\n 'numero': object,\n 'ano': int,\n 'codTipo': object,\n 'descricaoTipo': object,\n 'ementa': object,\n 'ementaDetalhada': object,\n 'keywords': object, \n 'uriOrgaoNumerador': object,\n 'uriPropAnterior': object,\n 'uriPropPrincipal': object,\n 'uriPropPosterior': object,\n 'urlInteiroTeor': object, \n 'urnFinal': object, \n 'ultimoStatus_sequencia': object,\n 'ultimoStatus_uriRelator': object, \n 'ultimoStatus_idOrgao': object,\n 'ultimoStatus_siglaOrgao': object, \n 'ultimoStatus_uriOrgao': object,\n 'ultimoStatus_regime': object, \n 'ultimoStatus_descricaoTramitacao': object,\n 'ultimoStatus_idTipoTramitacao': object, \n 'ultimoStatus_descricaoSituacao': object,\n 'ultimoStatus_idSituacao': object, \n 'ultimoStatus_despacho': object, \n 'ultimoStatus_url': object\n}\n\ntipo_data = ['dataApresentacao', 'ultimoStatus_dataHora']",
"_____no_output_____"
],
[
"lista_df = []\n\nfor proposicao in lista_proposicoes:\n df_proposicao = pd.read_csv(proposicao, sep=';', dtype=tipos_dados, parse_dates=tipo_data)\n lista_df.append(df_proposicao)",
"_____no_output_____"
],
[
"df_proposicao_1934_2021 = pd.concat(lista_df, axis=0, ignore_index=True)\ndf_proposicao_1934_2021.shape",
"_____no_output_____"
]
],
[
[
"<div class=\"alert-warning\"> Proposiciones legislativas: Selección de los tipos de propuestas legislativas </div>\n\n- Projeto de Decreto Legislativo [SF] (PDL)\n- Projeto de Decreto Legislativo [CD] (PDC)\n- Projeto de Decreto Legislativo [CN] (PDN)\n- Projeto de Decreto Legislativo [SF] (PDS)\n- Proposta de Emenda à Constituição (PEC)\n- Projeto de Lei (PL)\n- Projeto de Lei da Câmara (PLC)\n- Projeto de Lei Complementar (PLP)\n- Projeto de Lei de Conversão (PLV)\n- Projeto de Resolução da Câmara dos Deputados (PRC)",
"_____no_output_____"
]
],
[
[
"tipos_proposicoes = ['PDS', 'PDC', 'PDN', 'PEC', 'PL', 'PLC', 'PLP', 'PLV', 'PRC']",
"_____no_output_____"
],
[
"df_proposicoes_tipos_desejados = df_proposicao_1934_2021[df_proposicao_1934_2021['siglaTipo'].isin(tipos_proposicoes)].copy()",
"_____no_output_____"
],
[
"df_proposicoes_tipos_desejados.shape",
"_____no_output_____"
]
],
[
[
"<div class=\"alert-warning\"> Proposiciones legislativas: Selección de atributos para análisis </div>",
"_____no_output_____"
]
],
[
[
"df_proposicoes = df_proposicoes_tipos_desejados[['id','siglaTipo','ano', 'codTipo', 'descricaoTipo',\n 'ementa', 'ementaDetalhada', 'keywords']].copy()",
"_____no_output_____"
],
[
"df_proposicoes.shape",
"_____no_output_____"
]
],
[
[
"<div class=\"alert-warning\"> Proposiciones legislativas: Ajuste de valores faltantes </div>",
"_____no_output_____"
]
],
[
[
"df_proposicoes.isnull().sum(axis = 0)",
"_____no_output_____"
],
[
"df_proposicoes[\n (df_proposicoes['ementa'].isnull()) & \n (df_proposicoes['ementaDetalhada'].isnull()) & \n (df_proposicoes['keywords'].isnull())].head()",
"_____no_output_____"
],
[
"df_proposicoes.dropna(axis=0, subset=['ementa'], inplace=True)",
"_____no_output_____"
],
[
"df_proposicoes.shape",
"_____no_output_____"
]
],
[
[
"<div class=\"alert-warning\"> Proposiciones legislativas: Normalización de las keywords existentes </div>",
"_____no_output_____"
]
],
[
[
"df_proposicoes_com_keywords = df_proposicoes[df_proposicoes['keywords'].notna()].copy()",
"_____no_output_____"
],
[
"df_proposicoes[df_proposicoes['keywords'].notna()]",
"_____no_output_____"
],
[
"nltk.download('punkt')\nnltk.download('stopwords')",
"[nltk_data] Downloading package punkt to /home/cecivieira/nltk_data...\n[nltk_data] Package punkt is already up-to-date!\n[nltk_data] Downloading package stopwords to\n[nltk_data] /home/cecivieira/nltk_data...\n[nltk_data] Package stopwords is already up-to-date!\n"
]
],
[
[
"<div class=\"alert-warning\"> Proposiciones legislativas: Funcciones para borrar la puntuación, preposiciones, números y artículos</div>",
"_____no_output_____"
]
],
[
[
"meses = ['janeiro', 'fevereiro', 'março', 'abril', 'maio', 'junho', 'julho','agosto', 'setembro', 'outubro', 'novembro', 'dezembro']",
"_____no_output_____"
],
[
"def define_stopwords_punctuation():\n stopwords = nltk.corpus.stopwords.words('portuguese') + meses\n pontuacao = list(punctuation)\n stopwords.extend(pontuacao)\n return stopwords",
"_____no_output_____"
],
[
"def remove_stopwords_punctuation_da_sentenca(texto):\n padrao_digitos = r'[0-9]'\n texto = re.sub(padrao_digitos, '', texto)\n palavras = nltk.tokenize.word_tokenize(texto.lower())\n stopwords = define_stopwords_punctuation()\n keywords = [palavra for palavra in palavras if palavra not in stopwords]\n return keywords",
"_____no_output_____"
],
[
"df_proposicoes_com_keywords['keywords'] = df_proposicoes_com_keywords['keywords'].apply(remove_stopwords_punctuation_da_sentenca)",
"_____no_output_____"
],
[
"def converte_lista_string(lista):\n return ','.join([palavra for palavra in lista])",
"_____no_output_____"
],
[
"df_proposicoes_com_keywords['keywords'] = df_proposicoes_com_keywords['keywords'].apply(converte_lista_string)",
"_____no_output_____"
]
],
[
[
"<div class=\"alert-warning\"> Proposiciones legislativas: Borra las proposiciones que quedaron sin keywords despues de la limpieza</div>",
"_____no_output_____"
]
],
[
[
"df_proposicoes_com_keywords = df_proposicoes_com_keywords[df_proposicoes_com_keywords['keywords'] != '']",
"_____no_output_____"
],
[
"df_proposicoes_com_keywords.head()",
"_____no_output_____"
]
],
[
[
"<div class=\"alert-warning\"> Proposiciones legislativas: Saca `keywords` de la columna `ementa` </div>",
"_____no_output_____"
]
],
[
[
"df_proposicoes_sem_keywords = df_proposicoes[df_proposicoes['keywords'].isna()].copy()",
"_____no_output_____"
],
[
"df_proposicoes_sem_keywords['keywords'] = df_proposicoes_sem_keywords['ementa'].apply(remove_stopwords_punctuation_da_sentenca)",
"_____no_output_____"
],
[
"lista_keywords = []\nlista_keywords_temp = df_proposicoes_sem_keywords['keywords'].tolist()\n_ = [lista_keywords.extend(item) for item in lista_keywords_temp]",
"_____no_output_____"
],
[
"palavras_para_descarte = [item for item in set(lista_keywords) if len(item) <= 3]",
"_____no_output_____"
],
[
"substantivos_nao_descartaveis = ['cão', 'mãe', 'oab', 'boa', 'pré', 'voz', 'rui', 'uva', 'gás', 'glp', 'apa']",
"_____no_output_____"
],
[
"palavras_para_descarte_refinada = [palavra for palavra in palavras_para_descarte if palavra not in substantivos_nao_descartaveis]",
"_____no_output_____"
],
[
"def remove_palavras_para_descarte_da_sentenca(texto):\n keywords = []\n for palavra in texto:\n if palavra not in palavras_para_descarte_refinada:\n keywords.append(palavra)\n return keywords",
"_____no_output_____"
],
[
"df_proposicoes_sem_keywords['keywords'] = df_proposicoes_sem_keywords['keywords'].apply(remove_palavras_para_descarte_da_sentenca)",
"_____no_output_____"
]
],
[
[
"<div class=\"alert-warning\"> Proposiciones legislativas: Tratamiento de bigramas </div>",
"_____no_output_____"
]
],
[
[
"def gera_n_grams(texto, ngram=2):\n temporario = zip(*[texto[indice:] for indice in range(0,ngram)])\n resultado = [' '.join(ngram) for ngram in temporario]\n return resultado",
"_____no_output_____"
],
[
"df_proposicoes_sem_keywords['bigrams'] = df_proposicoes_sem_keywords['keywords'].apply(gera_n_grams)",
"_____no_output_____"
],
[
"lista_ngrams = []\nlista_ngrams_temp = df_proposicoes_sem_keywords['bigrams'].tolist()\n_ = [lista_ngrams.extend(item) for item in lista_ngrams_temp]",
"_____no_output_____"
],
[
"bigrams_comuns = nltk.FreqDist(lista_ngrams).most_common(50)",
"_____no_output_____"
],
[
"lista_bigramas_comuns = [bigrama for bigrama, frequencia in bigrams_comuns]",
"_____no_output_____"
]
],
[
[
"<div class=\"alert-warning\"> Proposiciones legislativas: Selección de los bigramas semanticamente irrelevantes </div>",
"_____no_output_____"
]
],
[
[
"lista_bigramas_comuns_limpa = ['dispõe sobre', 'outras providências', 'nova redação', 'poder executivo', 'distrito federal',\n 'autoriza poder', 'federal outras','redação constituição', 'dispõe sôbre', 'código penal', 'artigo constituição',\n 'disposições constitucionais', 'altera dispõe', 'decreto-lei código', 'constitucionais transitórias', 'altera redação',\n 'abre ministério', 'executivo abrir', 'redação artigo', 'sobre criação', 'acrescenta parágrafo', 'parágrafo único',\n 'concede isenção', 'altera dispositivos', 'altera complementar', 'dispondo sobre', 'código processo', 'outras providências.',\n 'providências. historico', 'ministério fazenda', 'altera leis', 'programa nacional', 'quadro permanente', 'outras providencias',\n 'inciso constituição', 'abrir ministério', 'estabelece normas', 'ministério justiça', 'tempo serviço', 'instituto nacional',\n 'institui sistema', 'operações crédito', 'altera institui', 'dispõe sôbre']",
"_____no_output_____"
],
[
"palavras_para_descarte_origem_bigramas = []\n_ = [palavras_para_descarte_origem_bigramas.extend(bigrama.split(' ')) for bigrama in lista_bigramas_comuns_limpa]\npalavras_para_descarte_origem_bigramas_unicas = set(palavras_para_descarte_origem_bigramas)",
"_____no_output_____"
],
[
"def remove_palavras_origem_bigramas_da_sentenca(texto):\n keywords = []\n for palavra in texto:\n if palavra not in palavras_para_descarte_origem_bigramas_unicas:\n keywords.append(palavra)\n return keywords",
"_____no_output_____"
],
[
"df_proposicoes_sem_keywords['keywords'] = df_proposicoes_sem_keywords['keywords'].apply(remove_palavras_origem_bigramas_da_sentenca)",
"_____no_output_____"
],
[
"df_proposicoes_sem_keywords['keywords'] = df_proposicoes_sem_keywords['keywords'].apply(converte_lista_string)",
"_____no_output_____"
],
[
"df_proposicoes_sem_keywords = df_proposicoes_sem_keywords.drop(columns=['bigrams'])",
"_____no_output_____"
]
],
[
[
"<div class=\"alert-warning\"> Proposiciones legislativas: Borra las proposiciones que quedaron sin keywords despues de la limpieza</div>",
"_____no_output_____"
]
],
[
[
"df_proposicoes_sem_keywords = df_proposicoes_sem_keywords[df_proposicoes_sem_keywords['keywords'] != '']",
"_____no_output_____"
],
[
"df_proposicoes_sem_keywords[df_proposicoes_sem_keywords['keywords']== ''] ",
"_____no_output_____"
]
],
[
[
"<div class=\"alert-warning\"> Proposiciones legislativas: Reuni los dataframes</div>",
"_____no_output_____"
]
],
[
[
"df_proposicoes_v_final = pd.concat([df_proposicoes_com_keywords, df_proposicoes_sem_keywords])",
"_____no_output_____"
],
[
"df_proposicoes_v_final.shape",
"_____no_output_____"
],
[
"df_proposicoes_v_final.info()",
"<class 'pandas.core.frame.DataFrame'>\nInt64Index: 152386 entries, 105 to 680357\nData columns (total 8 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 id 152386 non-null object\n 1 siglaTipo 152386 non-null object\n 2 ano 152386 non-null int64 \n 3 codTipo 152386 non-null object\n 4 descricaoTipo 152386 non-null object\n 5 ementa 152386 non-null object\n 6 ementaDetalhada 42563 non-null object\n 7 keywords 152386 non-null object\ndtypes: int64(1), object(7)\nmemory usage: 10.5+ MB\n"
],
[
"df_proposicoes_v_final.to_csv('dados/proposicoes_legislativas_limpas(1).csv', index=False)",
"_____no_output_____"
]
],
[
[
"# Creación de vocabulario\n\n\n\nAntes de hacer el análisis de los temas de las proposiciones hacía falta clasificarlas con un vocabulario controlado. Así que, usando el conjunto de datos \"temas de proposições\" clasifiqué algunas proposiciones relativas a protección de derechos de grupos históricamente marginados, a saber: campesinos, mujeres, población LGTQIA+, negros, ancianos, discapacitados, artistas, poblaciones económicamente vulnerables y pueblos indígenas.\n\nPrincipales etapas:\n- Reunir todas las palabras claves\n- Atribuir manualmente palabras a temas\n- Atribuir tema a proposiciones que contenía la palabra clave",
"_____no_output_____"
]
],
[
[
"proposicoes = pd.read_csv('dados/proposicoes_legislativas_limpas(1).csv')",
"_____no_output_____"
],
[
"proposicoes.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 152386 entries, 0 to 152385\nData columns (total 8 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 id 152386 non-null int64 \n 1 siglaTipo 152386 non-null object\n 2 ano 152386 non-null int64 \n 3 codTipo 152386 non-null int64 \n 4 descricaoTipo 152386 non-null object\n 5 ementa 152386 non-null object\n 6 ementaDetalhada 42563 non-null object\n 7 keywords 152386 non-null object\ndtypes: int64(3), object(5)\nmemory usage: 9.3+ MB\n"
]
],
[
[
"Reunião de palavras chaves para classificação",
"_____no_output_____"
]
],
[
[
"keywords = proposicoes['keywords']",
"_____no_output_____"
],
[
"vocabulario = []",
"_____no_output_____"
],
[
"for proposicao in keywords:\n lista = proposicao.split(',')\n vocabulario.extend(lista)",
"_____no_output_____"
],
[
"vocabulario_unico = set(vocabulario)",
"_____no_output_____"
],
[
"with open('dados/vocabulario.txt', 'w') as palavras:\n for termo in vocabulario_unico:\n palavras.write(termo + '\\n')",
"_____no_output_____"
]
],
[
[
"<div class=\"alert-warning\">Relacioné manualmente palabras claves a uno de los temas del conjunto de datos \"Temas\"</div>",
"_____no_output_____"
]
],
[
[
"vocabulario_temp = pd.read_csv('dados/temas_vocabulario.csv')",
"_____no_output_____"
],
[
"vocabulario_temp.head()",
"_____no_output_____"
]
],
[
[
"<div class=\"alert-warning\"> Crié el vocabuario</div>",
"_____no_output_____"
]
],
[
[
"vocabulario = pd.DataFrame(columns=['cod', 'tema', 'palavra_chave'])",
"_____no_output_____"
],
[
"indices = vocabulario_temp.index",
"_____no_output_____"
],
[
"for indice in indices:\n descricao = vocabulario_temp['descricao'].iloc[indice]\n if type(descricao) == str:\n for palavra in descricao.split(' '):\n df = pd.DataFrame(data={'cod':vocabulario_temp['cod'].iloc[indice], 'tema':vocabulario_temp['nome'].iloc[indice], 'palavra_chave':[palavra]})\n vocabulario = pd.concat([vocabulario, df], ignore_index=True)\n ",
"_____no_output_____"
],
[
"vocabulario.sample(5)",
"_____no_output_____"
],
[
"vocabulario.shape",
"_____no_output_____"
],
[
"vocabulario = vocabulario[vocabulario['palavra_chave']!= ''].copy()\nvocabulario.shape",
"_____no_output_____"
]
],
[
[
"<div class=\"alert-warning\">Atribuí el tema a las proposiciones que contenía la palabra en la columna `keyword`</div>",
"_____no_output_____"
]
],
[
[
"def atribui_tema(proposicao):\n for tema, palavra_chave in zip(vocabulario['tema'], vocabulario['palavra_chave']):\n if palavra_chave in proposicao:\n return tema",
"_____no_output_____"
],
[
"proposicoes['temas'] = proposicoes['keywords'].apply(atribui_tema)",
"_____no_output_____"
],
[
"proposicoes.to_csv('dados/proposicoes_legislativas_limpas_vocabulario(1).csv', index=False)",
"_____no_output_____"
]
],
[
[
"# Modelo de aprendizaje de máquina\n\n\n\nHay que clasificar todas las proposiciones antes del análisis.\n\nPrincipales etapas:\n- Establece variable predictora: “ementa” y la de respuesta:\"temas\"\n- Encode da variable de respuesta utilizando preprocessing.LabelEncoder\n- Divide conjunto de datos para teste y entrenamiento\n- Convierte las ementas en vectores con HashingVectorizer\n- Crea el modelo de clasificación con RandomForestClassifier\n- Entrena el modelo\n- Evalua cualitativamente a partir de la comparación entre las clasificaciones de los conjuntos de prueba y entrenamiento\n\nAl final tenemos clasificadas solamente las proposiciones referentes a temática estudiada",
"_____no_output_____"
]
],
[
[
"from sklearn import preprocessing\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import HashingVectorizer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.ensemble import RandomForestClassifier\nfrom keras.utils import np_utils\n\nimport nltk\nfrom nltk.corpus import stopwords\n\nimport pandas as pd\nimport numpy as np",
"2022-04-25 13:48:02.294118: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcudart.so.11.0'; dlerror: libcudart.so.11.0: cannot open shared object file: No such file or directory\n2022-04-25 13:48:02.294190: I tensorflow/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine.\n"
]
],
[
[
"<div class=\"alert-warning\">Classifica proposições legislativas</div>",
"_____no_output_____"
]
],
[
[
"df_proposicoes = pd.read_csv(\"dados/proposicoes_legislativas_limpas_vocabulario(1).csv\")\ndf_proposicoes_classificado = df_proposicoes.dropna(subset=[\"temas\"])\ndf_proposicoes_classificado = df_proposicoes_classificado[[\"ementa\",\"temas\"]]",
"_____no_output_____"
],
[
"df_proposicoes_classificado.shape",
"_____no_output_____"
],
[
"df_proposicoes_classificado.head()",
"_____no_output_____"
]
],
[
[
"<div class=\"alert-warning\">Establece variable predictora: “ementa” y la de respuesta:\"temas\"</div>",
"_____no_output_____"
]
],
[
[
"sentences = df_proposicoes_classificado['ementa'].values",
"_____no_output_____"
]
],
[
[
"<div class=\"alert-warning\">Encode da variable de respuesta</div>",
"_____no_output_____"
]
],
[
[
"le = preprocessing.LabelEncoder()\nle.fit(df_proposicoes_classificado['temas'].unique())\n\ny = le.transform(df_proposicoes_classificado['temas'])",
"_____no_output_____"
]
],
[
[
"<div class=\"alert-warning\">Divide el conjunto de teste y entrenamiento</div>",
"_____no_output_____"
]
],
[
[
"sentences_train, sentences_test, y_train, y_test = train_test_split(\n sentences, y, test_size=0.25, random_state=1000)",
"_____no_output_____"
]
],
[
[
"<div class=\"alert-warning\">Convierte las ementas en vectores con HashingVectorizer</div>",
"_____no_output_____"
]
],
[
[
"vectorizer = CountVectorizer()\nvectorizer.fit(sentences_train)\n\nX_train = vectorizer.transform(sentences_train)\nX_test = vectorizer.transform(sentences_test)\nX_train\n\nhasher = HashingVectorizer(\n n_features=10000,\n stop_words=stopwords.words('portuguese'),\n alternate_sign=False,\n norm=None,\n )\nhasher.fit(sentences_train)\nX_train_hasher = hasher.transform(sentences_train)\nX_test_hasher = hasher.transform(sentences_test)",
"_____no_output_____"
],
[
"X_train_hasher.shape",
"_____no_output_____"
]
],
[
[
"<div class=\"alert-warning\">Cría y entreina clasificador</div>",
"_____no_output_____"
]
],
[
[
"clf = RandomForestClassifier(n_estimators=200,random_state=0)\nclf.fit(X_train_hasher, y_train)",
"_____no_output_____"
]
],
[
[
"<div class=\"alert-warning\">Verifica el coeficiente de determinación (R²)</div>",
"_____no_output_____"
]
],
[
[
"score = clf.score(X_test_hasher, y_test)\n\nprint(\"Acurácia:\", score)",
"Acurácia: 0.7920047496536711\n"
]
],
[
[
"<div class=\"alert-warning\">Avalia modelo cualitativamente</div>",
"_____no_output_____"
]
],
[
[
"df_random_forest_results = pd.DataFrame([sentences_test,le.inverse_transform(clf.predict(X_test_hasher))]).transpose().rename(columns={0:\"ementa\",1:\"tema\"})\ndf_random_forest_results.head()",
"_____no_output_____"
]
],
[
[
"<div class=\"alert-warning\">Cría listado con probabilidades de clasificación de la proposición en cada tema</div>",
"_____no_output_____"
]
],
[
[
"predicted_probabilities = clf.predict_proba(X_test_hasher)",
"_____no_output_____"
]
],
[
[
"<div class=\"alert-warning\">Selecciona el tema con mayor probabilidad para cada proposición</div>",
"_____no_output_____"
]
],
[
[
"df_random_forest_results[\"probabilidade_predicao\"] = np.amax(predicted_probabilities,axis=1)",
"_____no_output_____"
],
[
"df_random_forest_results.head()",
"_____no_output_____"
]
],
[
[
"<div class=\"alert-warning\">Cría dataframe comparativo entre los temas preestablecidos y los clasificados por el clasificador</div>",
"_____no_output_____"
]
],
[
[
"df_ementas_test = pd.DataFrame([sentences_test,le.inverse_transform(y_test)]).transpose().rename(columns={0:\"ementa\",1:\"tema\"})",
"_____no_output_____"
],
[
"df_ementas_test.head()",
"_____no_output_____"
],
[
"df_avaliacao = df_random_forest_results.merge(df_ementas_test,left_on=\"ementa\",right_on=\"ementa\",suffixes=[\"_resposta_modelo\",\"_correto\"])\ndf_avaliacao[\"modelo_acertou\"] = df_avaliacao[\"tema_resposta_modelo\"] == df_avaliacao[\"tema_correto\"]\ndf_avaliacao[\"modelo_acertou\"] = df_avaliacao[\"modelo_acertou\"].replace({True: \"Sim\", False: \"Não\"})",
"_____no_output_____"
],
[
"df_avaliacao[\"modelo_acertou\"].value_counts()",
"_____no_output_____"
]
],
[
[
"<div class=\"alert-warning\">Resumen de la validación</div>",
"_____no_output_____"
]
],
[
[
"df_avaliacao[df_avaliacao[\"probabilidade_predicao\"] >= 0.85][\"modelo_acertou\"].value_counts()",
"_____no_output_____"
],
[
"df_avaliacao.head()",
"_____no_output_____"
],
[
"df_ementas_test.tema.value_counts()",
"_____no_output_____"
],
[
"df_avaliacao.to_csv('dados/avaliacao-qualitativa-modelo-classificacao(1).csv')",
"_____no_output_____"
]
],
[
[
"<div class=\"alert-warning\">Aplicación del modelo</div>",
"_____no_output_____"
]
],
[
[
"df_proposicoes_total = df_proposicoes[[\"ementa\",\"temas\"]]",
"_____no_output_____"
],
[
"ementas = df_proposicoes_total['ementa'].values",
"_____no_output_____"
],
[
"ementas_hasher = hasher.transform(ementas)",
"_____no_output_____"
],
[
"df_proposicoes_total_classificadas = pd.DataFrame([ementas,le.inverse_transform(clf.predict(ementas_hasher))]).transpose().rename(\n columns={0:\"ementa\",1:\"temas\"})",
"_____no_output_____"
],
[
"df_proposicoes_total_classificadas.head()",
"_____no_output_____"
],
[
"df_proposicoes_total_classificadas.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 152386 entries, 0 to 152385\nData columns (total 2 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 ementa 152386 non-null object\n 1 temas 152386 non-null object\ndtypes: object(2)\nmemory usage: 2.3+ MB\n"
]
],
[
[
"Informar a probabilidade de acerto de cada tema",
"_____no_output_____"
]
],
[
[
"temas_probabilities = clf.predict_proba(ementas_hasher)",
"_____no_output_____"
],
[
"df_proposicoes_total_classificadas[\"probabilidade_predicao\"] = np.amax(temas_probabilities, axis=1)",
"_____no_output_____"
],
[
"df_proposicoes_total_classificadas.head()",
"_____no_output_____"
],
[
"df_proposicoes_total_classificadas.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 152386 entries, 0 to 152385\nData columns (total 3 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 ementa 152386 non-null object \n 1 temas 152386 non-null object \n 2 probabilidade_predicao 152386 non-null float64\ndtypes: float64(1), object(2)\nmemory usage: 3.5+ MB\n"
]
],
[
[
"Limpa temas cuja a probabilidade de acerto é menor do que 85%",
"_____no_output_____"
]
],
[
[
"def retira_tema_com_baixa_probabilidade_acerto(proposicoes):\n if proposicoes['probabilidade_predicao'] >= 0.85:\n return proposicoes['temas']\n else:\n return np.nan",
"_____no_output_____"
],
[
"df_proposicoes_total_classificadas['temas'] = df_proposicoes_total_classificadas.apply(retira_tema_com_baixa_probabilidade_acerto, \n axis=1)",
"_____no_output_____"
]
],
[
[
"Reunir conjunto de dados de proposições legislativas com classificação realizada",
"_____no_output_____"
]
],
[
[
"df_proposicoes_classificador = df_proposicoes.join(df_proposicoes_total_classificadas, rsuffix='_classificador')",
"_____no_output_____"
],
[
"df_proposicoes_classificador.shape",
"_____no_output_____"
],
[
"df_proposicoes_classificador.head()",
"_____no_output_____"
],
[
"df_proposicoes_classificador.drop(columns=['temas', 'ementa_classificador', 'probabilidade_predicao'], inplace=True)",
"_____no_output_____"
],
[
"df_proposicoes_classificador.to_csv('dados/proposicoes_legislativas_limpas_classificadas(1).csv', index=False)",
"_____no_output_____"
]
],
[
[
"# Análisis exploratorio de datos\n\n",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt",
"_____no_output_____"
]
],
[
[
"## 1. ¿Hubo impacto positivo en la cantidad de mujeres elegidas para la Cámara en las 3 legislaciones subsecuentes a aprobación de la Ley 9.504/1997?\n**Hipótesis:** No huvo impacto positivo en el percentual de mujeres elegidas para la Cámara en las 3 legislaciones subsecuentes a aprobación de la Ley 9.504/1997.",
"_____no_output_____"
]
],
[
[
"df_legislaturas = pd.read_csv('dados/legislaturas_1934_2023_limpas(1).csv')",
"_____no_output_____"
],
[
"df_legislaturas.head()",
"_____no_output_____"
]
],
[
[
"<div class=\"alert-warning\">Determinar el período de los datos para el análisis (1995 a 2007)</div>",
"_____no_output_____"
]
],
[
[
"legislaturas_h1 = df_legislaturas[(df_legislaturas['id'] >= 50) & (df_legislaturas['id'] <= 53)]['id'].unique().tolist()",
"_____no_output_____"
],
[
"df_candidaturas_eleitas = pd.read_csv('dados/candidaturas_eleitas(1).csv')",
"_____no_output_____"
],
[
"df_candidaturas_eleitas_h1 = df_candidaturas_eleitas[df_candidaturas_eleitas['idLegislatura'].isin(legislaturas_h1)].copy()",
"_____no_output_____"
],
[
"df_candidaturas_eleitas_h1['idLegislatura'].unique()",
"_____no_output_____"
]
],
[
[
"<div class=\"alert-warning\">Agrupar por género</div>",
"_____no_output_____"
]
],
[
[
"agrupa_sexo = df_candidaturas_eleitas_h1.groupby(['idLegislatura', 'sexo']).size().to_frame('valorAbsoluto')",
"_____no_output_____"
]
],
[
[
"<div class=\"alert-warning\">Estabelece el porcentaje de cada grupo en relación al total de diputados</div>",
"_____no_output_____"
]
],
[
[
"agrupa_sexo['porcentagem'] = round(agrupa_sexo['valorAbsoluto'].div(\n agrupa_sexo.groupby('idLegislatura')['valorAbsoluto'].transform('sum')).mul(100), 2)",
"_____no_output_____"
],
[
"agrupa_sexo_df = agrupa_sexo.reset_index()",
"_____no_output_____"
],
[
"agrupa_sexo_df",
"_____no_output_____"
]
],
[
[
"<div class=\"alert-warning\">Prepara los datos para visualización</div>",
"_____no_output_____"
]
],
[
[
"mulher_h1 = agrupa_sexo_df[agrupa_sexo_df['sexo'] == 'F']['porcentagem'].tolist()\nhomem_h1 = agrupa_sexo_df[agrupa_sexo_df['sexo'] == 'M']['porcentagem'].tolist()",
"_____no_output_____"
],
[
"legislaturas_lista_h1 = agrupa_sexo_df['idLegislatura'].unique()",
"_____no_output_____"
],
[
"legislaturas_lista_h1 = df_legislaturas[(df_legislaturas['id'] >= 50) & \n (df_legislaturas['id'] <= 53)]['dataInicio'].unique().tolist()\nlegislaturas_lista_h1.sort()",
"_____no_output_____"
],
[
"legislaturas_lista_h1 = list(map(str, legislaturas_lista_h1))\nlegislaturas_lista_h1",
"_____no_output_____"
],
[
"agrupa_sexo_df2 = pd.DataFrame({'mulher': mulher_h1,\n 'homem': homem_h1\n }, index=legislaturas_lista_h1,\n )\nagrupa_sexo_df2.plot.line()\nagrupa_sexo_df2.to_csv('dados/analise_genero_1995_2007(1).csv')",
"_____no_output_____"
]
],
[
[
"<div class=\"alert-warning\">Visualización por género</div>",
"_____no_output_____"
]
],
[
[
"agrupa_sexo_df2.plot.line(subplots=True)",
"_____no_output_____"
],
[
"diferenca_percentual_mulher_h1_total = mulher_h1[-1] - mulher_h1[0]",
"_____no_output_____"
],
[
"print(f'''\nHubo impacto positivo en la cantidad de mujeres elegidas para la Cámara en las 3 legislaciones subsecuentes a aprobación de la Ley 9.504/1997? \\n \nHipótesis comprobada? Sí. \\n\nHubo aumento de {round(diferenca_percentual_mulher_h1_total, 2)}% en el total de mujeres elegidas, sin embargo es un porcentaje muy bajo para justificar como impacto positivo.\n''')",
"\nHubo impacto positivo en la cantidad de mujeres elegidas para la Cámara en las 3 legislaciones subsecuentes a aprobación de la Ley 9.504/1997? \n \nHipótesis comprobada? Sí. \n\nHubo aumento de 1.44% en el total de mujeres elegidas, sin embargo es un porcentaje muy bajo para justificar como impacto positivo.\n\n"
]
],
[
[
"## 2. ¿Hubo impacto positivo en la cantidad de mujeres elegidas para la Cámara en las 3 legislaciones subsecuentes a aprobación de la Ley 12.034/2009?\n**Hipótesis:** Huvo impacto positivo en el percentual de mujeres elegidas para la Cámara en las 3 legislaciones subsecuentes a aprobación de la Ley 12.034/2009.",
"_____no_output_____"
],
[
"<div class=\"alert-warning\">Determinar el período de los datos para el análisis (2007 a 2019)</div>",
"_____no_output_____"
]
],
[
[
"legislaturas_h2 = df_legislaturas[(df_legislaturas['id'] >= 53) & (df_legislaturas['id'] <= 56)]['id'].unique().tolist()",
"_____no_output_____"
],
[
"df_candidaturas_eleitas_h2 = df_candidaturas_eleitas[df_candidaturas_eleitas['idLegislatura'].isin(legislaturas_h2)].copy()",
"_____no_output_____"
],
[
"df_candidaturas_eleitas_h2['idLegislatura'].unique()",
"_____no_output_____"
]
],
[
[
"<div class=\"alert-warning\">Agrupar por género</div>",
"_____no_output_____"
]
],
[
[
"agrupa_sexo_h2 = df_candidaturas_eleitas_h2.groupby(['idLegislatura', 'sexo']).size().to_frame('valorAbsoluto')",
"_____no_output_____"
]
],
[
[
"<div class=\"alert-warning\">Estabelece el porcentaje de cada grupo en relación al total de diputados</div>",
"_____no_output_____"
]
],
[
[
"agrupa_sexo_h2['porcentagem'] = round(agrupa_sexo_h2['valorAbsoluto'].div(agrupa_sexo_h2.groupby(\n 'idLegislatura')['valorAbsoluto'].transform('sum')).mul(100), 2)",
"_____no_output_____"
],
[
"agrupa_sexo_h2_df = agrupa_sexo_h2.reset_index()",
"_____no_output_____"
],
[
"agrupa_sexo_h2",
"_____no_output_____"
]
],
[
[
"<div class=\"alert-warning\">Prepara los datos para visualización</div>",
"_____no_output_____"
]
],
[
[
"mulher_h2 = agrupa_sexo_h2_df[agrupa_sexo_h2_df['sexo'] == 'F']['porcentagem'].tolist()\nhomem_h2 = agrupa_sexo_h2_df[agrupa_sexo_h2_df['sexo'] == 'M']['porcentagem'].tolist()",
"_____no_output_____"
],
[
"legislaturas_lista_h2 = agrupa_sexo_h2_df['idLegislatura'].unique()",
"_____no_output_____"
],
[
"legislaturas_lista_h2 = df_legislaturas[(df_legislaturas['id'] >= 53) & (df_legislaturas['id'] <= 56)\n ]['dataInicio'].unique().tolist()\nlegislaturas_lista_h2.sort()",
"_____no_output_____"
],
[
"legislaturas_lista_h2 = list(map(str, legislaturas_lista_h2))\nlegislaturas_lista_h2",
"_____no_output_____"
],
[
"agrupa_sexo_h2_df2 = pd.DataFrame({'mulher': mulher_h2,\n 'homem': homem_h2\n }, index=legislaturas_lista_h2,\n )\nagrupa_sexo_h2_df2.plot.line()\nagrupa_sexo_h2_df2.to_csv('dados/analise_genero_2007_2019(1).csv')",
"_____no_output_____"
]
],
[
[
"<div class=\"alert-warning\">Visualización por género</div>",
"_____no_output_____"
]
],
[
[
"agrupa_sexo_h2_df2.plot.line(subplots=True)",
"_____no_output_____"
],
[
"diferenca_percentual_mulher_h2_total = mulher_h2[-1] - mulher_h2[0]",
"_____no_output_____"
],
[
"print(f'''\nHubo impacto positivo en la cantidad de mujeres elegidas para la Cámara en las 3 legislaciones subsecuentes a aprobación de la Ley 12.034/2009? \\n \nHipótesis comprobada? Sí. \\n\nHubo aumento de {round(diferenca_percentual_mulher_h2_total, 2)}% en el total de mujeres elegidas.\n''')",
"\nHubo impacto positivo en la cantidad de mujeres elegidas para la Cámara en las 3 legislaciones subsecuentes a aprobación de la Ley 12.034/2009? \n \nHipótesis comprobada? Sí. \n\nHubo aumento de 6.52% en el total de mujeres elegidas.\n\n"
]
],
[
[
"## Evolução geral",
"_____no_output_____"
]
],
[
[
"legislaturas_todas = df_candidaturas_eleitas['idLegislatura'].unique()\nlegislaturas_todas",
"_____no_output_____"
]
],
[
[
"<div class=\"alert-warning\">Agrupar por género</div>",
"_____no_output_____"
]
],
[
[
"agrupa_sexo_todas = df_candidaturas_eleitas.groupby(['idLegislatura', 'sexo']).size().to_frame('valorAbsoluto')",
"_____no_output_____"
]
],
[
[
"<div class=\"alert-warning\">Estabelece el porcentaje de cada grupo en relación al total de diputados</div>",
"_____no_output_____"
]
],
[
[
"agrupa_sexo_todas['porcentagem'] = round(agrupa_sexo_todas['valorAbsoluto'].div(agrupa_sexo_todas.groupby(\n 'idLegislatura')['valorAbsoluto'].transform('sum')).mul(100), 2)",
"_____no_output_____"
],
[
"agrupa_sexo_todas_df = agrupa_sexo_todas.reset_index()",
"_____no_output_____"
],
[
"agrupa_sexo_todas_df",
"_____no_output_____"
]
],
[
[
"<div class=\"alert-warning\">Prepara los datos para visualización</div>",
"_____no_output_____"
]
],
[
[
"mulher_todas = agrupa_sexo_todas_df[agrupa_sexo_todas_df['sexo'] == 'F']['porcentagem'].tolist()\nhomem_todas = agrupa_sexo_todas_df[agrupa_sexo_todas_df['sexo'] == 'M']['porcentagem'].tolist()",
"_____no_output_____"
],
[
"len(mulher_todas), len(homem_todas)",
"_____no_output_____"
],
[
"mulher_todas[:5]",
"_____no_output_____"
],
[
"mulher_todas.insert(2, 0)",
"_____no_output_____"
],
[
"len(mulher_todas), len(homem_todas)",
"_____no_output_____"
],
[
"mulher_todas[:5]",
"_____no_output_____"
],
[
"legislaturas_lista_todas = agrupa_sexo_todas_df['idLegislatura'].unique()",
"_____no_output_____"
],
[
"legislaturas_lista_todas = df_legislaturas['dataInicio'].unique().tolist()\nlegislaturas_lista_todas.sort()",
"_____no_output_____"
],
[
"legislaturas_lista_todas = list(map(str, legislaturas_lista_todas))",
"_____no_output_____"
],
[
"len(legislaturas_lista_todas), len(mulher_todas), len(homem_todas)",
"_____no_output_____"
],
[
"agrupa_sexo_todas_df2 = pd.DataFrame({'mulher': mulher_todas,\n 'homem': homem_todas\n }, index=legislaturas_lista_todas,\n )\nagrupa_sexo_todas_df2.plot.line()\nagrupa_sexo_todas_df2.to_csv('dados/analise_genero_1934_2023.csv')",
"_____no_output_____"
]
],
[
[
"<div class=\"alert-warning\">Visualización por género</div>",
"_____no_output_____"
]
],
[
[
"agrupa_sexo_h2_df2.plot.line(subplots=True)",
"_____no_output_____"
]
],
[
[
"## 3. ¿Teniendo en cuenta el tema de las proposiciones legislativas, hubo aumento de los que beneficia grupos históricamente marginados en el periodo entre 1934 y 2021?\n**Hipótesis:** Sí, hubo aumento en la cantidade anual de propuestas legislativas que beneficia los grupos historicamente marginados.",
"_____no_output_____"
]
],
[
[
"proposicoes = pd.read_csv('dados/proposicoes_legislativas_limpas_classificadas(1).csv')",
"_____no_output_____"
],
[
"proposicoes.head()",
"_____no_output_____"
]
],
[
[
"<div class=\"alert-warning\">Agrupa por año y cantidad de propuestas de los temas</div>",
"_____no_output_____"
]
],
[
[
"proposicoes_anuais = proposicoes[['ano', 'temas_classificador']].groupby(by=['ano']).count()",
"_____no_output_____"
],
[
"proposicoes_anuais.tail(10)",
"_____no_output_____"
]
],
[
[
"<div class=\"alert-warning\">Visualización</div>",
"_____no_output_____"
]
],
[
[
"proposicoes_anuais.plot.line()",
"_____no_output_____"
],
[
"proposicoes_anuais = proposicoes_anuais.reset_index()\nproposicoes_anuais.to_csv('dados/proposicoes_anuais(1).csv', index=False)",
"_____no_output_____"
],
[
"print(f'''\nTeniendo en cuenta el tema de las proposiciones legislativas, hubo aumento de los que beneficia grupos historicamente marginalinados en el periodo entre 1934 e 2021? \\n\nHipótesis comprobada? Sí.\nApesar de las oscilaciones hay una tendencia de crecimiento positivo en la cantidad de propuestas que benefician los grupos historicamente marginados.\n''')",
"\nTeniendo en cuenta el tema de las proposiciones legislativas, hubo aumento de los que beneficia grupos historicamente marginalinados en el periodo entre 1934 e 2021? \n\nHipótesis comprobada? Sí.\nApesar de las oscilaciones hay una tendencia de crecimiento positivo en la cantidad de propuestas que benefician los grupos historicamente marginados.\n\n"
]
],
[
[
"## 4. ¿Cuál es el coeficiente de correlación entre la cantidad anual de las propuestas legislativas que benefician los grupos historicamente marginados y el porcentaje de mujeres elegidas para la Cámara de Diputados entre 1995 y 2019?\n**Hipótesis:** Bajo",
"_____no_output_____"
],
[
"<div class=\"alert-warning\">Une los dataframes de los análisis anteriores</div>",
"_____no_output_____"
]
],
[
[
"analise_genero_1995_2007 = pd.read_csv('dados/analise_genero_1995_2007(1).csv')\nanalise_genero_2007_2019 = pd.read_csv('dados/analise_genero_2007_2019(1).csv')",
"_____no_output_____"
],
[
"analise_genero_1995_2007.columns == analise_genero_2007_2019.columns",
"_____no_output_____"
],
[
"analise_genero_1995_2019 = pd.concat([analise_genero_1995_2007, analise_genero_2007_2019], ignore_index=True)",
"_____no_output_____"
],
[
"analise_genero_1995_2019",
"_____no_output_____"
],
[
"analise_genero_1995_2019.rename(columns={'Unnamed: 0': 'ano'}, inplace=True)",
"_____no_output_____"
],
[
"analise_genero_1995_2019.drop(index=3, inplace=True)",
"_____no_output_____"
],
[
"analise_genero_1995_2019",
"_____no_output_____"
],
[
"anos = analise_genero_1995_2019['ano'].tolist()\nanos.append(2021)\nanos",
"_____no_output_____"
]
],
[
[
"<div class=\"alert-warning\">Inserta el período completo de cada legislatura, teniendo en cuenta que la proporcionalidade de género se mantiene durante los 4 años de legislatura</div>",
"_____no_output_____"
]
],
[
[
"for ano in anos:\n mulher_percentual = analise_genero_1995_2019['mulher'][analise_genero_1995_2019['ano'] == ano].item()\n homem_percentual = analise_genero_1995_2019['homem'][analise_genero_1995_2019['ano'] == ano].item()\n if ano < 2021:\n dados = pd.DataFrame(data={\n 'ano': [ano+1, ano+2, ano+3], \n 'mulher': [mulher_percentual, mulher_percentual, mulher_percentual], \n 'homem': [homem_percentual, homem_percentual, homem_percentual]}\n )\n analise_genero_1995_2019 = pd.concat([analise_genero_1995_2019, dados])",
"_____no_output_____"
],
[
"analise_genero_1995_2019.sort_values(by=['ano'], inplace=True)",
"_____no_output_____"
],
[
"analise_genero_1995_2019.reset_index(drop=True, inplace=True)",
"_____no_output_____"
],
[
"analise_genero_1995_2019.tail()",
"_____no_output_____"
],
[
"analise_genero_1995_2019.drop(index=27, inplace=True) ",
"_____no_output_____"
]
],
[
[
"<div class=\"alert-warning\">Inserta el total anual de las propuestas en favor a los grupos historicamente marginados</div>",
"_____no_output_____"
]
],
[
[
"def insere_qnt_propostas(ano_candidaturas_eleitas):\n for ano, qnt_tema in zip(proposicoes_anuais['ano'], proposicoes_anuais['temas_classificador']):\n if ano == ano_candidaturas_eleitas:\n return qnt_tema",
"_____no_output_____"
],
[
"analise_genero_1995_2019['qnt_proposicoes'] = analise_genero_1995_2019['ano'].apply(insere_qnt_propostas)",
"_____no_output_____"
],
[
"analise_genero_1995_2019.head(10)",
"_____no_output_____"
]
],
[
[
"<div class=\"alert-warning\">Cría la matriz de correlación</div>",
"_____no_output_____"
]
],
[
[
"correlacao = analise_genero_1995_2019[['mulher', 'homem', 'qnt_proposicoes']].corr(method='pearson')",
"_____no_output_____"
],
[
"coeficiente_correlacao_mulher_qnt_temas = round(correlacao['mulher']['qnt_proposicoes'],2)",
"_____no_output_____"
],
[
"correlacao_matriz_triangular = np.triu(np.ones_like(correlacao))",
"_____no_output_____"
],
[
"sns.heatmap(correlacao, annot=True, mask=correlacao_matriz_triangular)",
"_____no_output_____"
],
[
"correlacao.to_csv('dados/coeficiente_correlacao_mulher_qnt_temas(1).csv')",
"_____no_output_____"
],
[
"print(f'''¿Cuál es el coeficiente de correlación entre la cantidad anual de las propuestas legislativas que benefician los grupos historicamente marginados y el porcentaje de mujeres elegidas para la \nCámara de Diputados entre 1995 y 2019?\\n \nHipótesis comprobada? Sí. \\n\n- El coeficiente de correlación de Pearson es {coeficiente_correlacao_mulher_qnt_temas}, por lo tanto no se puede afirmar que hay correlación.\n''')",
"¿Cuál es el coeficiente de correlación entre la cantidad anual de las propuestas legislativas que benefician los grupos historicamente marginados y el porcentaje de mujeres elegidas para la \nCámara de Diputados entre 1995 y 2019?\n \nHipótesis comprobada? Sí. \n\n- El coeficiente de correlación de Pearson es -0.14, por lo tanto no se puede afirmar que hay correlación.\n\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4ac088d712bceb291f35dc0d6e34411b379be986
| 179,491 |
ipynb
|
Jupyter Notebook
|
brats/17_rise_multipixel.ipynb
|
andef4/thesis-code
|
d3fc90f65fb6d08b53fb93ceea80d0761f0d9eeb
|
[
"MIT"
] | 2 |
2019-07-24T12:21:59.000Z
|
2020-10-29T16:32:05.000Z
|
brats/17_rise_multipixel.ipynb
|
andef4/thesis-code
|
d3fc90f65fb6d08b53fb93ceea80d0761f0d9eeb
|
[
"MIT"
] | 7 |
2020-03-31T03:45:29.000Z
|
2022-03-11T23:43:54.000Z
|
brats/17_rise_multipixel.ipynb
|
andef4/thesis-code
|
d3fc90f65fb6d08b53fb93ceea80d0761f0d9eeb
|
[
"MIT"
] | 1 |
2020-02-28T13:49:01.000Z
|
2020-02-28T13:49:01.000Z
| 720.84739 | 49,768 | 0.951858 |
[
[
[
"import torch\nfrom dataset import load_dataset\nfrom basic_unet import UNet\nimport matplotlib.pyplot as plt\nfrom rise import RISE\nfrom pathlib import Path\nfrom plot_utils import plot_image_row\nfrom skimage.feature import canny",
"_____no_output_____"
],
[
"batch_size = 1\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\ntrain_loader, test_loader = load_dataset(batch_size)\nmodel = UNet(in_channels=4, out_channels=1)\nstate_dict = torch.load('models/3_basic_unet_flat_criterion_279_0.00000.pth')\nmodel.load_state_dict(state_dict)\nmodel = model.to(device)",
"_____no_output_____"
],
[
"sample = next(iter(test_loader))\n\nsegment = sample['segment']\nsegment = segment.squeeze()\n\nimage = sample['input'].to(device)\noutput = model(image)\noutput = output.detach().cpu().squeeze().numpy()\noutput = (output > output.mean())",
"_____no_output_____"
],
[
"class SegmentationRISE(RISE):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def forward(self, x):\n mask_count = self.N\n _, _, H, W = x.size()\n\n # generate new images by putting mask on top of original image\n stack = torch.mul(self.masks, x.data)\n\n output = model(x).squeeze()\n output = (output > output.mean())\n\n pixels = []\n for x in range(output.shape[0]):\n for y in range(output.shape[1]):\n if output[x][y]:\n pixels.append((x, y))\n\n pixels_per_batch = 1000\n saliencies = []\n for i in range(0, len(pixels), pixels_per_batch):\n current_pixels = pixels[i:i+pixels_per_batch]\n\n # run generated images through the model\n p = []\n for i in range(0, mask_count, self.gpu_batch):\n output_mask = self.model(stack[i:min(i + self.gpu_batch, mask_count)])\n pixel_classes = []\n for x, y in current_pixels:\n pixel_classes.append(output_mask[0][x][y])\n p.append(torch.tensor([pixel_classes]))\n p = torch.cat(p)\n p = p.to(device)\n\n # Number of classes\n CL = p.size(1)\n\n sal = torch.matmul(p.data.transpose(0, 1), self.masks.view(mask_count, H * W))\n\n sal = sal.view((CL, H, W))\n sal /= mask_count\n saliencies.append(sal)\n return saliencies\n\n\nmasks_path = Path('rise_masks.npy')\nexplainer = SegmentationRISE(model, (240, 240), batch_size)\nif not masks_path.exists():\n explainer.generate_masks(N=3000, s=8, p1=0.1, savepath=masks_path)\nelse:\n explainer.load_masks(masks_path)\n\nsaliencies = None\nwith torch.set_grad_enabled(False):\n saliencies = explainer(image)",
"_____no_output_____"
],
[
"plot_image_row([segment, output], labels=['Ground truth', 'Binarized network output'])\n\nprint('Saliency map, Saliency map overlayed on binarized network output (max)')\n\nmerged = torch.cat(saliencies)\nmaxed = torch.max(merged, dim=0)[0]\n\nplt.imshow(output, cmap='gray_r')\nedges = canny(image.cpu().numpy()[0][1], sigma=0.01)\nplt.imshow(edges, alpha=0.5, cmap='gray_r')\nplt.imshow(maxed.cpu(), cmap='jet', alpha=0.6)\nplt.show()\n\nplt.imshow(output, cmap='gray_r')\nplt.imshow(maxed.cpu(), cmap='jet', alpha=0.6)\nplt.show()\n\nprint('Saliency map, Saliency map overlayed on binarized network output (mean)')\nmean = torch.mean(merged, dim=0)\nplt.imshow(output, cmap='gray_r')\nplt.imshow(edges, alpha=0.5, cmap='gray_r')\nplt.imshow(mean.cpu(), cmap='jet', alpha=0.6)\nplt.show()\n\nplt.imshow(output, cmap='gray_r')\nplt.imshow(mean.cpu(), cmap='jet', alpha=0.6)\nplt.show()",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code"
]
] |
4ac09cfa945d5bac405f0fc3615f959a2fc544e4
| 8,572 |
ipynb
|
Jupyter Notebook
|
software/notebooks/Test Head.ipynb
|
tigakub/reachy
|
6427f8f550387dedae2ed73b2f699df4fbe7e848
|
[
"Apache-1.1"
] | null | null | null |
software/notebooks/Test Head.ipynb
|
tigakub/reachy
|
6427f8f550387dedae2ed73b2f699df4fbe7e848
|
[
"Apache-1.1"
] | 1 |
2021-04-28T21:46:18.000Z
|
2021-04-28T21:46:18.000Z
|
software/notebooks/Test Head.ipynb
|
tigakub/reachy
|
6427f8f550387dedae2ed73b2f699df4fbe7e848
|
[
"Apache-1.1"
] | 1 |
2021-04-25T14:09:19.000Z
|
2021-04-25T14:09:19.000Z
| 20.264775 | 145 | 0.49685 |
[
[
[
"# Testing the Head",
"_____no_output_____"
],
[
"**Warning:** Before running this notebook, first make sure you understand the command you run and make sure that the robot can freely move.",
"_____no_output_____"
],
[
"**Note:** Also stop all other running Python script or notebook connected to the robot as only one connection can run at the same time.",
"_____no_output_____"
]
],
[
[
"%matplotlib notebook\n\nimport time\nimport cv2 as cv\nimport numpy as np\n\nfrom matplotlib import pyplot as plt",
"_____no_output_____"
],
[
"from collections import OrderedDict\nfrom reachy import parts\n\ndef patch_head_config(head_cls):\n # if it's 'armv7l', assume that it's the raspberry pi 4 on reachy\n head_cls.dxl_motors = OrderedDict([\n ('left_antenna', {\n 'id': 30, 'offset': 26.0, 'orientation': 'direct',\n 'angle-limits': [-150, 150],\n }),\n ('right_antenna', {\n 'id': 31, 'offset': 90.0, 'orientation': 'direct',\n 'angle-limits': [-150, 150],\n }),\n ])\n \n return head_cls\n\ndef patch_head(head_cls):\n def __init__(self, io, default_camera='right'):\n \"\"\"Create new Head part.\"\"\"\n parts.part.ReachyPart.__init__(self, name='head', io=io)\n\n #self.neck = self.create_orbita_actuator('neck', Head.orbita_config)\n self.attach_dxl_motors(parts.Head.dxl_motors)\n #self.camera = self.io.find_dual_camera(default_camera)\n\n head_cls.__init__ = __init__\n\n return head_cls",
"_____no_output_____"
]
],
[
[
"## Connect to the head",
"_____no_output_____"
]
],
[
[
"from reachy import Reachy, parts",
"_____no_output_____"
],
[
"parts.Head = patch_head_config(parts.Head)",
"_____no_output_____"
],
[
"parts.Head = patch_head(parts.Head)",
"_____no_output_____"
],
[
"reachy = Reachy(\n head=parts.Head(io='/dev/ttyUSB*')\n #head=parts.Head(io='ws'),\n)",
"_____no_output_____"
]
],
[
[
"You can now connect your robot in Unity.",
"_____no_output_____"
],
[
"## Move the neck",
"_____no_output_____"
],
[
"Check that all 3 disks are present and ok.",
"_____no_output_____"
]
],
[
[
"for d in reachy.head.neck.disks:\n print(d, d.temperature)",
"_____no_output_____"
]
],
[
[
"Turn compliant/stiff and check that the head is free or fixed.",
"_____no_output_____"
]
],
[
[
"reachy.head.compliant = True",
"_____no_output_____"
],
[
"reachy.head.compliant = False",
"_____no_output_____"
]
],
[
[
"Go to the base position.",
"_____no_output_____"
]
],
[
[
"reachy.head.compliant = False\nreachy.head.look_at(1, 0, 0, duration=1, wait=True)",
"_____no_output_____"
]
],
[
[
"Play some random moves.",
"_____no_output_____"
]
],
[
[
"x = 0.5\ny = (2 * np.random.rand() - 1) * 0.25\nz = (2 * np.random.rand() - 1) * 0.25\n\nduration = 1\n\nreachy.head.look_at(x, y, z, duration=duration, wait=False)\n\nreal = []\n\nt0 = time.time()\nwhile time.time() - t0 < duration:\n real.append([d.rot_position for d in reachy.head.neck.disks])\n time.sleep(0.01)\n \nplt.figure()\nplt.plot(real)",
"_____no_output_____"
]
],
[
[
"## Move the antennas",
"_____no_output_____"
],
[
"Check that we have both antennas.",
"_____no_output_____"
],
[
"Turn them stiff.",
"_____no_output_____"
]
],
[
[
"for m in reachy.head.motors:\n m.compliant = False",
"_____no_output_____"
]
],
[
[
"Make them go to 0",
"_____no_output_____"
]
],
[
[
"for m in reachy.head.motors:\n m.goal_position = 0",
"_____no_output_____"
]
],
[
[
"Make them go to 45",
"_____no_output_____"
]
],
[
[
"for m in reachy.head.motors:\n m.goal_position = 45",
"_____no_output_____"
]
],
[
[
"(check that they both moved)",
"_____no_output_____"
],
[
"Make them go to 0 again",
"_____no_output_____"
]
],
[
[
"for m in reachy.head.motors:\n m.goal_position = 0",
"_____no_output_____"
]
],
[
[
"Make them follow a sinus for a few seconds.",
"_____no_output_____"
]
],
[
[
"t = np.linspace(0, 10, 1000)\npos = 30 * np.sin(2 * np.pi * 0.5 * t)\n\nfor p in pos:\n for m in reachy.head.motors:\n m.goal_position = p\n time.sleep(0.01)",
"_____no_output_____"
]
],
[
[
"## Access the cameras",
"_____no_output_____"
],
[
"*Note: the cameras don't seem to be working in the simulator for reachy v1.2.3. - PC*",
"_____no_output_____"
],
[
"Check the right camera.",
"_____no_output_____"
]
],
[
[
"success, img = reachy.head.right_camera.read()\n\nif success:\n plt.figure()\n plt.imshow(cv.cvtColor(img, cv.COLOR_BGR2RGB))",
"_____no_output_____"
]
],
[
[
"Check the left camera.",
"_____no_output_____"
]
],
[
[
"success, img = reachy.head.left_camera.read()\n\nif success:\n plt.figure()\n plt.imshow(cv.cvtColor(img, cv.COLOR_BGR2RGB))",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4ac09fe981e530d7978ef9ac7effbbcd77535ce2
| 175,793 |
ipynb
|
Jupyter Notebook
|
TrOCR/Evaluating_TrOCR_base_handwritten_on_the_IAM_test_set.ipynb
|
FrancescoSaverioZuppichini/Transformers-Tutorials
|
a41dffb0b8029cbb72d4d5a65229adb27dc5cf8e
|
[
"MIT"
] | 2 |
2022-03-02T07:16:07.000Z
|
2022-03-02T07:16:18.000Z
|
TrOCR/Evaluating_TrOCR_base_handwritten_on_the_IAM_test_set.ipynb
|
FrancescoSaverioZuppichini/Transformers-Tutorials
|
a41dffb0b8029cbb72d4d5a65229adb27dc5cf8e
|
[
"MIT"
] | 14 |
2021-10-29T20:45:09.000Z
|
2021-12-22T22:51:38.000Z
|
TrOCR/Evaluating_TrOCR_base_handwritten_on_the_IAM_test_set.ipynb
|
FrancescoSaverioZuppichini/Transformers-Tutorials
|
a41dffb0b8029cbb72d4d5a65229adb27dc5cf8e
|
[
"MIT"
] | 1 |
2022-01-28T12:56:44.000Z
|
2022-01-28T12:56:44.000Z
| 40.328745 | 291 | 0.481299 |
[
[
[
"<a href=\"https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/TrOCR/Evaluating_TrOCR_base_handwritten_on_the_IAM_test_set.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"## Set-up environment",
"_____no_output_____"
]
],
[
[
"!pip install -q git+https://github.com/huggingface/transformers.git",
" Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n Preparing wheel metadata ... \u001b[?25l\u001b[?25hdone\n\u001b[K |████████████████████████████████| 596 kB 7.2 MB/s \n\u001b[K |████████████████████████████████| 56 kB 6.4 MB/s \n\u001b[K |████████████████████████████████| 895 kB 59.6 MB/s \n\u001b[K |████████████████████████████████| 3.3 MB 48.1 MB/s \n\u001b[?25h Building wheel for transformers (PEP 517) ... \u001b[?25l\u001b[?25hdone\n"
],
[
"!pip install -q datasets jiwer",
"\u001b[K |████████████████████████████████| 290 kB 8.1 MB/s \n\u001b[K |████████████████████████████████| 243 kB 67.2 MB/s \n\u001b[K |████████████████████████████████| 125 kB 73.8 MB/s \n\u001b[K |████████████████████████████████| 1.3 MB 54.3 MB/s \n\u001b[K |████████████████████████████████| 50 kB 8.3 MB/s \n\u001b[K |████████████████████████████████| 160 kB 57.2 MB/s \n\u001b[K |████████████████████████████████| 271 kB 73.0 MB/s \n\u001b[?25h Building wheel for python-Levenshtein (setup.py) ... \u001b[?25l\u001b[?25hdone\n"
]
],
[
[
"## Load IAM test set",
"_____no_output_____"
]
],
[
[
"import pandas as pd\n\ndf = pd.read_fwf('/content/drive/MyDrive/TrOCR/Tutorial notebooks/IAM/gt_test.txt', header=None)\ndf.rename(columns={0: \"file_name\", 1: \"text\"}, inplace=True)\ndel df[2]\ndf.head()",
"_____no_output_____"
],
[
"import torch\nfrom torch.utils.data import Dataset\nfrom PIL import Image\n\nclass IAMDataset(Dataset):\n def __init__(self, root_dir, df, processor, max_target_length=128):\n self.root_dir = root_dir\n self.df = df\n self.processor = processor\n self.max_target_length = max_target_length\n\n def __len__(self):\n return len(self.df)\n\n def __getitem__(self, idx):\n # get file name + text \n file_name = self.df['file_name'][idx]\n text = self.df['text'][idx]\n # some file names end with jp instead of jpg, the two lines below fix this\n if file_name.endswith('jp'):\n file_name = file_name + 'g'\n # prepare image (i.e. resize + normalize)\n image = Image.open(self.root_dir + file_name).convert(\"RGB\")\n pixel_values = self.processor(image, return_tensors=\"pt\").pixel_values\n # add labels (input_ids) by encoding the text\n labels = self.processor.tokenizer(text, \n padding=\"max_length\", \n max_length=self.max_target_length).input_ids\n # important: make sure that PAD tokens are ignored by the loss function\n labels = [label if label != self.processor.tokenizer.pad_token_id else -100 for label in labels]\n\n encoding = {\"pixel_values\": pixel_values.squeeze(), \"labels\": torch.tensor(labels)}\n return encoding",
"_____no_output_____"
],
[
"from transformers import TrOCRProcessor\n\nprocessor = TrOCRProcessor.from_pretrained(\"microsoft/trocr-base-handwritten\")\ntest_dataset = IAMDataset(root_dir='/content/drive/MyDrive/TrOCR/Tutorial notebooks/IAM/image/',\n df=df,\n processor=processor)",
"_____no_output_____"
],
[
"from torch.utils.data import DataLoader\n\ntest_dataloader = DataLoader(test_dataset, batch_size=8)",
"_____no_output_____"
],
[
"batch = next(iter(test_dataloader))",
"_____no_output_____"
],
[
"for k,v in batch.items():\n print(k, v.shape)",
"pixel_values torch.Size([8, 3, 384, 384])\nlabels torch.Size([8, 128])\n"
],
[
"from transformers import TrOCRProcessor\n\nprocessor = TrOCRProcessor.from_pretrained(\"microsoft/trocr-base-handwritten\")",
"_____no_output_____"
],
[
"labels = batch[\"labels\"]\nlabels[labels == -100] = processor.tokenizer.pad_token_id\nlabel_str = processor.batch_decode(labels, skip_special_tokens=True)\nlabel_str",
"_____no_output_____"
]
],
[
[
"## Run evaluation",
"_____no_output_____"
]
],
[
[
"from transformers import VisionEncoderDecoderModel\nimport torch\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nmodel = VisionEncoderDecoderModel.from_pretrained(\"microsoft/trocr-base-handwritten\")\nmodel.to(device)",
"_____no_output_____"
],
[
"from datasets import load_metric\n\ncer = load_metric(\"cer\")",
"_____no_output_____"
],
[
"from tqdm.notebook import tqdm\n\nprint(\"Running evaluation...\")\n\nfor batch in tqdm(test_dataloader):\n # predict using generate\n pixel_values = batch[\"pixel_values\"].to(device)\n outputs = model.generate(pixel_values)\n\n # decode\n pred_str = processor.batch_decode(outputs, skip_special_tokens=True)\n labels = batch[\"labels\"]\n labels[labels == -100] = processor.tokenizer.pad_token_id\n label_str = processor.batch_decode(labels, skip_special_tokens=True)\n\n # add batch to metric\n cer.add_batch(predictions=pred_str, references=label_str)\n\nfinal_score = cer.compute()",
"Running evaluation...\n"
],
[
"print(\"Character error rate on test set:\", final_score)",
"0.038336078808735505\n"
],
[
"",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
4ac0a8f21e2fbabc6a8433c45b62777e92e11489
| 5,737 |
ipynb
|
Jupyter Notebook
|
notebooks/tutorial/4 - Do datascience exploration.ipynb
|
pureskillgg/makenew-pyskill
|
3045f0639506fcaefd3191dada76277598bbb1eb
|
[
"MIT"
] | null | null | null |
notebooks/tutorial/4 - Do datascience exploration.ipynb
|
pureskillgg/makenew-pyskill
|
3045f0639506fcaefd3191dada76277598bbb1eb
|
[
"MIT"
] | null | null | null |
notebooks/tutorial/4 - Do datascience exploration.ipynb
|
pureskillgg/makenew-pyskill
|
3045f0639506fcaefd3191dada76277598bbb1eb
|
[
"MIT"
] | null | null | null | 25.497778 | 336 | 0.57748 |
[
[
[
"## Time to do some data science\n\nBefore creating a tome, we must decide on how to transform our data before concatenating. Therefore, we will explore the data for a single match. \n\nWe will investigate the number of footsteps players make as a function of rank, wins, and friendly commends.\n\nAfter we developed the code that does our data processing, we moved them to functions and put them in `pureskillgg_makenew_pyskill\\tutorial_datascience\\footsteps_example.py` so that we can import them in the next notebook. This avoids code duplication and will let the PureSkill.gg Coach import these functions in the future!\n\n_**Run this notebook as-is.**_",
"_____no_output_____"
]
],
[
[
"from pureskillgg_makenew_pyskill.notebook import setup_notebook",
"_____no_output_____"
],
[
"setup_notebook(silent=True)",
"_____no_output_____"
],
[
"# %load ../usual_suspects.py\n# pylint: disable=unused-import\nimport time\nimport os\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom pureskillgg_dsdk.tome import create_tome_curator\n\npd.set_option(\"display.max_columns\", 150)\npd.set_option(\"display.max_rows\", 150)\npd.set_option(\"display.min_rows\", 150)\n# pd.set_option('display.float_format', '{:.4f}'.format)\n\ncurator = create_tome_curator()",
"_____no_output_____"
]
],
[
[
"## Read in one match worth of data\n\nThe tome curator also provides a convienent way to grab a random match to do some exploration on. The `get_single_match` method will return the DS Loader for that particular match.\n",
"_____no_output_____"
]
],
[
[
"# Just grab the first match\nmatch_loader = curator.get_match_by_index(0)\n\n# Get the manifest for these data.\nmanifest = match_loader.manifest\n\n# Read in all channels (you can read in a subset if you pass in reading_instructions).\ndata=match_loader.get_channels()",
"_____no_output_____"
]
],
[
[
"## Explore the CSDS\n\nThe CSDS files are rich in data. Feel free to explore them in depth. Here we use the manifest file to see the available channels and how many columns they contain.",
"_____no_output_____"
]
],
[
[
"for channel in manifest['channels']:\n print(channel['channel'], '-', len(channel['columns']), 'columns')",
"_____no_output_____"
]
],
[
[
"## Explore the relevant data and develop the engineering",
"_____no_output_____"
]
],
[
[
"# Inspect player_footstep dataframe\ndata['player_footstep'].head()",
"_____no_output_____"
],
[
"# Count up footsteps per player\ndf_footsteps_total = (\n data['player_footstep']\n .groupby('player_id_fixed', as_index=False)\n .size()\n .rename(columns={'size':'steps'})\n)\ndf_footsteps_total",
"_____no_output_____"
],
[
"# Inspect player_info dataframe\npi = data['player_info']\npi.head()",
"_____no_output_____"
],
[
"# Inspect player_info dataframe\npi_simple = pi[['player_id_fixed', 'commends_friendly', 'wins', 'rank']].groupby('player_id_fixed',as_index=False).max()\npi_simple",
"_____no_output_____"
],
[
"# Get the map name\nmap_name = data['header']['map_name'].iat[0]\nprint(map_name)",
"_____no_output_____"
],
[
"# Combine the data into a final dataframe\ndf_final = pd.merge(df_footsteps_total, pi_simple, how='left', on='player_id_fixed')\ndf_final['map_name'] = map_name\ndf_final",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4ac0b704c184bbd94cc21b799579eb4abcdbadb9
| 1,866 |
ipynb
|
Jupyter Notebook
|
nbs/dl2/00_exports.ipynb
|
brandata/course-v3
|
386150421cbd1fca7eef6fba87a46c61c258afd1
|
[
"Apache-2.0"
] | null | null | null |
nbs/dl2/00_exports.ipynb
|
brandata/course-v3
|
386150421cbd1fca7eef6fba87a46c61c258afd1
|
[
"Apache-2.0"
] | null | null | null |
nbs/dl2/00_exports.ipynb
|
brandata/course-v3
|
386150421cbd1fca7eef6fba87a46c61c258afd1
|
[
"Apache-2.0"
] | null | null | null | 17.277778 | 58 | 0.466238 |
[
[
[
"#export\nTEST = 'test'",
"_____no_output_____"
]
],
[
[
"## Export",
"_____no_output_____"
]
],
[
[
"!python notebook2script.py 00_exports.ipynb",
"Converted 00_exports.ipynb to exp/nb_00.py\r\n"
]
],
[
[
"### How it works:",
"_____no_output_____"
]
],
[
[
"import json\nd = json.load(open('00_exports.ipynb','r'))['cells']",
"_____no_output_____"
],
[
"d[0]",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
4ac0bf8be2d3b90eb6c17b3337a6bb1f3fb1d2f1
| 2,099 |
ipynb
|
Jupyter Notebook
|
Coursera/Cisco Networking Basics Specializations/Course_4-Home_Networking_Basics/Week-2/Week-2-Quiz.ipynb
|
manipiradi/Online-Courses-Learning
|
2a4ce7590d1f6d1dfa5cfde632660b562fcff596
|
[
"MIT"
] | 331 |
2019-10-22T09:06:28.000Z
|
2022-03-27T13:36:03.000Z
|
Coursera/Cisco Networking Basics Specializations/Course_4-Home_Networking_Basics/Week-2/Week-2-Quiz.ipynb
|
manipiradi/Online-Courses-Learning
|
2a4ce7590d1f6d1dfa5cfde632660b562fcff596
|
[
"MIT"
] | 8 |
2020-04-10T07:59:06.000Z
|
2022-02-06T11:36:47.000Z
|
Coursera/Cisco Networking Basics Specializations/Course_4-Home_Networking_Basics/Week-2/Week-2-Quiz.ipynb
|
manipiradi/Online-Courses-Learning
|
2a4ce7590d1f6d1dfa5cfde632660b562fcff596
|
[
"MIT"
] | 572 |
2019-07-28T23:43:35.000Z
|
2022-03-27T22:40:08.000Z
| 20.99 | 154 | 0.540734 |
[
[
[
"#### 1. A user wants to connect to a wireless network at a shopping center. What wireless network setting tells the user the name of the network?",
"_____no_output_____"
],
[
"##### Ans: SSID",
"_____no_output_____"
],
[
"#### 2. Which two statements characterize wireless network security? (Choose two.)",
"_____no_output_____"
],
[
"##### Ans: \n- Using the default IP address on an access point makes hacking easier.\n- With SSID broadcast disabled, an attacker must know the SSID to connect.",
"_____no_output_____"
],
[
"#### 3. What are two types of wired high-speed Internet connections? (Choose two.)",
"_____no_output_____"
],
[
"##### Ans: \n- DSL\n- cable",
"_____no_output_____"
],
[
"#### 4. What can be used to allow visitor mobile devices to connect to a wireless network and restrict access of those devices to only the Internet?",
"_____no_output_____"
],
[
"##### Ans: guest SSID",
"_____no_output_____"
],
[
"#### 5. Which type of device provides an Internet connection through the use of a phone jack?",
"_____no_output_____"
],
[
"##### Ans: DSL modem ",
"_____no_output_____"
]
]
] |
[
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
4ac0c0946bf5ec8a4c0fe677dd9bacc62d438451
| 868,055 |
ipynb
|
Jupyter Notebook
|
LiveModel.ipynb
|
AlexTintin/Face_Recognition_CV_Project
|
6becb159dd3d8f547d617983bd422e3f2a9fb52e
|
[
"MIT"
] | null | null | null |
LiveModel.ipynb
|
AlexTintin/Face_Recognition_CV_Project
|
6becb159dd3d8f547d617983bd422e3f2a9fb52e
|
[
"MIT"
] | null | null | null |
LiveModel.ipynb
|
AlexTintin/Face_Recognition_CV_Project
|
6becb159dd3d8f547d617983bd422e3f2a9fb52e
|
[
"MIT"
] | null | null | null | 1,656.593511 | 185,820 | 0.959499 |
[
[
[
"import argparse\nimport os\nimport sys\nimport torch\nimport torch.nn as nn\n\nimport datasets\nimport models.resnet as ResNet\nimport models.senet as SENet\nfrom liveview import LiveView\nimport utils\n\nconfigurations = {\n 1: dict(\n max_iteration=1000000,\n lr=1.0e-1,\n momentum=0.9,\n weight_decay=0.0,\n gamma=0.1, # \"lr_policy: step\"\n step_size=1000000, # \"lr_policy: step\"\n interval_validate=1000,\n ),\n}\n\ndef get_parameters(model, bias=False):\n for k, m in model._modules.items():\n if k == \"fc\" and isinstance(m, nn.Linear):\n if bias:\n yield m.bias\n else:\n yield m.weight\n\nN_IDENTITY = 8631 # the number of identities in VGGFace2 for which ResNet and SENet are trained\n\nparser = argparse.ArgumentParser(\"PyTorch Face Recognizer\")\nparser.add_argument('--arch_type', type=str, default='resnet50_ft', help='model type',\n choices=['resnet50_ft', 'senet50_ft', 'resnet50_scratch', 'senet50_scratch'])\nparser.add_argument('--log_file', type=str, default='/path/to/log_file', help='log file')\nparser.add_argument('--checkpoint_dir', type=str, default='/path/to/checkpoint_directory',\n help='checkpoints directory')\nparser.add_argument('--feature_dir', type=str, default='/path/to/feature_directory',\n help='directory where extracted features are saved')\nparser.add_argument('-c', '--config', type=int, default=1, choices=configurations.keys(),\n help='the number of settings and hyperparameters used in training')\nparser.add_argument('--batch_size', type=int, default=32, help='batch size')\nparser.add_argument('--resume', type=str, default='', help='checkpoint file')\nparser.add_argument('--weight_file', type=str, default='./resnet50_ft_weight.pkl', help='weight file')\nparser.add_argument('--gpu', type=int, default=0)\nparser.add_argument('-j', '--workers', default=4, type=int, metavar='N',\n help='number of data loading workers (default: 4)')\nparser.add_argument('--horizontal_flip', action='store_true',\n help='horizontally flip images specified in 
test_img_list_file')\n\nos.environ['CUDA_VISIBLE_DEVICES'] = str(0)\ncuda = torch.cuda.is_available()\nif cuda:\n print(\"torch.backends.cudnn.version: {}\".format(torch.backends.cudnn.version()))\n\ntorch.manual_seed(1337)\nif cuda:\n torch.cuda.manual_seed(1337)\n\n# 2. model\ninclude_top = True\nmodel = ResNet.resnet50(num_classes=N_IDENTITY, include_top=include_top)\n\n# print(model)\n\nstart_epoch = 0\nstart_iteration = 0\n\nresume = False\nif resume:\n checkpoint = torch.load(resume)\n model.load_state_dict(checkpoint['model_state_dict'])\n start_epoch = checkpoint['epoch']\n start_iteration = checkpoint['iteration']\n assert checkpoint['arch'] == 'resnet50_ft'\n print(\"Resume from epoch: {}, iteration: {}\".format(start_epoch, start_iteration))\nelse:\n utils.load_state_dict(model, './resnet50_ft_weight.pkl')\n\nif cuda:\n model = model.cuda()\n\n\nprint(\"MODEL LOADED!\")",
"torch.backends.cudnn.version: 7401\nMODEL LOADED!\n"
],
[
"import torch as t\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom torchvision import models, transforms\n\nnormalize = transforms.Normalize(\n mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]\n)\npreprocess = transforms.Compose([\n transforms.ToTensor(),\n normalize\n])\n\ndef FeatureGeneration(model,targets):\n target_features = []\n\n for target in targets:\n batch = [preprocess(perspective).cuda() for perspective in target ]\n batch = t.stack(batch)\n target_features.append(model(batch))\n \n return target_features",
"_____no_output_____"
],
[
"from PIL import Image\nimport matplotlib\nfrom matplotlib import pyplot as plt\nimport numpy as np\n%matplotlib inline\n",
"_____no_output_____"
],
[
"shah = np.array(Image.open(\"Data/Dr_Shah.jpg\"))\nshah = shah[:175,25:200]\nplt.imshow(shah)\nprint(shah.shape)",
"(175, 175, 3)\n"
],
[
"shah = Image.fromarray(shah)\nshah = shah.resize((244,244),Image.BILINEAR)\nshah = np.array(shah)\nplt.imshow(shah)\nprint(shah.shape)",
"(244, 244, 3)\n"
],
[
"target = Image.open(\"Data/cvpr2019.jpg\")\ntarget = np.array(target)\nprint(target.shape)\nplt.imshow(target)",
"(698, 1586, 3)\n"
],
[
"ex = target[90:170,730:810]\nplt.imshow(ex)\nprint(ex.shape)",
"(80, 80, 3)\n"
],
[
"# Resizing up, factor of 3. 2100,4800. \ntarget = Image.open(\"Data/cvpr2019.jpg\")\ntarget = target.resize((4800,2100),Image.BILINEAR)\ntarget = np.array(target)\ntarget = target[200:1250,1000:3000]\nprint(target.shape)\nplt.imshow(target)",
"(1050, 2000, 3)\n"
],
[
"feat_gen = nn.Sequential(*list(model.children())[:-4])",
"_____no_output_____"
],
[
"torch.cuda.empty_cache()\n# torch.cuda.empty_cache()",
"_____no_output_____"
],
[
"face_features = FeatureGeneration(feat_gen,[[shah]])[0]\nsearch_features = FeatureGeneration(feat_gen,[[target]])[0]",
"_____no_output_____"
],
[
"output = (F.conv2d(search_features, face_features).squeeze(0)).squeeze(0).detach().cpu().numpy()",
"_____no_output_____"
],
[
"print(output.shape)\nh,w = output.shape\nplt.imshow(output)\nprint(np.amax(output))\npos = np.argmax(output)\nidx = (pos // 220), (pos%220)\npy,px = (pos // 220)/h, (pos%220)/w\nh_0,w_0,c_0 = target.shape\ntr_y,tr_x = int(h_0 * py), int(w_0 * px)\nprint(idx,py,px)\nprint(tr_y,tr_x)",
"(101, 220)\n442557.8\n(8, 153) 0.07920792079207921 0.6954545454545454\n83 1390\n"
],
[
"for w in range(5):\n for x in range(244):\n target[tr_y-w ,tr_x-x ] = [255,0,0]\n target[tr_y+244+w,tr_x-x ] = [255,0,0] \n for y in range(244):\n target[tr_y+y ,tr_x+w ] = [255,0,0]\n target[tr_y+y ,tr_x-244-w] = [255,0,0] \nplt.imshow(target)",
"_____no_output_____"
],
[
"ex = target[tr_y:tr_y+244,tr_x-244:tr_x]\nplt.imshow(ex)",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4ac0c0eeb4f1419c2c212a0929791d1692dc0be0
| 13,756 |
ipynb
|
Jupyter Notebook
|
examples/greenformer_factorize_bert.ipynb
|
SamuelCahyawijaya/greenformer
|
154ca1faf274e7551ee7dec2953a8be0ceb5058f
|
[
"MIT"
] | 3 |
2021-09-17T00:28:23.000Z
|
2022-03-17T23:28:41.000Z
|
examples/greenformer_factorize_bert.ipynb
|
SamuelCahyawijaya/greenformer
|
154ca1faf274e7551ee7dec2953a8be0ceb5058f
|
[
"MIT"
] | null | null | null |
examples/greenformer_factorize_bert.ipynb
|
SamuelCahyawijaya/greenformer
|
154ca1faf274e7551ee7dec2953a8be0ceb5058f
|
[
"MIT"
] | null | null | null | 20.748115 | 402 | 0.509014 |
[
[
[
"import os, sys\nimport torch\nfrom transformers import BertModel, BertConfig\nfrom greenformer import auto_fact\nfrom itertools import chain\n\nfrom os import path\nimport sys",
"_____no_output_____"
],
[
"def count_param(module, trainable=False):\n if trainable:\n return sum(p.numel() for p in module.parameters() if p.requires_grad)\n else:\n return sum(p.numel() for p in module.parameters())",
"_____no_output_____"
]
],
[
[
"# Init Model",
"_____no_output_____"
]
],
[
[
"config = BertConfig.from_pretrained('bert-base-uncased')\nmodel = BertModel(config=config)\nmodel = BertModel.from_pretrained('bert-base-uncased')",
"Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertModel: ['cls.predictions.bias', 'cls.predictions.transform.dense.bias', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.transform.LayerNorm.weight', 'cls.seq_relationship.weight', 'cls.predictions.decoder.weight', 'cls.seq_relationship.bias', 'cls.predictions.transform.dense.weight']\n- This IS expected if you are initializing BertModel from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n- This IS NOT expected if you are initializing BertModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n"
],
[
"count_param(model)",
"_____no_output_____"
]
],
[
[
"# Factorize Model",
"_____no_output_____"
],
[
"### Apply absolute rank",
"_____no_output_____"
]
],
[
[
"%%time\nfact_model = auto_fact(model, rank=256, deepcopy=True, solver='random', num_iter=20)\ncount_param(fact_model)",
"CPU times: user 947 ms, sys: 512 ms, total: 1.46 s\nWall time: 401 ms\n"
],
[
"%%time\nfact_model = auto_fact(model, rank=256, deepcopy=True, solver='svd', num_iter=20)\ncount_param(fact_model)",
"CPU times: user 5min 57s, sys: 7.44 s, total: 6min 4s\nWall time: 23.7 s\n"
],
[
"%%time\nfact_model = auto_fact(model, rank=256, deepcopy=True, solver='snmf', num_iter=20)\ncount_param(fact_model)",
"CPU times: user 5min 33s, sys: 17 s, total: 5min 50s\nWall time: 23.6 s\n"
]
],
[
[
"### Apply percentage rank",
"_____no_output_____"
]
],
[
[
"%%time\nfact_model = auto_fact(model, rank=0.4, deepcopy=True, solver='random', num_iter=20)\ncount_param(fact_model)",
"CPU times: user 1.51 s, sys: 521 ms, total: 2.03 s\nWall time: 428 ms\n"
],
[
"%%time\nfact_model = auto_fact(model, rank=0.4, deepcopy=True, solver='svd', num_iter=20)\ncount_param(fact_model)",
"CPU times: user 4min 33s, sys: 3.34 s, total: 4min 36s\nWall time: 17.7 s\n"
],
[
"%%time\nfact_model = auto_fact(model, rank=0.4, deepcopy=True, solver='snmf', num_iter=20)\ncount_param(fact_model)",
"CPU times: user 5min 9s, sys: 14.1 s, total: 5min 23s\nWall time: 21.5 s\n"
]
],
[
[
"### Apply factorization only on specific modules",
"_____no_output_____"
]
],
[
[
"# Only factorize last 6 transformer layers and the pooler layer of the model\nfactorizable_submodules = list(model.encoder.layer[6:]) + [model.pooler]",
"_____no_output_____"
],
[
"%%time\nfact_model = auto_fact(model, rank=0.2, deepcopy=True, solver='random', num_iter=20, submodules=factorizable_submodules)\ncount_param(fact_model)",
"CPU times: user 1.01 s, sys: 388 ms, total: 1.39 s\nWall time: 197 ms\n"
],
[
"%%time\nfact_model = auto_fact(model, rank=0.2, deepcopy=True, solver='svd', num_iter=20, submodules=factorizable_submodules)\ncount_param(fact_model)",
"CPU times: user 1min 16s, sys: 1.04 s, total: 1min 18s\nWall time: 5 s\n"
],
[
"%%time\nfact_model = auto_fact(model, rank=0.2, deepcopy=True, solver='snmf', num_iter=20, submodules=factorizable_submodules)\ncount_param(fact_model)",
"CPU times: user 1min 55s, sys: 2.34 s, total: 1min 57s\nWall time: 7.72 s\n"
]
],
[
[
"# Speed test on CPU",
"_____no_output_____"
],
[
"### Test Inference CPU",
"_____no_output_____"
]
],
[
[
"%%timeit\nwith torch.no_grad():\n y = model(torch.zeros(32,256, dtype=torch.long))",
"4.15 s ± 31.3 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n"
],
[
"%%timeit\nwith torch.no_grad():\n y = fact_model(torch.zeros(32,256, dtype=torch.long))",
"3.01 s ± 37.5 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n"
]
],
[
[
"### Test Forward-Backward CPU",
"_____no_output_____"
]
],
[
[
"%%timeit\ny = model(torch.zeros(8,256, dtype=torch.long))\ny.last_hidden_state.sum().backward()",
"3.33 s ± 158 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n"
],
[
"%%timeit\ny = fact_model(torch.zeros(8,256, dtype=torch.long))\ny.last_hidden_state.sum().backward()",
"2.41 s ± 144 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n"
]
],
[
[
"# Speed test on GPU",
"_____no_output_____"
],
[
"### Move models to GPU",
"_____no_output_____"
]
],
[
[
"model = model.cuda()\nfact_model = fact_model.cuda()",
"_____no_output_____"
]
],
[
[
"### Test Inference GPU",
"_____no_output_____"
]
],
[
[
"x = torch.zeros(64,256, dtype=torch.long).cuda()",
"_____no_output_____"
],
[
"%%timeit\nwith torch.no_grad():\n y = model(x)",
"785 ms ± 2.1 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)\n"
],
[
"%%timeit\nwith torch.no_grad():\n y = fact_model(x)",
"683 ms ± 123 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n"
]
],
[
[
"### Test Forward-Backward GPU",
"_____no_output_____"
]
],
[
[
"x = torch.zeros(16,256, dtype=torch.long).cuda()",
"_____no_output_____"
],
[
"%%timeit\ny = model(x)\ny.last_hidden_state.sum().backward()",
"672 ms ± 15.8 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n"
],
[
"%%timeit\ny = fact_model(x)\ny.last_hidden_state.sum().backward()",
"518 ms ± 14 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
4ac0cb3b34c4dee88a00770172d41d64fe371c7c
| 3,215 |
ipynb
|
Jupyter Notebook
|
GeoJSON.ipynb
|
nhorlock/XeusClingTalk
|
44ef939920c0781fb285167932098e02d6bf4934
|
[
"MIT"
] | 4 |
2019-11-29T07:48:57.000Z
|
2021-01-16T13:46:25.000Z
|
GeoJSON.ipynb
|
carlosal1015/XeusClingTalk
|
44ef939920c0781fb285167932098e02d6bf4934
|
[
"MIT"
] | null | null | null |
GeoJSON.ipynb
|
carlosal1015/XeusClingTalk
|
44ef939920c0781fb285167932098e02d6bf4934
|
[
"MIT"
] | 1 |
2019-12-02T18:21:25.000Z
|
2019-12-02T18:21:25.000Z
| 19.603659 | 95 | 0.48958 |
[
[
[
"empty"
]
]
] |
[
"empty"
] |
[
[
"empty"
]
] |
4ac0cc100728de8a85df596e8b49fc46a557bfdb
| 272,713 |
ipynb
|
Jupyter Notebook
|
90_workshops/202011_ac_training_school/1_overview_ac_satellite_data/05_sentinel3_NRT_SLSTR_AOD_load_browse.ipynb
|
trivedi-c/atm_Practical3
|
1cd1e0bd263d274cada781d871ad37314ebe139e
|
[
"MIT"
] | null | null | null |
90_workshops/202011_ac_training_school/1_overview_ac_satellite_data/05_sentinel3_NRT_SLSTR_AOD_load_browse.ipynb
|
trivedi-c/atm_Practical3
|
1cd1e0bd263d274cada781d871ad37314ebe139e
|
[
"MIT"
] | null | null | null |
90_workshops/202011_ac_training_school/1_overview_ac_satellite_data/05_sentinel3_NRT_SLSTR_AOD_load_browse.ipynb
|
trivedi-c/atm_Practical3
|
1cd1e0bd263d274cada781d871ad37314ebe139e
|
[
"MIT"
] | 1 |
2021-10-30T00:54:07.000Z
|
2021-10-30T00:54:07.000Z
| 192.45801 | 139,580 | 0.776127 |
[
[
[
"<img src='./img/EU-Copernicus-EUM_3Logos.png' alt='Logo EU Copernicus EUMETSAT' align='right' width='40%'></img>",
"_____no_output_____"
],
[
"<br>",
"_____no_output_____"
],
[
"<a href=\"./00_index.ipynb\"><< Index </a><br>\n<a href=\"./04_sentinel3_NRT_SLSTR_FRP_load_browse.ipynb\"><< 04 - Sentinel-3 NRT SLSTR FRP - Load and browse </a><span style=\"float:right;\"><a href=\"./06_IASI_L2_load_browse.ipynb\">06 - IASI Level 2 - Load and browse >></a></span>",
"_____no_output_____"
],
[
"<br>",
"_____no_output_____"
],
[
"<div class=\"alert alert-block alert-warning\">\n<b>LOAD, BROWSE AND VISUALIZE</b></div>",
"_____no_output_____"
],
[
"# Sentinel-3 Near Real Time SLSTR Aerosol Optical Depth (AOD)",
"_____no_output_____"
],
[
"The [Copernicus Sentinel-3 Near Real Time Aerosol Optical Depth (AOD)](https://www.eumetsat.int/website/home/News/DAT_5150095.html) product quantifies the abundance of all aerosol particles suspended in the air and monitors their global distribution and long-range transport, at the scale of 9.5 x 9.5 km2. \n\nIt is only applicable during daytime. The current version of the NRT S3 AOD product is considered as 'preliminary operational' over ocean surfaces, and 'demonstrational' over land surfaces. It is only applicable during daytime\n\nAll these observations are made available in less than three hours from the SLSTR observation sensing time.\n\nThe following workflow is based on an example of `Sentinel-3 Near Real Time SLSTR AOD` data on 1 October 2020. As a comparison, you see below the Sentinel-3 OLCI Red Green Blue composites for the same day, which clearly shows the smoke plumes along the Californian coast resulting from the fires.\n ",
"_____no_output_____"
],
[
"<br>",
"_____no_output_____"
],
[
"\n<div style='text-align:center;'>\n<figure><img src='./img/s3_olci_1203.png' width='80%'/>\n <figcaption><i>RGB composites of Sentinel-OLCI Level 1 data on 3 December 2019</i></figcaption>\n</figure>\n</div>\n ",
"_____no_output_____"
],
[
"<hr>",
"_____no_output_____"
],
[
"### Outline\n \n \n* [Example: Australian Fires - December 2019](#australian_fires)\n * [1 - Load Sentinel-3 SLSTR AOD data](#load_cal)\n * [2 - Extract AOD variables](#extract_cal)\n * [3 - Visualize AOD Ocean and AOD land information](#visualize_cal)",
"_____no_output_____"
],
[
"<hr>",
"_____no_output_____"
],
[
"#### Load required libraries",
"_____no_output_____"
]
],
[
[
"import xarray as xr\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport matplotlib.pyplot as pltfacebook\nimport matplotlib.colors as colors\nimport matplotlib.cm as cm\nimport matplotlib.pyplot as plt\nimport cartopy.crs as ccrs\nimport cartopy.feature as cfeature\nfrom cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER",
"_____no_output_____"
]
],
[
[
"<hr>",
"_____no_output_____"
],
[
"# <a id='australian_fires'></a>Example: Australian fires in December 2019",
"_____no_output_____"
],
[
"## <a id='load'></a>Load Sentinel-3 SLSTR AOD data",
"_____no_output_____"
],
[
"The Near-Real-Time Sentinel-3 Aerosol Optical Depth data are disseminated in `netCDF`. `netCDF` data can be loaded with the Python library [xarray](http://xarray.pydata.org/en/stable/) and its function `xr.open_dataset()`. \n\nYou see that the data file contains two `dimensions`:\n* `columns` and\n* `rows`.\n\nIt further contains an long list of `data variables`, including:\n* `AOD_550`,\n* `AOD_550_uncertainty`,\n* `AOD_550_Ocean_NonFiltered`,\n* `AOD_550_Land_Experimental_PostFiltered`,\n...\n\nA data file also contains a set of `attributes`, which give you more information about the data file and the data it contains, e.g the `start_time` and `stop_time` or the `product_name`.",
"_____no_output_____"
]
],
[
[
"file = xr.open_dataset('../eodata/sentinel3/slstr/2019/12/03/AOD_Australia_20191203.nc')\nfile",
"_____no_output_____"
]
],
[
[
"<br>",
"_____no_output_____"
],
[
"### <a id='extract'></a>Extract Aerosol Optical Depth variables",
"_____no_output_____"
],
[
"The next step is to extract the variables of interest. Let us select the following two variables:\n* `AOD_550`: it is the Aerosol Optical Depth at 550nm. (*Note: it only covers ocean surfaces.*)\n* `AOD_550_Land_Experimental_PostFiltered`: it is the Aerosol Optical Depth at 550nm. (*Note: it only covers land surfaces.*)\n\nBoth `DataArrays` have two dimensions (`rows` and `columns`) and the following attributes, which provide additional information about the variables:\n* `long_name`\n* `standard_name`\n* `valid_min`\n* `valid_max`\n* `coordinates`",
"_____no_output_____"
]
],
[
[
"aod_ocean = file.AOD_550\naod_land = file.AOD_550_Land_Experimental_PostFiltered\nprint(aod_ocean)\nprint(' ')\nprint(aod_land)",
"<xarray.DataArray 'AOD_550' (rows: 126, columns: 157)>\narray([[nan, nan, nan, ..., nan, nan, nan],\n [nan, nan, nan, ..., nan, nan, nan],\n [nan, nan, nan, ..., nan, nan, nan],\n ...,\n [nan, nan, nan, ..., nan, nan, nan],\n [nan, nan, nan, ..., nan, nan, nan],\n [nan, nan, nan, ..., nan, nan, nan]], dtype=float32)\nDimensions without coordinates: rows, columns\nAttributes:\n long_name: Aerosol optical thickness at 550 nm - Best quality (post-...\n standard_name: atmosphere_optical_thickness_due_to_ambient_aerosol\n valid_min: 0.0\n valid_max: 4.001\n coordinates: latitude, longitude\n \n<xarray.DataArray 'AOD_550_Land_Experimental_PostFiltered' (rows: 126, columns: 157)>\narray([[nan, nan, nan, ..., nan, nan, nan],\n [nan, nan, nan, ..., nan, nan, nan],\n [nan, nan, nan, ..., nan, nan, nan],\n ...,\n [nan, nan, nan, ..., nan, nan, nan],\n [nan, nan, nan, ..., nan, nan, nan],\n [nan, nan, nan, ..., nan, nan, nan]], dtype=float32)\nDimensions without coordinates: rows, columns\nAttributes:\n long_name: Aerosol optical thickness at 550 nm - Only over land surf...\n standard_name: atmosphere_optical_thickness_due_to_ambient_aerosol\n valid_min: 0.0\n valid_max: 4.001\n coordinates: latitude, longitude\n"
]
],
[
[
"<br>",
"_____no_output_____"
],
[
"You can also load `latitude` and `longitude` information, which can be used later for visualizing the variables.",
"_____no_output_____"
]
],
[
[
"lat_nc = file.latitude\nlon_nc = file.longitude\n\nlat_nc, lon_nc",
"_____no_output_____"
]
],
[
[
"<br>",
"_____no_output_____"
],
[
"### <a id='visualize'></a> Visualize AOD Ocean and AOD Land variables",
"_____no_output_____"
],
[
"The final step is to visualize both variables, Aerosol Optical Depth over ocean and land together in one plot. You can use matplotlib's function `pcolormesh` for it.\n\nLet us define a visualisation function called [visualize_pcolormesh_aod](./functions.ipynb#visualize_pcolormesh_aod) which visualizes both AOD variables together onto a map. The function takes the following keyword arguments (kwargs):\n* `aod_ocean`: DataArray with AOD values over ocean\n* `aod_land`: DataArray with AOD values over land\n* `latitude`: DataArray with latitude information\n* `longitude`: DataArray with longitude information\n* `title`: Title of the plot\n* `unit`: Unit of AOD\n* `vmin` and `vmax`: Minimum and maximum values to be displayed on the map\n* `color_scale`: Color scale the data shall be represented\n* `projection`: Projection of the map",
"_____no_output_____"
]
],
[
[
"def visualize_pcolormesh_aod(aod_ocean, aod_land, latitude, longitude, title, unit, vmin, vmax, color_scale, projection):\n fig=plt.figure(figsize=(12, 12))\n\n ax=plt.axes(projection=projection)\n ax.coastlines(linewidth=1.5, linestyle='solid', color='k', zorder=10)\n\n gl = ax.gridlines(draw_labels=True, linestyle='--')\n gl.top_lables=False\n gl.right_labels=False\n gl.xformatter=LONGITUDE_FORMATTER\n gl.yformatter=LATITUDE_FORMATTER\n gl.xlabel_style={'size':12}\n gl.ylabel_style={'size':12}\n\n\n img1 = plt.pcolormesh(longitude, latitude, aod_ocean, transform=ccrs.PlateCarree(), vmin=vmin, vmax=vmax, cmap=color_scale)\n img2 = plt.pcolormesh(longitude, latitude, aod_land, transform=ccrs.PlateCarree(), vmin=vmin, vmax=vmax, cmap=color_scale)\n ax.set_title(title, fontsize=20, pad=20.0)\n\n cbar = fig.colorbar(img1, ax=ax, orientation='vertical', fraction=0.04, pad=0.05)\n cbar.set_label(unit, fontsize=16)\n cbar.ax.tick_params(labelsize=14)\n\n plt.show()",
"_____no_output_____"
]
],
[
[
"<br>",
"_____no_output_____"
],
[
"Now, let us apply the function [visualize_pcolormesh](./functions.ipynb#visualize_pcolormesh) to visualize both variables, AOD Ocean and AOD Land.",
"_____no_output_____"
]
],
[
[
"visualize_pcolormesh_aod(aod_ocean, \n aod_land, \n lat_nc, lon_nc, \n 'Aerosol Optical Thickness at 550 nm', \n '~', \n 0.,\n 1.0,\n cm.RdYlBu_r,\n ccrs.Mercator())",
"_____no_output_____"
]
],
[
[
"<br>",
"_____no_output_____"
],
[
"<br>",
"_____no_output_____"
],
[
"<a href=\"./00_index.ipynb\"><< Index </a><br>\n<a href=\"./04_sentinel3_NRT_SLSTR_FRP_load_browse.ipynb\"><< 04 - Sentinel-3 NRT SLSTR FRP - Load and browse </a><span style=\"float:right;\"><a href=\"./06_IASI_L2_load_browse.ipynb\">06 - IASI Level 2 - Load and browse >></a></span>",
"_____no_output_____"
],
[
"<hr>",
"_____no_output_____"
],
[
"<img src='./img/copernicus_logo.png' alt='Logo EU Copernicus' align='right' width='20%'><br><br><br><br>\n\n<p style=\"text-align:right;\">This project is licensed under the <a href=\"./LICENSE\">MIT License</a> and is developed under a Copernicus contract.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
4ac0d61ffc5d294cf66318d0cd46053c2775928b
| 298,824 |
ipynb
|
Jupyter Notebook
|
docs/projects/estimating-keane-and-wolpin-1997-msm.ipynb
|
MaxBlesch/respy
|
ddde5dec1dc650684d4d601e74d6a4f96d0f80ed
|
[
"MIT"
] | null | null | null |
docs/projects/estimating-keane-and-wolpin-1997-msm.ipynb
|
MaxBlesch/respy
|
ddde5dec1dc650684d4d601e74d6a4f96d0f80ed
|
[
"MIT"
] | null | null | null |
docs/projects/estimating-keane-and-wolpin-1997-msm.ipynb
|
MaxBlesch/respy
|
ddde5dec1dc650684d4d601e74d6a4f96d0f80ed
|
[
"MIT"
] | null | null | null | 384.586873 | 52,672 | 0.93614 |
[
[
[
"# Keane and Wolpin (1997)\n**Parameter Estimation via the Method of Simulated Moments (MSM)**",
"_____no_output_____"
],
[
"In their seminal paper on the career decisions of young men, Keane and Wolpin (1997) estimate a life-cycle model for occupational choice based on NLSY data for young white men. The paper contains a basic and an extended specification of the model. Both models allow for five choice alternatives in each period: white collar sector work, blue collar sector work, military work, school, and staying home. Choice options come with pecuniary and/or non-pecuniary rewards. Agents are assumed to be forward-looking and act under uncertainty because of the occurrence of alternative-specific shocks that affect the current reward of alternatives and only become known to individuals in the period they occur in. Individuals thus form expectations about future shocks and in each period choose the option that maximizes the expected present value of current and future lifetime rewards.\n\nThe extended model compared to the base specification expands the model by introducing more complex skill technology functions that for example allow for skill depreciation and age effects, job mobility and search costs, non-pecuniary rewards for work, re-entry costs for school, and some common returns for school.",
"_____no_output_____"
]
],
[
[
"<div class=\"d-flex flex-row gs-torefguide\">\n <span class=\"badge badge-info\">Explanations</span>\n\n For extensive model description see <a\n href=\"../explanations/economic_model.html.\">The Economic Model</a>.\n</div>",
"_____no_output_____"
]
],
[
[
"`respy` is able to solve, simulate, and estimate both model specifications. Within `respy`, they are referred to as `kw_97_basic` and `kw_97_extended`. However, using the parameters from the paper, `respy` returns life-cycle patterns that differ from the ones presented in the paper, prompting us to re-estimate them using the Method of Simulated Moments (MSM). The model specification can be loaded using the function `get_example_model` as demonstrated below. The returned parameter vector contains the estimated parameters from the paper and the returned DataFrame contains the 'observed' NLSY data.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nimport respy as rp\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"params_basic_kw, options, data_obs = rp.get_example_model(\"kw_97_basic\")",
"_____no_output_____"
]
],
[
[
"## Choice patterns and Rewards for Parameters in `kw_97_basic`\n\nTo investigate the parameter specification presented for the basic model in Keane and Wolpin (1997), we will look at the choice frequencies in each period and compare them to the observed data. While the NLSY data is only observed for the first 11 years, the models can be used to predict choices over the entire work life of agents. The standard time horizon in `kw_97_basic` is 50 periods since Keane and Wolpin (1997) fix the terminal age to 65 with individuals entering the sample at age 15. We will thus inspect how well the model generated by the parameters can fit the observed data, as well as the predictions it makes for the rest of the life-cycle.",
"_____no_output_____"
],
[
"### Choice Patterns\n\nAs a first step, we will look at the choices of agents over time. To do this, we can simulate data based on the parameters from `kw_97_basic` and compute the choice frequencies in each period. We then plot them against the observed choices.",
"_____no_output_____"
]
],
[
[
"simulate = rp.get_simulate_func(params_basic_kw, options)",
"_____no_output_____"
],
[
"data_sim_kw = simulate(params_basic_kw)",
"_____no_output_____"
],
[
"def calc_choice_frequencies(df):\n \"\"\"Compute choice frequencies.\"\"\"\n return df.groupby(\"Period\").Choice.value_counts(normalize=True).unstack()",
"_____no_output_____"
],
[
"choices_obs = calc_choice_frequencies(data_obs)",
"_____no_output_____"
],
[
"choices_obs = calc_choice_frequencies(data_obs)\nchoices_kw = calc_choice_frequencies(data_sim_kw)",
"_____no_output_____"
],
[
"def plot_moments(moments_obs, moments_sim, labels, colors):\n \"\"\"Plot moments.\"\"\"\n plt.figure(figsize=(14, 4))\n for i, (label, color) in enumerate(zip(labels, colors)):\n plt.subplot(1, 5, i + 1)\n plt.tight_layout()\n plt.title(label.capitalize())\n plt.xlabel(\"Period\")\n plt.plot(moments_sim[label], color=color)\n plt.plot(moments_obs[label], color=\"black\", linestyle=\"dashed\")\n plt.ylim(0, 1)\n plt.xlim(0, 50)",
"_____no_output_____"
],
[
"choices = [\"blue_collar\", \"white_collar\", \"school\", \"home\", \"military\"]\ncolors = [\"tab:blue\", \"tab:orange\", \"tab:green\", \"tab:red\", \"tab:purple\"]",
"_____no_output_____"
]
],
[
[
"The plots below show the choice frequencies of individuals for the five different choice alternatives. The colored lines represent the simulated dataset while the black dotted lines show the choices observed in the NLSY data. The simulated data does not seem to fit the observed data very well. The percentage of individuals choosing the white collar occupation is too high while all other choices are very underrepresented in the simulated data. ",
"_____no_output_____"
]
],
[
[
"plot_moments(\n moments_obs=choices_obs, moments_sim=choices_kw, labels=choices, colors=colors,\n)",
"_____no_output_____"
]
],
[
[
"### Experience-Wage Profiles over the Life-Cycle \n\nAs a next step, we will inspect the experience-wage profiles suggested by the model. The function below computes the wages of a skill **type 0** individual (skill endowment types are a source of heterogeneity between individuals in the model) with **10 years of schooling** for the given wage parameters if they enter an occupation in period 0 and stay in that occupation for their entire life-cycle.",
"_____no_output_____"
]
],
[
[
"def get_experience_profile(params, options, occupation):\n\n # To fix ideas we look at a Type 0 individual with 10 years of schooling\n # who immediately starts to work in the labor market.\n covars = [1, 10, 0, 0, 0, 0, 0, 0, 0]\n\n wages = list()\n for period in range(options[\"n_periods\"]):\n if occupation == \"blue_collar\":\n covars[3] = period\n covars[4] = period ** 2 / 100\n elif occupation == \"white_collar\":\n covars[2] = period\n covars[3] = period ** 2 / 100\n\n wage = np.exp(np.dot(covars, params.loc[f\"wage_{occupation}\", \"value\"]))\n wages.append(wage)\n\n return wages",
"_____no_output_____"
],
[
"def plot_experience_profiles(params, options):\n\n colors = [\"tab:blue\", \"tab:orange\"]\n occupations = [\"blue_collar\", \"white_collar\"]\n\n fig, ax = plt.subplots(1, 2, figsize=(12, 4))\n for i, (label, color) in enumerate(zip(occupations, colors)):\n wage_profile = get_experience_profile(params, options, label)\n ax[i].plot(range(options[\"n_periods\"]), wage_profile, color=color)\n ax[i].set_xlabel(\"Experience in Periods\")\n ax[i].set_ylabel(\"Wages\")\n ax[i].set_title(label)\n plt.tight_layout()",
"_____no_output_____"
]
],
[
[
"We can then plot the wage-experience profiles of the three occupations. The wage profiles do not seem very realistic, as especially the white collar occupation sees unlimited wage growth into the millions. The curve is missing the characteristic flattening (slowed and sometimes even negative wage growth) in later stages of life that is well documented in the life-cycle wage literature (Heckman et al., 2006).\n\nThe military option is purposely left out in this plot since the low number of observations in the NLSY data for this occupation does not allow for the construction of an appropriate experience-wage profile.",
"_____no_output_____"
]
],
[
[
"plot_experience_profiles(params_basic_kw, options)",
"_____no_output_____"
]
],
[
[
"Since this flattening characteristic is controlled by the exponential term in the wage equation, we can see if adjusting this parameter improves the wage profile for the white collar occupation. The parameter specification in the paper gives this parameter a value of -0.0461. We will choose a smaller value in an attempt to flatten the curve in later periods.",
"_____no_output_____"
]
],
[
[
"params_new = params_basic_kw.copy()\nparams_new.loc[(\"wage_white_collar\", \"exp_white_collar_square\"), \"value\"] = -0.15",
"_____no_output_____"
]
],
[
[
"As the plots below show, the new value for `(wage_white_collar, exp_white_collar_square)` produces a more realistic wage profile and wage for the white collar occupation:",
"_____no_output_____"
]
],
[
[
"plot_experience_profiles(params_new, options)",
"_____no_output_____"
]
],
[
[
"## Estimation of the Basic Model\n\nSince there seems to be a possibility of improvements, we attempt to estimate the parameters via MSM to improve the fit. The estimation setup for MSM follows the pattern already established in other articles of this documentation. For the estimation we use moments that capture the choice frequencies for each period and mean wages as well as their standard deviation. The weighting matrix used is a diagonal inverse variance weighting matrix. Interested readers can refer to the guides below for more information on MSM estimation with `respy`.",
"_____no_output_____"
]
],
[
[
"<div class=\"d-flex flex-row gs-torefguide\">\n <span class=\"badge badge-info\">How-to-Guide</span>\n\n For extensive model description see <a\n href=\"../how_to_guides/msm.html\">How to Specify a Criterion Function for MSM</a>.\n</div>",
"_____no_output_____"
],
[
"<div class=\"d-flex flex-row gs-torefguide\">\n <span class=\"badge badge-info\">How-to-Guide</span>\n\n Find out more about estimation in <a\n href=\"../how_to_guides/msm_estimation_exercise.html\">How to Estimate Model Parameters with MSM</a>.\n</div>",
"_____no_output_____"
]
],
[
[
"### Choice Patterns\n\nWe will first investigate the choice patterns of individuals over the 11 observed periods and the predicted choices in later periods. The plot below shows the choice frequencies for the observed data and simulated data for the specification in Keane and Wolpin (1997) and our estimates respectively in a stacked area plot. The newly estimated parameters are named with the suffix `_respy`.",
"_____no_output_____"
]
],
[
[
"params_basic_respy, _, _ = rp.get_example_model(\"kw_97_basic_respy\")",
"_____no_output_____"
],
[
"data_sim_new = simulate(params_basic_respy)",
"_____no_output_____"
],
[
"fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(15, 5))\ncalc_choice_frequencies(data_sim_kw)[choices].plot(\n kind=\"area\",\n stacked=True,\n ax=axes[0],\n xlim=[0, 11],\n title=\"Simulated Choices KW 97\",\n linewidth=0.1,\n)\ncalc_choice_frequencies(data_obs)[choices].plot(\n kind=\"area\", stacked=True, ax=axes[1], title=\"Observed Choices\", linewidth=0.1\n)\ncalc_choice_frequencies(data_sim_new)[choices].plot(\n kind=\"area\",\n stacked=True,\n ax=axes[2],\n xlim=[0, 11],\n title=\"Simulated Choices New Estimates\",\n linewidth=0.1,\n)",
"_____no_output_____"
]
],
[
[
"Plotting the choices separately against their observed counterpart also reveals a much better fit.",
"_____no_output_____"
]
],
[
[
"choices_new = calc_choice_frequencies(data_sim_new)",
"_____no_output_____"
],
[
"plot_moments(\n moments_obs=choices_obs, moments_sim=choices_new, labels=choices, colors=colors,\n)",
"_____no_output_____"
]
],
[
[
"### Experience-Wage Profiles\n\nThe wage profiles have attained a more realistic shape although the earned wages in later periods are still unreasonably high. These problems in wage growth are similar to the ones shown by Keane and Wolpin (1997) for the basic specification and give way to the expanded model, which promises a more reasonable development of life-cycle wages.",
"_____no_output_____"
]
],
[
[
"plot_experience_profiles(params_new, options)",
"_____no_output_____"
]
],
[
[
"## Estimation of the Extended Model\n\nIn addition to the basic model parameters, we also re-estimate the extended model specified in Keane and Wolpin (1997). Since the parameter space for this model is much larger than the basic specification, we expand the number of moments used for estimation. The new sets of moments used are conditional on the period and initial level of schooling of individuals. Specifically, we compute the choice frequencies and wage statistics for two initial schooling groups: those with up to 9 years of schooling at age 16 and those with 10 years or more. Furthermore, the moments for the wage distribution are expanded to include the median and 25% as well as 75% percentile for each initial schooling group in each period. \n\nThe plots below show the choice frequencies for the parameters from the paper and the newly estimated parameters. While the fit seems much better for the newly estimated parameters, the choice patterns they suggest over the life-cycle in some cases are a bit more extreme than the ones presented in Keane and Wolpin (1997), especially for the blue and white collar occupations.\n\nThis is not necessarily surprising as the model is fit on only 11 years of data, while the extrapolation is applied to 50 periods. Multiple other parameter estimates (not shown here) with a similar within-sample fit predict very different choice patterns over the life-cycle. **The parameters presented here should thus be used and interpreted with caution.**",
"_____no_output_____"
]
],
[
[
"params_extended_kw, options_extended, _ = rp.get_example_model(\"kw_97_extended\")",
"_____no_output_____"
],
[
"simulate_extended = rp.get_simulate_func(params_extended_kw, options_extended)",
"_____no_output_____"
],
[
"data_sim_extended_kw = simulate_extended(params_extended_kw)",
"_____no_output_____"
]
],
[
[
"### Choice Frequencies for Extended Parametrization from Keane and Wolpin (1997)",
"_____no_output_____"
]
],
[
[
"choices_extended_kw = calc_choice_frequencies(data_sim_extended_kw)\nplot_moments(\n moments_obs=choices_obs, moments_sim=choices_extended_kw, labels=choices, colors=colors,\n)",
"_____no_output_____"
]
],
[
[
"### Choice Frequencies for Estimated Extended Parameters",
"_____no_output_____"
]
],
[
[
"params_extended_respy, _, _ = rp.get_example_model(\"kw_97_extended_respy\")",
"_____no_output_____"
],
[
"data_sim_extended_respy = simulate_extended(params_extended_respy)",
"_____no_output_____"
],
[
"choices_extended_respy = calc_choice_frequencies(data_sim_extended_respy)\nplot_moments(\n moments_obs=choices_obs, moments_sim=choices_extended_respy, labels=choices, colors=colors,\n)\n",
"_____no_output_____"
]
],
[
[
"## References",
"_____no_output_____"
],
[
"- Heckman, J. J., Lochner, L. J., & Todd, P. E. (2006). [Earnings functions, rates of return and treatment effects: The Mincer equation and beyond](https://www.sciencedirect.com/science/article/pii/S1574069206010075). In Hanushek, E. & Welch, F., editors, *Handbook of the Economics of Education*, volume 1, pages 307–458. Elsevier Science, Amsterdam, Netherlands.\n\n- Keane, M. P. and Wolpin, K. I. (1997). [The Career Decisions of Young Men](https://www.jstor.org/stable/10.1086/262080?seq=1). *Journal of Political Economy*, 105(3): 473-522.",
"_____no_output_____"
]
]
] |
[
"markdown",
"raw",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"raw",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown"
],
[
"raw"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"raw",
"raw"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
4ac0ddd020eff0158d74bf763f9843a286875fc1
| 16,569 |
ipynb
|
Jupyter Notebook
|
cvc-hotel-price-webscraper.ipynb
|
thenielfarias/cvc-price-webscraper
|
9cab2db8e36bd818928e293859786b8157e8f31d
|
[
"MIT"
] | null | null | null |
cvc-hotel-price-webscraper.ipynb
|
thenielfarias/cvc-price-webscraper
|
9cab2db8e36bd818928e293859786b8157e8f31d
|
[
"MIT"
] | null | null | null |
cvc-hotel-price-webscraper.ipynb
|
thenielfarias/cvc-price-webscraper
|
9cab2db8e36bd818928e293859786b8157e8f31d
|
[
"MIT"
] | null | null | null | 51.778125 | 2,397 | 0.545356 |
[
[
[
"from selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.common.exceptions import *\nimport pandas as pd\nfrom time import sleep\nimport os\n\n\nnome_hoteis = []\npreco_hoteis = []\n\nclass ScrappyCvc:\n\n def iniciar(self):\n self.raspagem_de_dados()\n\n def raspagem_de_dados(self):\n \n checkin = \"2021-11-15\"\n checkout = \"2021-11-16\"\n destinoId = \"6162\"\n occ = \"2\"\n chrome_options = Options()\n chrome_options.add_experimental_option(\n 'excludeSwitches', ['enable-logging'])\n chrome_options.add_argument('--lang=pt-BR')\n chrome_options.add_argument('--disable-notifications')\n self.driver = webdriver.Chrome(options=chrome_options)\n self.driver.set_window_size(800, 700)\n self.link = f'https://www.cvc.com.br/hotel/search?CheckIn={checkin}&CheckOut={checkout}&Location=%20-%20%20,%20Brasil&ZoneId={destinoId}&Rooms=1&Adults={occ}&Children=0&ChildAges=;&City=&State=&Country=Brasil&Name='\n self.lista_nome_hoteis = []\n self.lista_preco_hoteis = []\n self.driver.get(self.link)\n sleep(2)\n \n last_height = self.driver.execute_script(\"return document.body.scrollHeight\")\n while True:\n self.driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n sleep(2)\n new_height = self.driver.execute_script(\"return document.body.scrollHeight\")\n if new_height == last_height:\n break\n last_height = new_height\n lista_length = self.driver.find_elements(By.CLASS_NAME, 'buttonDetailPayments')\n lista_max = len(lista_length)\n \n for p in range(1):\n item = 1\n for i in range(lista_max):\n c = 1\n while c < lista_max:\n try:\n lista_nomes = self.driver.find_elements(By.XPATH, f'/html/body/div[1]/div[2]/div/div/div[2]/div/div[{item}]/div/div[2]/div/div[1]/h2')\n self.lista_nome_hoteis.append(lista_nomes[0].text)\n sleep(1)\n lista_precos = self.driver.find_elements(By.XPATH, 
f'/html/body/div[1]/div[2]/div/div/div[2]/div/div[{item}]/div/div[3]/div/div[1]/div[1]/div[2]')\n self.lista_preco_hoteis.append(lista_precos[0].text) \n sleep(1)\n item += 1\n except:\n c += 1\n item += 1 \n \n print(f'\\u001b[32m{\"Resultados:\"}\\u001b[0m')\n print(self.lista_nome_hoteis)\n print(self.lista_preco_hoteis)\n \n for nome in self.lista_nome_hoteis:\n nome_hoteis.append(nome)\n for preco in self.lista_preco_hoteis:\n preco_hoteis.append(preco)\n \nstart = ScrappyCvc()\nstart.iniciar()",
"\u001b[32mResultados:\u001b[0m\n['Hotel Granville', 'Hotel Tibagi', 'Caravelle Palace Hotel', 'Hotel Pinheirinho', 'Golden Park Curitiba', 'Master Express Curitiba', 'Ibis Budget Curitiba Centro', 'Bristol Portal Do Iguaçu', 'Bristol Upper', 'Ibis Curitiba Batel', 'Ibis Styles Curitiba Batel', 'Bristol Metropolitan', 'San Juan Executive Curitiba', 'Dan Inn Curitiba', 'Pousada Betânia', 'Alta Reggia Plaza Hotel', 'Ibis Curitiba Aeroporto', 'Nacional Inn Curitiba Torres', 'Slim Curitiba Alto Da Xv', 'Nacional Inn Curitiba Estação', 'Mercure Curitiba Aeroporto', 'Ibis Styles Curitiba Centro Cívico', 'Roochelle Hotel', 'Go Inn Curitiba', 'Blue Tree Towers Saint Michel - Curitiba', 'Hotel Aladdin', 'Victoria Villa Curitiba', 'Ibis Styles Curitiba Santa Felicidade', 'Slim Curitiba Av. Das Torres', 'Mabu Curitiba Business', 'Saint Emilion Hotel', 'San Juan Royal Curitiba', 'Trevi Hotel & Business', 'Bristol Centro Cívico', 'Ibis Curitiba Shopping', 'Batel - Rio Hotel By Bourbon Curitiba', 'Hotel Intercity Curitiba – Centro Cívico', 'Hotel Intercity Curitiba Batel', 'Deville Curitiba Batel', 'Hotel Del Rey - Curitiba', 'Hotel Moov Curitiba', 'Hotel Del Rey Curitiba', 'Hotel Sol', 'San Juan Johnscher Curitiba', 'Astron Suítes São José Dos Pinhais', 'Slaviero Essential Curitiba Shopping', 'Slaviero Essential Curitiba Batel', 'Rockefeller By Slaviero Hotéis', 'Mercure Curitiba Golden', 'Mercure Curitiba Sete De Setembro', 'Ibis Styles Curitiba Aeroporto', 'Slaviero Essential Curitiba Centro', 'Hotel Confiance Barigui', 'Pestana Curitiba Hotel', 'Bristol Brasil 500', 'Transamerica Prime Batel Curitiba', 'Grand Mercure Curitiba Rayon', 'Bourbon Curitiba Convention Hotel', 'Hotel Flat Petras', 'Hotel Centro Europeu', 'Quality Hotel Curitiba', 'Mercure Curitiba Batel', 'Full Jazz By Slaviero Hotéis', 'Nh Curitiba The Five', 'Novotel Curitiba Batel', 'Blue Tree Towers Batel Curitiba', 'Radisson Hotel Curitiba', 'Nomaa Hotel']\n['111', '111', '136', '137', '137', '156', '161', 
'164', '171', '171', '173', '178', '184', '184', '187', '187', '188', '189', '195', '196', '198', '199', '200', '205', '209', '210', '213', '216', '216', '219', '225', '226', '227', '230', '230', '235', '236', '236', '236', '240', '244', '245', '246', '246', '247', '251', '252', '252', '259', '259', '266', '274', '282', '283', '288', '305', '320', '322', '324', '327', '332', '338', '342', '351', '373', '382', '407', '629']\n"
],
[
"print(nome_hoteis)",
"['Hotel Granville', 'Hotel Tibagi', 'Caravelle Palace Hotel', 'Hotel Pinheirinho', 'Golden Park Curitiba', 'Master Express Curitiba', 'Ibis Budget Curitiba Centro', 'Bristol Portal Do Iguaçu', 'Bristol Upper', 'Ibis Curitiba Batel', 'Ibis Styles Curitiba Batel', 'Bristol Metropolitan', 'San Juan Executive Curitiba', 'Dan Inn Curitiba', 'Pousada Betânia', 'Alta Reggia Plaza Hotel', 'Ibis Curitiba Aeroporto', 'Nacional Inn Curitiba Torres', 'Slim Curitiba Alto Da Xv', 'Nacional Inn Curitiba Estação', 'Mercure Curitiba Aeroporto', 'Ibis Styles Curitiba Centro Cívico', 'Roochelle Hotel', 'Go Inn Curitiba', 'Blue Tree Towers Saint Michel - Curitiba', 'Hotel Aladdin', 'Victoria Villa Curitiba', 'Ibis Styles Curitiba Santa Felicidade', 'Slim Curitiba Av. Das Torres', 'Mabu Curitiba Business', 'Saint Emilion Hotel', 'San Juan Royal Curitiba', 'Trevi Hotel & Business', 'Bristol Centro Cívico', 'Ibis Curitiba Shopping', 'Batel - Rio Hotel By Bourbon Curitiba', 'Hotel Intercity Curitiba – Centro Cívico', 'Hotel Intercity Curitiba Batel', 'Deville Curitiba Batel', 'Hotel Del Rey - Curitiba', 'Hotel Moov Curitiba', 'Hotel Del Rey Curitiba', 'Hotel Sol', 'San Juan Johnscher Curitiba', 'Astron Suítes São José Dos Pinhais', 'Slaviero Essential Curitiba Shopping', 'Slaviero Essential Curitiba Batel', 'Rockefeller By Slaviero Hotéis', 'Mercure Curitiba Golden', 'Mercure Curitiba Sete De Setembro', 'Ibis Styles Curitiba Aeroporto', 'Slaviero Essential Curitiba Centro', 'Hotel Confiance Barigui', 'Pestana Curitiba Hotel', 'Bristol Brasil 500', 'Transamerica Prime Batel Curitiba', 'Grand Mercure Curitiba Rayon', 'Bourbon Curitiba Convention Hotel', 'Hotel Flat Petras', 'Hotel Centro Europeu', 'Quality Hotel Curitiba', 'Mercure Curitiba Batel', 'Full Jazz By Slaviero Hotéis', 'Nh Curitiba The Five', 'Novotel Curitiba Batel', 'Blue Tree Towers Batel Curitiba', 'Radisson Hotel Curitiba', 'Nomaa Hotel']\n"
],
[
"print(preco_hoteis)",
"['111', '111', '136', '137', '137', '156', '161', '164', '171', '171', '173', '178', '184', '184', '187', '187', '188', '189', '195', '196', '198', '199', '200', '205', '209', '210', '213', '216', '216', '219', '225', '226', '227', '230', '230', '235', '236', '236', '236', '240', '244', '245', '246', '246', '247', '251', '252', '252', '259', '259', '266', '274', '282', '283', '288', '305', '320', '322', '324', '327', '332', '338', '342', '351', '373', '382', '407', '629']\n"
],
[
"data_hoteis = []\nfor el in zip(nome_hoteis, preco_hoteis):\n data_hoteis.append(el)\ndata_hoteis_dict = dict(data_hoteis)\nprint(data_hoteis_dict)",
"{'Hotel Granville': '111', 'Hotel Tibagi': '111', 'Caravelle Palace Hotel': '136', 'Hotel Pinheirinho': '137', 'Golden Park Curitiba': '137', 'Master Express Curitiba': '156', 'Ibis Budget Curitiba Centro': '161', 'Bristol Portal Do Iguaçu': '164', 'Bristol Upper': '171', 'Ibis Curitiba Batel': '171', 'Ibis Styles Curitiba Batel': '173', 'Bristol Metropolitan': '178', 'San Juan Executive Curitiba': '184', 'Dan Inn Curitiba': '184', 'Pousada Betânia': '187', 'Alta Reggia Plaza Hotel': '187', 'Ibis Curitiba Aeroporto': '188', 'Nacional Inn Curitiba Torres': '189', 'Slim Curitiba Alto Da Xv': '195', 'Nacional Inn Curitiba Estação': '196', 'Mercure Curitiba Aeroporto': '198', 'Ibis Styles Curitiba Centro Cívico': '199', 'Roochelle Hotel': '200', 'Go Inn Curitiba': '205', 'Blue Tree Towers Saint Michel - Curitiba': '209', 'Hotel Aladdin': '210', 'Victoria Villa Curitiba': '213', 'Ibis Styles Curitiba Santa Felicidade': '216', 'Slim Curitiba Av. Das Torres': '216', 'Mabu Curitiba Business': '219', 'Saint Emilion Hotel': '225', 'San Juan Royal Curitiba': '226', 'Trevi Hotel & Business': '227', 'Bristol Centro Cívico': '230', 'Ibis Curitiba Shopping': '230', 'Batel - Rio Hotel By Bourbon Curitiba': '235', 'Hotel Intercity Curitiba – Centro Cívico': '236', 'Hotel Intercity Curitiba Batel': '236', 'Deville Curitiba Batel': '236', 'Hotel Del Rey - Curitiba': '240', 'Hotel Moov Curitiba': '244', 'Hotel Del Rey Curitiba': '245', 'Hotel Sol': '246', 'San Juan Johnscher Curitiba': '246', 'Astron Suítes São José Dos Pinhais': '247', 'Slaviero Essential Curitiba Shopping': '251', 'Slaviero Essential Curitiba Batel': '252', 'Rockefeller By Slaviero Hotéis': '252', 'Mercure Curitiba Golden': '259', 'Mercure Curitiba Sete De Setembro': '259', 'Ibis Styles Curitiba Aeroporto': '266', 'Slaviero Essential Curitiba Centro': '274', 'Hotel Confiance Barigui': '282', 'Pestana Curitiba Hotel': '283', 'Bristol Brasil 500': '288', 'Transamerica Prime Batel Curitiba': '305', 'Grand Mercure 
Curitiba Rayon': '320', 'Bourbon Curitiba Convention Hotel': '322', 'Hotel Flat Petras': '324', 'Hotel Centro Europeu': '327', 'Quality Hotel Curitiba': '332', 'Mercure Curitiba Batel': '338', 'Full Jazz By Slaviero Hotéis': '342', 'Nh Curitiba The Five': '351', 'Novotel Curitiba Batel': '373', 'Blue Tree Towers Batel Curitiba': '382', 'Radisson Hotel Curitiba': '407', 'Nomaa Hotel': '629'}\n"
],
[
"df_data_hoteis = pd.DataFrame(list(data_hoteis_dict.items()),\n columns=['Nome', 'Preço'])\ndisplay(df_data_hoteis)",
"_____no_output_____"
],
[
"df_data_hoteis.to_csv('report.csv', index=False)",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4ac0ef4f76097038f47cfb8d259cfd5adc71b4ca
| 28,435 |
ipynb
|
Jupyter Notebook
|
Data Science and Machine Learning/Machine-Learning-In-Python-THOROUGH/RECAP_DS/07_DATABASES/NoSQL/L05.ipynb
|
okara83/Becoming-a-Data-Scientist
|
f09a15f7f239b96b77a2f080c403b2f3e95c9650
|
[
"MIT"
] | null | null | null |
Data Science and Machine Learning/Machine-Learning-In-Python-THOROUGH/RECAP_DS/07_DATABASES/NoSQL/L05.ipynb
|
okara83/Becoming-a-Data-Scientist
|
f09a15f7f239b96b77a2f080c403b2f3e95c9650
|
[
"MIT"
] | null | null | null |
Data Science and Machine Learning/Machine-Learning-In-Python-THOROUGH/RECAP_DS/07_DATABASES/NoSQL/L05.ipynb
|
okara83/Becoming-a-Data-Scientist
|
f09a15f7f239b96b77a2f080c403b2f3e95c9650
|
[
"MIT"
] | 2 |
2022-02-09T15:41:33.000Z
|
2022-02-11T07:47:40.000Z
| 41.329942 | 689 | 0.536487 |
[
[
[
"# DS108 Databases : Lesson Ten Companion Notebook",
"_____no_output_____"
],
[
"### Table of Contents <a class=\"anchor\" id=\"DS108L10_toc\"></a>\n\n* [Table of Contents](#DS108L10_toc)\n * [Page 1 - Overview](#DS108L10_page_1)\n * [Page 2 - Sharding](#DS108L10_page_2)\n * [Page 3 - More Methods](#DS108L10_page_3)\n * [Page 4 - Key Terms](#DS108L10_page_4)\n * [Page 5 - Lesson 5 Hands On](#DS108L10_page_5)\n\n ",
"_____no_output_____"
],
[
"<hr style=\"height:10px;border-width:0;color:gray;background-color:gray\">\n\n# Page 1 - Overview of this Module<a class=\"anchor\" id=\"DS108L10_page_1\"></a>\n\n[Back to Top](#DS108L10_toc)\n\n<hr style=\"height:10px;border-width:0;color:gray;background-color:gray\">",
"_____no_output_____"
]
],
[
[
"from IPython.display import VimeoVideo\n# Tutorial Video Name: Sharding, More Methods and Project\nVimeoVideo('245797657', width=720, height=480)",
"_____no_output_____"
]
],
[
[
"# Overview\n\nDuring this last lesson, you will be learning about a few more in-depth NoSQL terms and methods. You will also be working on an in-depth Lesson 5 HandsOn for NoSQL. It is time to dive right into Sharding.",
"_____no_output_____"
],
[
"<hr style=\"height:10px;border-width:0;color:gray;background-color:gray\">\n\n# Page 2 - Sharding<a class=\"anchor\" id=\"DS108L10_page_2\"></a>\n\n[Back to Top](#DS108L10_toc)\n\n<hr style=\"height:10px;border-width:0;color:gray;background-color:gray\">\n",
"_____no_output_____"
],
[
"# Sharding\n\n**Sharding** is a way to spread data across multiple machines and servers. MongoDB uses Sharding to support deployments and applications that contain huge data sets. This is because when database systems have large data sets, a single server may have trouble keeping up with all the data. There are _two_ ways to deal with a situation like this: *Vertical* or *Horizontal* Scaling.\n\n---\n\n## Vertical Scaling\n\n**Vertical Scaling** involves ways to increase the capacity of a server, such as using a much more powerful CPU, adding more RAM, or increasing the amount of storage space. There are limitations when using _Vertical Scaling_ because there may be restrictions on how much storage one machine can handle. Also, cloud-based providers have a maximum for how much storage they have.\n\n---\n\n## Horizontal Scaling\n\n**Horizontal Scaling** is the process of spreading out the dataset between multiple servers and increasing the storage to those servers as needed. Even if a single machine out of the many handling the data may not be super high-speed, overall, it may increase the efficiency of the application having many machines. If the dataset expands, all that is needed is to add servers to handle that data as needed. MongoDB supports _Horizontal Scaling_ through _Sharding_.\n\n---\n\n## Enable Sharding\n\n**Sharding** is something that is done at a very high level in your database, usually on the admin side of the database. The following command is used when you would like to create Sharding in your database:\n\n```js\ndb.runCommand({\n shardCollection: \"<database>.<collection>\",\n key: <shardkey>,\n unique: <boolean>,\n numInitialChunks: <integer>,\n collation: { locale: \"simple\" }\n})\n```\n\nAs you can see, there are several options available to you when running this command; however, only the last is optional. 
Now it's time to explore these parts:\n\n* **shardCollection:** How do you name which collection in which database you would like to shard. It will always be a string.\n\n* **key:** The index specification document to use as the shard key. The shard key determines how MongoDB distributes the documents among the shards.\n\n* **unique:** When true, the unique option ensures that the underlying index enforces a unique constraint. Hashed shard keys do not support unique constraints. Defaults to false.\n\n* **numInitialChunks:** Specifies the number of chunks to initially create when sharding a collection that is empty with a hashed shard key. Then, MongoDB will create and balance chunks across the cluster. The `numInitialChunks` must be less than 8192 per shard.\n * MongoDB divides sharded data into chunks. Each chunk has an inclusive lower and exclusive upper range based on the shard key.\n\n* **collation:** _Optional._ If the collection specified to shardCollection has a default collation, you must include a collation document with `{ locale : \"simple\" }`, or the shardCollection command fails. At least one of the indexes whose fields support the shard key pattern must have a simple collation.\n * Collation allows users to specify language-specific string comparison rules, such as letter case and accent marks.\n\n<div class=\"panel panel-success\">\n <div class=\"panel-heading\">\n <h3 class=\"panel-title\">Additional Info!</h3>\n </div>\n <div class=\"panel-body\">\n <p><b>Sharding</b> can get quite complicated quickly, but you now have a basic understanding of what sharding is and how you can accomplish it. The documentation on <b>Sharding</b> is extensive, so if you would like to read more about it, you can visit MongoDB's documentation website <a href=\"https://docs.mongodb.com/manual/sharding/\" target=\"_blank\">here</a>.</p>\n </div>\n</div>\n",
"_____no_output_____"
],
[
"<hr style=\"height:10px;border-width:0;color:gray;background-color:gray\">\n\n# Page 3 - More Methods<a class=\"anchor\" id=\"DS108L10_page_3\"></a>\n\n[Back to Top](#DS108L10_toc)\n\n<hr style=\"height:10px;border-width:0;color:gray;background-color:gray\">\n",
"_____no_output_____"
],
[
"# More Methods\n\nNow that you have made it this far in NoSQL, it is time to look into a few more available methods when working with a collection. Some of these methods can be in-depth, but it is good to know they are available to you.\n\n---\n\n## aggregate()\n\nThis method calculates the aggregate (total) values for data in a collection. Below is the syntax:\n\n```js\ndb.collectionName.aggregate(pipeline, options);\n```\n\nBelow is a description of the parameters of the above query:\n\n* **pipeline:** An array that is a sequence of data aggregation operations or stages.\n <div class=\"panel panel-success\">\n <div class=\"panel-heading\">\n <h3 class=\"panel-title\">Additional Info!</h3>\n </div>\n <div class=\"panel-body\">\n <p>There are many pipeline stages, which you can read about <a href=\"https://docs.mongodb.com/v3.0/reference/operator/aggregation-pipeline/\" target=\"_blank\">here</a>.</p>\n </div>\n </div>\n\n* **options:** _Optional_, additional documents that are passed in when using aggregate.\n <div class=\"panel panel-success\">\n <div class=\"panel-heading\">\n <h3 class=\"panel-title\">Additional Info!</h3>\n </div>\n <div class=\"panel-body\">\n <p>There are many options available to the aggregate method, which you can read about <a href=\"https://docs.mongodb.com/v3.0/reference/method/db.collection.aggregate/#db.collection.aggregate\">here</a>.</p>\n </div>\n </div>\n\n---\n\n## count()\n\nThis method will count and return the number of results based on a query. The syntax is below:\n\n```js\ndb.collectionName.count();\n```\n\nFor example, if you wanted to count the number of documents in your `inventory` collection, you would run the following:\n\n```js\ndb.inventory.count();\n```\n\nThe query above will return 10, or however many documents are currently in the `inventory` collection.\n\nYou could also run this query with a filter. 
Check to see how many of your app users in your `appusers` collection have an age greater than 20 by running the below query:\n\n```js\ndb.appusers.count( { age: { $gt : 20 } } )\n```\n\nAfter running the above query, it should return the number 4 or a number close, depending on your changes in that collection.\n\n---\n\n\n## totalSize()\n\nThis method will return the total size in bytes of the data in the collection plus the size of every index on the collection.\n\nIf you run the query below, a number around 16000 will be returned based on what your collection currently contains:\n\n```js\ndb.appusers.totalSize()\n```\n\n<div class=\"panel panel-success\">\n <div class=\"panel-heading\">\n <h3 class=\"panel-title\">Additional Info!</h3>\n </div>\n <div class=\"panel-body\">\n <p>There are many more methods available to you. Each method has the possibility of being slightly complex. If you would like to read more about the methods available in NoSQL, visit MongoDB's documentation <a href=\"https://docs.mongodb.com/v3.0/reference/method/js-collection/\" target=\"_blank\">Collection Methods</a>.</p>\n </div>\n</div>\n",
"_____no_output_____"
],
[
"<hr style=\"height:10px;border-width:0;color:gray;background-color:gray\">\n\n# Page 4 - Key Terms<a class=\"anchor\" id=\"DS108L10_page_4\"></a>\n\n[Back to Top](#DS108L10_toc)\n\n<hr style=\"height:10px;border-width:0;color:gray;background-color:gray\">\n",
"_____no_output_____"
],
[
"# Key Terms\n\nBelow is a list of a short description of the important keywords you have learned in this lesson. Please read through and go back and review any concepts you don't fully understand. Great Work!\n\n<table class=\"table table-striped\">\n <tr>\n <th>Keyword</th>\n <th>Description</th>\n </tr>\n <tr>\n <td style=\"font-weight: bold;\" nowrap>Sharding</td>\n <td>Sharding is a way to spread data across multiple machines and servers. MongoDB uses Sharding to support deployments and applications that contain huge data sets. The reason for this is because when database systems have large data sets, a single server may have trouble keeping up with all the data. There are two ways to deal with a situation like this: <em>Vertical</em> or <em>Horizontal</em> Scaling.</td>\n </tr>\n <tr>\n <td style=\"font-weight: bold;\" nowrap>Vertical Scaling</td>\n <td>Involves ways to increase the capacity of a server, such as using a much more powerful CPU, adding more RAM, or increasing the amount of storage space.</td>\n </tr>\n <tr>\n <td style=\"font-weight: bold;\" nowrap>Horizontal Scaling</td>\n <td>The process of spreading out the dataset between multiple servers and increasing the storage to those servers as needed.</td>\n </tr>\n <tr>\n <td style=\"font-weight: bold;\" nowrap>aggregate()</td>\n <td>This method calculates the aggregate (total) values for data in a collection.</td>\n </tr>\n <tr>\n <td style=\"font-weight: bold;\" nowrap>count()</td>\n <td>This method will count and return the number of results based on a query.</td>\n </tr>\n <tr>\n <td style=\"font-weight: bold;\" nowrap>totalSize()</td>\n <td>This method will return the total size in bytes of the data in the collection plus the size of every indexes on the collection.\n</td>\n </tr>\n</table>",
"_____no_output_____"
],
[
"<hr style=\"height:10px;border-width:0;color:gray;background-color:gray\">\n\n# Page 5 - Lesson 5 Hands On<a class=\"anchor\" id=\"DS108L10_page_5\"></a>\n\n[Back to Top](#DS108L10_toc)\n\n<hr style=\"height:10px;border-width:0;color:gray;background-color:gray\">\n",
"_____no_output_____"
],
[
"Welcome to the last project for the NoSQL course! Great job making it this far! This hands on will be different from the hands on projects you have previously seen in a couple of different ways. You will be putting together the numerous topics you have learned into one large project. It is designed to mimic real problems which you may face in your career, so it may be a challenge for you and will also take several hours. \n\n<div class=\"panel panel-success\">\n <div class=\"panel-heading\">\n <h3 class=\"panel-title\">Additional Info!</h3>\n </div>\n <div class=\"panel-body\">\n <p>Before beginning this hands-on, you may want to watch this <a href=\"https://vimeo.com/428206689\"> recorded live workshop, \"Winnie the Pooh and Databases Too,\" </a> that goes over a similar example. </p>\n </div>\n</div>\n\nTake this project step-by-step and be aware that the project description below is written to be a bit less specific than previous Hands-Ons. The hands on is supposed to challenge you to do some problem solving to figure out how to accomplish a task. You can always review past lessons or use a Google search if needed. Good luck!\n\n<div class=\"panel panel-danger\">\n <div class=\"panel-heading\">\n <h3 class=\"panel-title\">Caution!</h3>\n </div>\n <div class=\"panel-body\">\n <p>Do not submit your project until you have completed all requirements! You will not be able to resubmit.</p>\n </div>\n</div>\n\n---\n\n## Requirements\n\nFor this hands on, you will be working through several real-life scenarios within new collections. This Hands-On is structured into _two_ parts, and each part will ask you to run multiple queries. After each query, please take a screenshot and add it to a text document (or an equivalent) and name this file `Lesson5handson`. This way, you will be able to submit your answers to each part all at once.\n\n---\n\n## Part 1\n\nYou have just been hired at a startup company. 
They currently only have ten employees, but they need to be included in the database. So far, they have only been tracked within an excel sheet. Your boss would like you to create a new collection in Atlas named `employees`. Take a look at the following data and the notes listed below before inserting any data:\n\n<table class=\"table table-striped\">\n <tr>\n <th>Name</th>\n <th>Birthday</th>\n <th>Address</th>\n <th>City</th>\n <th>State</th>\n <th>Position Name</th>\n <th>Remote</th>\n <th>Full Time</th>\n <tr>\n <tr>\n <td>Alison Davidson</td>\n <td>04/05/75</td>\n <td>874 W. Oak Place</td>\n <td>Gary</td>\n <td>Indiana</td>\n <td>Customer Support</td>\n <td>Yes</td>\n <td>Yes</td>\n <tr>\n <tr>\n <td>Henry Chapelton</td>\n <td>09/29/80</td>\n <td>9324 E. Vista Way</td>\n <td>Tempe</td>\n <td>Arizona</td>\n <td>Customer Support</td>\n <td>No</td>\n <td>Yes</td>\n <tr>\n <tr>\n <td>Alex Miller</td>\n <td>11/22/83</td>\n <td>244 Price Road</td>\n <td>Mesa</td>\n <td>Arizona</td>\n <td>Customer Support</td>\n <td>No</td>\n <td>No</td>\n <tr>\n <tr>\n <td>Carly Nielson</td>\n <td>08/04/87</td>\n <td>678 W. Westward Road</td>\n <td>Phoenix</td>\n <td>Arizona</td>\n <td>Office Manager</td>\n <td>No</td>\n <td>Yes</td>\n <tr>\n <tr>\n <td>Tom Talbot</td>\n <td>12/30/89</td>\n <td>12 Oakland Way</td>\n <td>Chandler</td>\n <td>Arizona</td>\n <td>Inventory Manager</td>\n <td>No</td>\n <td>Yes</td>\n <tr>\n <tr>\n <td>Mary Crawley</td>\n <td>07/06/80</td>\n <td>1010 Granite Way</td>\n <td>Charlotte</td>\n <td>North Carolina</td>\n <td>Human Resources</td>\n <td>Yes</td>\n <td>Yes</td>\n <tr>\n <tr>\n <td>Daisy Baxter</td>\n <td>09/09/87</td>\n <td>990 E. 84th St.</td>\n <td>Tempe</td>\n <td>Arizona</td>\n <td>CEO</td>\n <td>No</td>\n <td>Yes</td>\n <tr>\n <tr>\n <td>William Coyle</td>\n <td>10/11/91</td>\n <td>944 W. 
16th St.</td>\n <td>Phoenix</td>\n <td>Arizona</td>\n <td>Intern</td>\n <td>No</td>\n <td>No</td>\n <tr>\n <tr>\n <td>Edith Bates</td>\n <td>07/28/90</td>\n <td>7 E. 20th Pl.</td>\n <td>Chandler</td>\n <td>Arizona</td>\n <td>Customer Support</td>\n <td>No</td>\n <td>Yes</td>\n <tr>\n <tr>\n <td>Gwen Harding</td>\n <td>10/11/86</td>\n <td>234 W. 48th. St.</td>\n <td>Phoenix</td>\n <td>Arizona</td>\n <td>Office Assistent</td>\n <td>No</td>\n <td>Yes</td>\n <tr>\n</table>\n\n**Notes:**\n\n* The `Birthday` field should have a data type of Date.\n* The `Position Name`, `Remote`, and `Full Time` fields should be within an embedded document called `position`.\n* `Remote` and `Full Time` fields should have boolean values.\n\nIt's been about a month since you have inserted all employees into the database. There have been a couple of changes to the company. The CEO decided that he no longer wants remote employees, so they have transferred the remote employees and they are now living in Arizona. Alison Davidson now lives at 777 E. 1st St. # 120 Tempe, AZ and Mary Crawley now lives at 8322 W. Vista Pl. Scottsdale, AZ. Since all employees now all live in Arizona, there is no need to have a field named \"state\" within this collection, so please remove it. Lastly, they would like very efficient searching using the \"position\" field (remember that field includes a document with three other fields).\n\n---\n\n## Part 2\n\nYou are currently working for a company who wants to build an app similar to Spotify. Below is a list of data for different songs. 
Please insert this data into a new collection named `songs`.\n\n<table class=\"table table-striped\">\n <tr>\n <th>SongId</th>\n <th align=\"left\">Title</th>\n <th align=\"left\">Artist</th>\n <th align=\"left\">Album</th>\n <th>ReleaseYear</th>\n <tr>\n <tr>\n <td>1</td>\n <td>Girls Just Want To Have Fun</td>\n <td>Cyndi Lauper</td>\n <td>She's So Unusual</td>\n <td>1983</td>\n </tr>\n <tr>\n <td>2</td>\n <td>Hips Don't Lie</td>\n <td>Shakira feat. Wyclef Jean</td>\n <td>Oral Fixation Vol. 2</td>\n <td>2006</td>\n </tr>\n <tr>\n <td>3</td>\n <td>Poker Face</td>\n <td>Lady Gaga</td>\n <td>The Fame</td>\n <td>2008</td>\n </tr>\n <tr>\n <td>4</td>\n <td>Wannabe</td>\n <td>Spice Girls</td>\n <td>Spice</td>\n <td>1996</td>\n </tr>\n <tr>\n <td>5</td>\n <td>California Gurls</td>\n <td>Katy Perry feat. Snoop Dogg</td>\n <td>Teenage Dream</td>\n <td>2010</td>\n </tr>\n <tr>\n <td>6</td>\n <td>Bye, Bye, Bye</td>\n <td>NSYNC</td>\n <td>No Strings Attached</td>\n <td>2000</td>\n </tr>\n <tr>\n <td>7</td>\n <td>I Will Always Love You</td>\n <td>Whitney Houston</td>\n <td>I Will Always Love You: The Best of Whitney Houston</td>\n <td>2012</td>\n </tr>\n <tr>\n <td>8</td>\n <td>Baby One More Time</td>\n <td>Britney Spears</td>\n <td>Baby One More Time</td>\n <td>1999</td>\n </tr>\n <tr>\n <td>9</td>\n <td>Vogue</td>\n <td>Madonna</td>\n <td>I'm Breathless</td>\n <td>1990</td>\n </tr>\n <tr>\n <td>10</td>\n <td>Rolling in the Deep</td>\n <td>Adele</td>\n <td>21</td>\n <td>2011</td>\n </tr>\n <tr>\n <td>11</td>\n <td>1234</td>\n <td>Feist</td>\n <td>The Reminder</td>\n <td>2007</td>\n </tr>\n <tr>\n <td>12</td>\n <td>Elastic Heart</td>\n <td>Sia</td>\n <td>The Hunger Games: Catching Fire Soundtrack</td>\n <td>2015</td>\n </tr>\n <tr>\n <td>13</td>\n <td>Oops! I Did It Again</td>\n <td>Britney Spears</td>\n <td>Oops! 
I Did It Again</td>\n <td>2000</td>\n </tr>\n <tr>\n <td>14</td>\n <td>Bad Romance</td>\n <td>Lady Gaga</td>\n <td>The Fame Monster</td>\n <td>2009</td>\n </tr>\n <tr>\n <td>15</td>\n <td>Lose Control</td>\n <td>Missy Elliot</td>\n <td>The Cookbook</td>\n <td>2005</td>\n </tr>\n <tr>\n <td>16</td>\n <td>U Can't Touch This</td>\n <td>MC Hammer</td>\n <td>Please Hammer, Don't Hurt 'Em</td>\n <td>1990</td>\n </tr>\n <tr>\n <td>17</td>\n <td>Thriller</td>\n <td>Michael Jackson</td>\n <td>Thriller</td>\n <td>1982</td>\n </tr>\n <tr>\n <td>18</td>\n <td>Single Ladies</td>\n <td>Beyonce</td>\n <td>I am... Sasha Fierce</td>\n <td>2008</td>\n </tr>\n <tr>\n <td>19</td>\n <td>Rhythm Nation</td>\n <td>Janet Jackson</td>\n <td>Janet Jackson's Rhythm Nation 1814</td>\n <td>1989</td>\n </tr>\n</table>\n\n**Notes:**\n\n* The `artist`, `album`, and `releaseYear` fields should be an embedded document named `details`.\n* Be sure that the `songId` and `releaseYear` fields have a type of number.\n\nNext, your company has run into some things they would like to be changed within the database:\n\n* The `title` field needs to be renamed to `songTitle`, so it is clearer to the developers working with the data.\n* They would like to have the `artist` field to be outside the `details` document but the `album` and `releaseYear` should stay within that document.\n\n<div class=\"panel panel-danger\">\n <div class=\"panel-heading\">\n <h3 class=\"panel-title\">Caution!</h3>\n </div>\n <div class=\"panel-body\">\n <p>Be sure to zip and submit your <code>Lesson5handson</code> text document when finished! You will not be able to re-submit, so be sure the screenshots to each part are located within this document.</p>\n </div>\n</div>\n",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
4ac0fa57b6f1ff46dc46b998128eab9052709146
| 40,947 |
ipynb
|
Jupyter Notebook
|
tests/tf/04_single_hidden_layer_network.ipynb
|
gopala-kr/ds-notebooks
|
bc35430ecdd851f2ceab8f2437eec4d77cb59423
|
[
"MIT"
] | 1 |
2019-05-10T09:16:23.000Z
|
2019-05-10T09:16:23.000Z
|
tests/tf/04_single_hidden_layer_network.ipynb
|
gopala-kr/ds-notebooks
|
bc35430ecdd851f2ceab8f2437eec4d77cb59423
|
[
"MIT"
] | null | null | null |
tests/tf/04_single_hidden_layer_network.ipynb
|
gopala-kr/ds-notebooks
|
bc35430ecdd851f2ceab8f2437eec4d77cb59423
|
[
"MIT"
] | 1 |
2019-05-10T09:17:28.000Z
|
2019-05-10T09:17:28.000Z
| 153.359551 | 33,940 | 0.888197 |
[
[
[
"# Implementing a one-layer Neural Network\n\n\nWe will illustrate how to create a one hidden layer NN\n\nWe will use the iris data for this exercise\n\nWe will build a one-hidden layer neural network to predict the fourth attribute, Petal Width from the other three (Sepal length, Sepal width, Petal length).",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nfrom sklearn import datasets\nfrom tensorflow.python.framework import ops",
"_____no_output_____"
],
[
"ops.reset_default_graph()",
"_____no_output_____"
],
[
"iris = datasets.load_iris()\nx_vals = np.array([x[0:3] for x in iris.data])\ny_vals = np.array([x[3] for x in iris.data])",
"_____no_output_____"
],
[
"# Create graph session \nsess = tf.Session()",
"_____no_output_____"
],
[
"# make results reproducible\nseed = 2\ntf.set_random_seed(seed)\nnp.random.seed(seed) ",
"_____no_output_____"
],
[
"# Split data into train/test = 80%/20%\ntrain_indices = np.random.choice(len(x_vals), round(len(x_vals)*0.8), replace=False)\ntest_indices = np.array(list(set(range(len(x_vals))) - set(train_indices)))\nx_vals_train = x_vals[train_indices]\nx_vals_test = x_vals[test_indices]\ny_vals_train = y_vals[train_indices]\ny_vals_test = y_vals[test_indices]",
"_____no_output_____"
],
[
"# Normalize by column (min-max norm)\ndef normalize_cols(m):\n col_max = m.max(axis=0)\n col_min = m.min(axis=0)\n return (m-col_min) / (col_max - col_min)\n \nx_vals_train = np.nan_to_num(normalize_cols(x_vals_train))\nx_vals_test = np.nan_to_num(normalize_cols(x_vals_test))",
"_____no_output_____"
],
[
"# Declare batch size\nbatch_size = 50\n\n# Initialize placeholders\nx_data = tf.placeholder(shape=[None, 3], dtype=tf.float32)\ny_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)",
"_____no_output_____"
],
[
"# Create variables for both NN layers\nhidden_layer_nodes = 10\nA1 = tf.Variable(tf.random_normal(shape=[3,hidden_layer_nodes])) # inputs -> hidden nodes\nb1 = tf.Variable(tf.random_normal(shape=[hidden_layer_nodes])) # one biases for each hidden node\nA2 = tf.Variable(tf.random_normal(shape=[hidden_layer_nodes,1])) # hidden inputs -> 1 output\nb2 = tf.Variable(tf.random_normal(shape=[1])) # 1 bias for the output\n\n\n# Declare model operations\nhidden_output = tf.nn.relu(tf.add(tf.matmul(x_data, A1), b1))\nfinal_output = tf.nn.relu(tf.add(tf.matmul(hidden_output, A2), b2))\n\n# Declare loss function (MSE)\nloss = tf.reduce_mean(tf.square(y_target - final_output))\n\n# Declare optimizer\nmy_opt = tf.train.GradientDescentOptimizer(0.005)\ntrain_step = my_opt.minimize(loss)",
"_____no_output_____"
],
[
"# Initialize variables\ninit = tf.global_variables_initializer()\nsess.run(init)\n\n# Training loop\nloss_vec = []\ntest_loss = []\nfor i in range(500):\n rand_index = np.random.choice(len(x_vals_train), size=batch_size)\n rand_x = x_vals_train[rand_index]\n rand_y = np.transpose([y_vals_train[rand_index]])\n sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})\n\n temp_loss = sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y})\n loss_vec.append(np.sqrt(temp_loss))\n \n test_temp_loss = sess.run(loss, feed_dict={x_data: x_vals_test, y_target: np.transpose([y_vals_test])})\n test_loss.append(np.sqrt(test_temp_loss))\n if (i+1)%50==0:\n print('Generation: ' + str(i+1) + '. Loss = ' + str(temp_loss))",
"Generation: 50. Loss = 0.527901\nGeneration: 100. Loss = 0.228715\nGeneration: 150. Loss = 0.179773\nGeneration: 200. Loss = 0.107899\nGeneration: 250. Loss = 0.240029\nGeneration: 300. Loss = 0.15324\nGeneration: 350. Loss = 0.165901\nGeneration: 400. Loss = 0.0957248\nGeneration: 450. Loss = 0.121014\nGeneration: 500. Loss = 0.129494\n"
],
[
"%matplotlib inline\n# Plot loss (MSE) over time\nplt.plot(loss_vec, 'k-', label='Train Loss')\nplt.plot(test_loss, 'r--', label='Test Loss')\nplt.title('Loss (MSE) per Generation')\nplt.legend(loc='upper right')\nplt.xlabel('Generation')\nplt.ylabel('Loss')\nplt.show()",
"_____no_output_____"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4ac0ff33134ad51a28ac2b9442edcce292f8803f
| 22,520 |
ipynb
|
Jupyter Notebook
|
docs/examples/batch-to-online.ipynb
|
Leo-VK/creme
|
0b02df4f4c826368747ee91946efca1a7e8653a6
|
[
"BSD-3-Clause"
] | 1 |
2020-12-01T07:02:48.000Z
|
2020-12-01T07:02:48.000Z
|
docs/examples/batch-to-online.ipynb
|
Leo-VK/creme
|
0b02df4f4c826368747ee91946efca1a7e8653a6
|
[
"BSD-3-Clause"
] | null | null | null |
docs/examples/batch-to-online.ipynb
|
Leo-VK/creme
|
0b02df4f4c826368747ee91946efca1a7e8653a6
|
[
"BSD-3-Clause"
] | null | null | null | 48.32618 | 1,228 | 0.649778 |
[
[
[
"# From batch to online",
"_____no_output_____"
],
[
"## A quick overview of batch learning\n\nIf you've already delved into machine learning, then you shouldn't have any difficulty in getting to use incremental learning. If you are somewhat new to machine learning, then do not worry! The point of this notebook in particular is to introduce simple notions. We'll also start to show how `creme` fits in and explain how to use it.\n\nThe whole point of machine learning is to *learn from data*. In *supervised learning* you want to learn how to predict a target $y$ given a set of features $X$. Meanwhile in an unsupervised learning there is no target, and the goal is rather to identify patterns and trends in the features $X$. At this point most people tend to imagine $X$ as a somewhat big table where each row is an observation and each column is a feature, and they would be quite right. Learning from tabular data is part of what's called *batch learning*, which basically that all of the data is available to our learning algorithm at once. A lot of libraries have been created to handle the batch learning regime, with one of the most prominent being Python's [scikit-learn](https://scikit-learn.org/stable/). \n\nAs a simple example of batch learning let's say we want to learn to predict if a women has breast cancer or not. We'll use the [breast cancer dataset available with scikit-learn](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_breast_cancer().html). We'll learn to map a set of features to a binary decision using a [logistic regression](https://www.wikiwand.com/en/Logistic_regression). Like many other models based on numerical weights, logisitc regression is sensitive to the scale of the features. Rescaling the data so that each feature has mean 0 and variance 1 is generally considered good practice. We can apply the rescaling and fit the logistic regression sequentially in an elegant manner using a [Pipeline](https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html). 
To measure the performance of the model we'll evaluate the average [ROC AUC score](https://www.wikiwand.com/en/Receiver_operating_characteristic) using a 5 fold [cross-validation](https://www.wikiwand.com/en/Cross-validation_(statistics)). ",
"_____no_output_____"
]
],
[
[
"from sklearn import datasets\nfrom sklearn import linear_model\nfrom sklearn import metrics\nfrom sklearn import model_selection\nfrom sklearn import pipeline\nfrom sklearn import preprocessing\n\n\n# Load the data\ndataset = datasets.load_breast_cancer()\nX, y = dataset.data, dataset.target\n\n# Define the steps of the model\nmodel = pipeline.Pipeline([\n ('scale', preprocessing.StandardScaler()),\n ('lin_reg', linear_model.LogisticRegression(solver='lbfgs'))\n])\n\n# Define a determistic cross-validation procedure\ncv = model_selection.KFold(n_splits=5, shuffle=True, random_state=42)\n\n# Compute the MSE values\nscorer = metrics.make_scorer(metrics.roc_auc_score)\nscores = model_selection.cross_val_score(model, X, y, scoring=scorer, cv=cv)\n\n# Display the average score and it's standard deviation\nprint(f'ROC AUC: {scores.mean():.3f} (± {scores.std():.3f})')",
"ROC AUC: 0.975 (± 0.011)\n"
]
],
[
[
"This might be a lot to take in if you're not accustomed to scikit-learn, but it probably isn't if you are. Batch learning basically boils down to:\n\n1. Loading the data\n2. Fitting a model to the data\n3. Computing the performance of the model on unseen data\n\nThis is pretty standard and is maybe how most people imagine a machine learning pipeline. However this way of proceding has certain downsides. First of all your laptop would crash if the `load_boston` function returned a dataset who's size exceeds your available amount of RAM. Sometimes you can use some tricks to get around this. For example by optimizing the data types and by using sparse representations when applicable you can potentially save precious gigabytes of RAM. However like many tricks this only goes so far. If your dataset weighs hundreds of gigabytes then you won't go far without some special hardware. One solution is to do out-of-core learning; that is, algorithms that can learning by being presented the data in chunks. If you want to go down this road then take a look at [Dask](https://examples.dask.org/machine-learning.html) and [Spark's MLlib](https://spark.apache.org/mllib/).\n\nAnother issue with the batch learning regime is that can't elegantly learn from new data. Indeed if new data is made available, then the model has to learn from scratch with a new dataset composed of the old data and the new data. This is particularly annoying in a real situation where you might have new incoming data every week, day, hour, minute, or even setting. For example if you're building a recommendation engine for an e-commerce app, then you're probably training your model from 0 every week or so. As your app grows in popularity, so does the dataset you're training on. This will lead to longer and longer training times and might require a hardware upgrade.\n\nA final downside that isn't very easy to grasp concerns the manner in which features are extracted. 
Everytime you want to train your model you first have to extract features. The trick is that some features might not be accessible at the particular point in time you are at. For example maybe that some attributes in your data warehouse get overwritten with time. In other words maybe that all the features pertaining to a particular observations are not available, whereas they were a week ago. This happens more often than not in real scenarios, and apart if you have a sophisticated data engineering pipeline then you will encounter these issues at some point. ",
"_____no_output_____"
],
[
"## A hands-on introduction to incremental learning\n\nIncremental learning is also often called *online learning*, but if you [google online learning](https://www.google.com/search?q=online+learning) a lot of the results will point to educational websites. Hence we prefer the name \"incremental learning\", from which `creme` derives it's name. The point of incremental learning is to fit a model to a stream of data. In other words, the data isn't available in it's entirety, but rather the observations are provided one by one. As an example let's stream through the dataset used previously.",
"_____no_output_____"
]
],
[
[
"for xi, yi in zip(X, y):\n # This where the model learns\n pass",
"_____no_output_____"
]
],
[
[
"In this case we're iterating over a dataset that is already in memory, but we could just as well stream from a CSV file, a Kafka stream, an SQL query, etc. If we look at `x` we can notice that it is a `numpy.ndarray`.",
"_____no_output_____"
]
],
[
[
"xi",
"_____no_output_____"
]
],
[
[
"`creme` on the other hand works with `dict`s. We believe that `dict`s are more enjoyable to program with than `numpy.ndarray`s, at least for when single observations are concerned. `dict`'s bring the added benefit that each feature can be accessed by name rather than by position.",
"_____no_output_____"
]
],
[
[
"for xi, yi in zip(X, y):\n xi = dict(zip(dataset.feature_names, xi))\n pass\n\nxi",
"_____no_output_____"
]
],
[
[
"`creme`'s `stream` module has an `iter_sklearn_dataset` convenience function that we can use instead.",
"_____no_output_____"
]
],
[
[
"from creme import stream\n\nfor xi, yi in stream.iter_sklearn_dataset(datasets.load_breast_cancer()):\n pass",
"_____no_output_____"
]
],
[
[
"The simple fact that we are getting the data in a stream means that we can't do a lot of things the same way as in a batch setting. For example let's say we want to scale the data so that it has mean 0 and variance 1, as we did earlier. To do so we simply have to subtract the mean of each feature to each value and then divide the result by the standard deviation of the feature. The problem is that we can't possible known the values of the mean and the standard deviation before actually going through all the data! One way to procede would be to do a first pass over the data to compute the necessary values and then scale the values during a second pass. The problem is that defeats our purpose, which is to learn by only looking at the data once. Although this might seem rather restrictive, it reaps sizable benefits down the road.\n\nThe way we do feature scaling in `creme` involves computing *running statistics*. The idea is that we use a data structure that estimates the mean and updates itself when it is provided with a value. The same goes for the variance (and thus the standard deviation). For example, if we denote $\\mu_t$ the mean and $n_t$ the count at any moment $t$, then updating the mean can be done as so:\n\n$$\n\\begin{cases}\nn_{t+1} = n_t + 1 \\\\\n\\mu_{t+1} = \\mu_t + \\frac{x - \\mu_t}{n_{t+1}}\n\\end{cases}\n$$\n\nLikewhise a running variance can be computed as so:\n\n$$\n\\begin{cases}\nn_{t+1} = n_t + 1 \\\\\n\\mu_{t+1} = \\mu_t + \\frac{x - \\mu_t}{n_{t+1}} \\\\\ns_{t+1} = s_t + (x - \\mu_t) \\times (x - \\mu_{t+1}) \\\\\n\\sigma_{t+1} = \\frac{s_{t+1}}{n_{t+1}}\n\\end{cases}\n$$\n\nwhere $s_t$ is a running sum of squares and $\\sigma_t$ is the running variance at time $t$. This might seem a tad more involved than the batch algorithms you learn in school, but it is rather elegant. Implementing this in Python is not too difficult. For example let's compute the running mean and variance of the `'mean area'` variable.",
"_____no_output_____"
]
],
[
[
"n, mean, sum_of_squares, variance = 0, 0, 0, 0\n\nfor xi, yi in stream.iter_sklearn_dataset(datasets.load_breast_cancer()):\n n += 1\n old_mean = mean\n mean += (xi['mean area'] - mean) / n\n sum_of_squares += (xi['mean area'] - old_mean) * (xi['mean area'] - mean)\n variance = sum_of_squares / n\n \nprint(f'Running mean: {mean:.3f}')\nprint(f'Running variance: {variance:.3f}')",
"Running mean: 654.889\nRunning variance: 123625.903\n"
]
],
[
[
"Let's compare this with `numpy`.",
"_____no_output_____"
]
],
[
[
"import numpy as np\n\ni = list(dataset.feature_names).index('mean area')\nprint(f'True mean: {np.mean(X[:, i]):.3f}')\nprint(f'True variance: {np.var(X[:, i]):.3f}')",
"True mean: 654.889\nTrue variance: 123625.903\n"
]
],
[
[
"The results seem to be exactly the same! The twist is that the running statistics won't be very accurate for the first few observations. In general though this doesn't matter too much. Some would even go as far as to say that this descrepancy is beneficial and acts as some sort of regularization...\n\nNow the idea is that we can compute the running statistics of each feature and scale them as they come along. The way to do this with `creme` is to use the `StandardScaler` class from the `preprocessing` module, as so:",
"_____no_output_____"
]
],
[
[
"from creme import preprocessing\n\nscaler = preprocessing.StandardScaler()\n\nfor xi, yi in stream.iter_sklearn_dataset(datasets.load_breast_cancer()):\n xi = scaler.fit_one(xi)",
"_____no_output_____"
]
],
[
[
"This is quite terse but let's break it down nonetheless. Every class in `creme` has a `fit_one(x, y)` method where all the magic happens. Now the important thing to notice is that the `fit_one` actually returns the output for the given input. This is one of the nice properties of online learning: inference can be done immediatly. In `creme` each call to a `Transformer`'s `fit_one` will return the transformed output. Meanwhile calling `fit_one` with a `Classifier` or a `Regressor` will return the predicted target for the given set of features. The twist is that the prediction is made *before* looking at the true target `y`. This means that we get a free hold-out prediction every time we call `fit_one`. This can be used to monitor the performance of the model as it trains, which is obviously nice to have.\n\nNow that we are scaling the data, we can start doing some actual machine learning. We're going to implement an online linear regression. Because all the data isn't available at once, we are obliged to do what is called *stochastic gradient descent*, which is a popular research topic and has a lot of variants. SGD is commonly used to train neural networks. The idea is that at each step we compute the loss between the target prediction and the truth. We then calculate the gradient, which is simply a set of derivatives with respect to each weight from the linear regression. Once we have obtained the gradient, we can update the weights by moving them in the opposite direction of the gradient. The amount by which the weights are moved typically depends on a *learning rate*, which is typically set by the user. Different optimizers have different ways of managing the weight update, and some handle the learning rate implicitely. Online linear regression can be done in `creme` with the `LinearRegression` class from the `linear_model` module. We'll be using plain and simple SGD using the `SGD` optimizer from the `optim` module. 
During training we'll measure the squared error between the truth and the predictions.",
"_____no_output_____"
]
],
[
[
"from creme import linear_model\nfrom creme import optim\n\nscaler = preprocessing.StandardScaler()\noptimizer = optim.SGD(lr=0.01)\nlog_reg = linear_model.LogisticRegression(optimizer)\n\ny_true = []\ny_pred = []\n\nfor xi, yi in stream.iter_sklearn_dataset(datasets.load_breast_cancer(), shuffle=True, seed=42):\n \n # Scale the features\n xi_scaled = scaler.fit_one(xi).transform_one(xi)\n \n # Fit the linear regression\n yi_pred = log_reg.predict_proba_one(xi_scaled)\n log_reg.fit_one(xi_scaled, yi)\n \n # Store the truth and the prediction\n y_true.append(yi)\n y_pred.append(yi_pred[True])\n \nprint(f'ROC AUC: {metrics.roc_auc_score(y_true, y_pred):.3f}')",
"ROC AUC: 0.990\n"
]
],
[
[
"The ROC AUC is significantly better than the one obtained from the cross-validation of scikit-learn's logisitic regression. However to make things really comparable it would be nice to compare with the same cross-validation procedure. `creme` has a `compat` module that contains utilities for making `creme` compatible with other Python libraries. Because we're doing regression we'll be using the `SKLRegressorWrapper`. We'll also be using `Pipeline` to encapsulate the logic of the `StandardScaler` and the `LogisticRegression` in one single object.",
"_____no_output_____"
]
],
[
[
"from creme import compat\nfrom creme import compose\n\n# We define a Pipeline, exactly like we did earlier for sklearn \nmodel = compose.Pipeline(\n ('scale', preprocessing.StandardScaler()),\n ('log_reg', linear_model.LogisticRegression())\n)\n\n# We make the Pipeline compatible with sklearn\nmodel = compat.convert_creme_to_sklearn(model)\n\n# We compute the CV scores using the same CV scheme and the same scoring\nscores = model_selection.cross_val_score(model, X, y, scoring=scorer, cv=cv)\n\n# Display the average score and it's standard deviation\nprint(f'ROC AUC: {scores.mean():.3f} (± {scores.std():.3f})')",
"ROC AUC: 0.964 (± 0.016)\n"
]
],
[
[
"This time the ROC AUC score is lower, which is what we would expect. Indeed online learning isn't as accurate as batch learning. However it all depends in what you're interested in. If you're only interested in predicting the next observation then the online learning regime would be better. That's why it's a bit hard to compare both approaches: they're both suited to different scenarios.",
"_____no_output_____"
],
[
"## Going further",
"_____no_output_____"
],
[
"There's a lot more to learn, and it all depends on what kind on your use case. Feel free to have a look at the [documentation](https://creme-ml.github.io/) to know what `creme` has available, and have a look the [example notebook](https://github.com/creme-ml/notebooks).\n\nHere a few resources if you want to do some reading:\n\n- [Online learning -- Wikipedia](https://www.wikiwand.com/en/Online_machine_learning)\n- [What is online machine learning? -- Max Pagels](https://medium.com/value-stream-design/online-machine-learning-515556ff72c5)\n- [Introduction to Online Learning -- USC course](http://www-bcf.usc.edu/~haipengl/courses/CSCI699/)\n- [Online Methods in Machine Learning -- MIT course](http://www.mit.edu/~rakhlin/6.883/)\n- [Online Learning: A Comprehensive Survey](https://arxiv.org/pdf/1802.02871.pdf)\n- [Streaming 101: The world beyond batch](https://www.oreilly.com/ideas/the-world-beyond-batch-streaming-101)\n- [Machine learning for data streams](https://www.cms.waikato.ac.nz/~abifet/book/contents.html)\n- [Data Stream Mining: A Practical Approach](https://www.cs.waikato.ac.nz/~abifet/MOA/StreamMining.pdf)\n",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
]
] |
4ac10b174ea6ac30adda064941de0fc78ecf62fe
| 30,174 |
ipynb
|
Jupyter Notebook
|
prework/.ipynb_checkpoints/creating_and_manipulating_tensors-checkpoint.ipynb
|
Kabongosalomon/Crash-Course-Machine-Learning-
|
ccbe645f892f5e70d06bfb497f9fb8217b895834
|
[
"Apache-2.0"
] | 1 |
2019-04-03T21:20:48.000Z
|
2019-04-03T21:20:48.000Z
|
prework/creating_and_manipulating_tensors.ipynb
|
Kabongosalomon/Crash-Course-Machine-Learning-
|
ccbe645f892f5e70d06bfb497f9fb8217b895834
|
[
"Apache-2.0"
] | null | null | null |
prework/creating_and_manipulating_tensors.ipynb
|
Kabongosalomon/Crash-Course-Machine-Learning-
|
ccbe645f892f5e70d06bfb497f9fb8217b895834
|
[
"Apache-2.0"
] | null | null | null | 27.430909 | 446 | 0.530589 |
[
[
[
"#### Copyright 2017 Google LLC.",
"_____no_output_____"
]
],
[
[
"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"_____no_output_____"
]
],
[
[
"# Creating and Manipulating Tensors",
"_____no_output_____"
],
[
"**Learning Objectives:**\n * Initialize and assign TensorFlow `Variable`s\n * Create and manipulate tensors\n * Refresh your memory about addition and multiplication in linear algebra (consult an introduction to matrix [addition](https://en.wikipedia.org/wiki/Matrix_addition) and [multiplication](https://en.wikipedia.org/wiki/Matrix_multiplication) if these topics are new to you)\n * Familiarize yourself with basic TensorFlow math and array operations",
"_____no_output_____"
]
],
[
[
"from __future__ import print_function\n\nimport tensorflow as tf\ntry:\n tf.contrib.eager.enable_eager_execution()\n print(\"TF imported with eager execution!\")\nexcept ValueError:\n print(\"TF already imported with eager execution!\")",
"TF imported with eager execution!\n"
]
],
[
[
"## Vector Addition\n\nYou can perform many typical mathematical operations on tensors ([TF API](https://www.tensorflow.org/api_guides/python/math_ops)). The code below creates the following vectors (1-D tensors), all having exactly six elements:\n\n* A `primes` vector containing prime numbers.\n* A `ones` vector containing all `1` values.\n* A vector created by performing element-wise addition over the first two vectors.\n* A vector created by doubling the elements in the `primes` vector.",
"_____no_output_____"
]
],
[
[
"primes = tf.constant([2, 3, 5, 7, 11, 13], dtype=tf.int32)\nprint(\"primes:\", primes)\nones = tf.ones([6], dtype=tf.int32)\nprint(\"ones:\", ones)\n\njust_beyond_primes = tf.add(primes, ones)\nprint(\"just_beyond_primes:\", just_beyond_primes)\n\ntwos = tf.constant([2, 2, 2, 2, 2, 2], dtype=tf.int32)\nprimes_doubled = primes * twos\nprint(\"primes_doubled:\", primes_doubled)",
"primes: tf.Tensor([ 2 3 5 7 11 13], shape=(6,), dtype=int32)\nones: tf.Tensor([1 1 1 1 1 1], shape=(6,), dtype=int32)\njust_beyond_primes: tf.Tensor([ 3 4 6 8 12 14], shape=(6,), dtype=int32)\nprimes_doubled: tf.Tensor([ 4 6 10 14 22 26], shape=(6,), dtype=int32)\n"
]
],
[
[
"Printing a tensor returns not only its **value**, but also its **shape** (discussed in the next section) and the **type of value stored** in the tensor. Calling the `numpy` method of a tensor returns the value of the tensor as a numpy array:",
"_____no_output_____"
]
],
[
[
"some_matrix = tf.constant([[1, 2, 3], [4, 5, 6]], dtype=tf.int32)\nprint(some_matrix)\nprint(\"\\nvalue of some_matrix is:\\n\", some_matrix.numpy())",
"tf.Tensor(\n[[1 2 3]\n [4 5 6]], shape=(2, 3), dtype=int32)\n\nvalue of some_matrix is:\n [[1 2 3]\n [4 5 6]]\n"
]
],
[
[
"### Tensor Shapes\n\nShapes are used to characterize the size and number of dimensions of a tensor. The shape of a tensor is expressed as `list`, with the `i`th element representing the size along dimension `i`. The length of the list then indicates the rank of the tensor (i.e., the number of dimensions).\n\nFor more information, see the [TensorFlow documentation](https://www.tensorflow.org/programmers_guide/tensors#shape).\n\nA few basic examples:",
"_____no_output_____"
]
],
[
[
"# A scalar (0-D tensor).\nscalar = tf.zeros([])\n\n# A vector with 3 elements.\nvector = tf.zeros([3])\n\n# A matrix with 2 rows and 3 columns.\nmatrix = tf.zeros([2, 3])\n\nprint('scalar has shape', scalar.get_shape(), 'and value:\\n', scalar.numpy())\nprint('vector has shape', vector.get_shape(), 'and value:\\n', vector.numpy())\nprint('matrix has shape', matrix.get_shape(), 'and value:\\n', matrix.numpy())",
"scalar has shape () and value:\n 0.0\nvector has shape (3,) and value:\n [0. 0. 0.]\nmatrix has shape (2, 3) and value:\n [[0. 0. 0.]\n [0. 0. 0.]]\n"
]
],
[
[
"### Broadcasting\n\nIn mathematics, you can only perform element-wise operations (e.g. *add* and *equals*) on tensors of the same shape. In TensorFlow, however, you may perform operations on tensors that would traditionally have been incompatible. TensorFlow supports **broadcasting** (a concept borrowed from numpy), where the smaller array in an element-wise operation is enlarged to have the same shape as the larger array. For example, via broadcasting:\n\n* If an operand requires a size `[6]` tensor, a size `[1]` or a size `[]` tensor can serve as an operand.\n* If an operation requires a size `[4, 6]` tensor, any of the following sizes can serve as an operand:\n * `[1, 6]`\n * `[6]`\n * `[]`\n* If an operation requires a size `[3, 5, 6]` tensor, any of the following sizes can serve as an operand:\n\n * `[1, 5, 6]`\n * `[3, 1, 6]`\n * `[3, 5, 1]`\n * `[1, 1, 1]`\n * `[5, 6]`\n * `[1, 6]`\n * `[6]`\n * `[1]`\n * `[]`\n\n**NOTE:** When a tensor is broadcast, its entries are conceptually **copied**. (They are not actually copied for performance reasons. Broadcasting was invented as a performance optimization.)\n\nThe full broadcasting ruleset is well described in the easy-to-read [numpy broadcasting documentation](http://docs.scipy.org/doc/numpy-1.10.1/user/basics.broadcasting.html).\n\nThe following code performs the same tensor arithmetic as before, but instead uses scalar values (instead of vectors containing all `1`s or all `2`s) and broadcasting.",
"_____no_output_____"
]
],
[
[
"primes = tf.constant([2, 3, 5, 7, 11, 13], dtype=tf.int32)\nprint(\"primes:\", primes)\n\none = tf.constant(1, dtype=tf.int32)\nprint(\"one:\", one)\n\njust_beyond_primes = tf.add(primes, one)\nprint(\"just_beyond_primes:\", just_beyond_primes)\n\ntwo = tf.constant(2, dtype=tf.int32)\nprimes_doubled = primes * two\nprint(\"primes_doubled:\", primes_doubled)",
"primes: tf.Tensor([ 2 3 5 7 11 13], shape=(6,), dtype=int32)\none: tf.Tensor(1, shape=(), dtype=int32)\njust_beyond_primes: tf.Tensor([ 3 4 6 8 12 14], shape=(6,), dtype=int32)\nprimes_doubled: tf.Tensor([ 4 6 10 14 22 26], shape=(6,), dtype=int32)\n"
]
],
[
[
"### Exercise #1: Arithmetic over vectors.\n\nPerform vector arithmetic to create a \"just_under_primes_squared\" vector, where the `i`th element is equal to the `i`th element in `primes` squared, minus 1. For example, the second element would be equal to `3 * 3 - 1 = 8`.\n\nMake use of either the `tf.multiply` or `tf.pow` ops to square the value of each element in the `primes` vector.",
"_____no_output_____"
]
],
[
[
"# Write your code for Task 1 here.\n\nprimes = tf.constant([2, 3, 5, 7, 11, 13], dtype=tf.int32)\nprint(\"primes:\", primes)\n\nm_one = tf.constant(-1, dtype=tf.int32)\ntwo = tf.constant(2, dtype=tf.int32)\n\nsquare = tf.multiply(primes,primes)\n# square = tf.pow(primes, two)\n\njust_under_primes_squared = tf.add(square, m_one)\nprint(\"just_under_primes_squared:\", just_under_primes_squared)\n\n\n# two = tf.constant(2, dtype=tf.int32)\n# primes_doubled = primes * two\n# print(\"primes_doubled:\", primes_doubled)",
"primes: tf.Tensor([ 2 3 5 7 11 13], shape=(6,), dtype=int32)\njust_under_primes_squared: tf.Tensor([ 3 8 24 48 120 168], shape=(6,), dtype=int32)\n"
]
],
[
[
"### Solution",
"_____no_output_____"
],
[
"Double-click __here__ for the solution.\n\n<!-- Your answer is below:\n \n# Task: Square each element in the primes vector, then subtract 1.\n\ndef solution(primes):\n primes_squared = tf.multiply(primes, primes)\n neg_one = tf.constant(-1, dtype=tf.int32)\n just_under_primes_squared = tf.add(primes_squared, neg_one)\n return just_under_primes_squared\n\ndef alternative_solution(primes):\n primes_squared = tf.pow(primes, 2)\n one = tf.constant(1, dtype=tf.int32)\n just_under_primes_squared = tf.subtract(primes_squared, one)\n return just_under_primes_squared\n\nprimes = tf.constant([2, 3, 5, 7, 11, 13], dtype=tf.int32)\njust_under_primes_squared = solution(primes)\nprint(\"just_under_primes_squared:\", just_under_primes_squared)\n-->\n",
"_____no_output_____"
],
[
"## Matrix Multiplication\n\nIn linear algebra, when multiplying two matrices, the number of *columns* of the first matrix must\nequal the number of *rows* in the second matrix.\n\n- It is **_valid_** to multiply a `3x4` matrix by a `4x2` matrix. This will result in a `3x2` matrix.\n- It is **_invalid_** to multiply a `4x2` matrix by a `3x4` matrix.",
"_____no_output_____"
]
],
[
[
"# A 3x4 matrix (2-d tensor).\nx = tf.constant([[5, 2, 4, 3], [5, 1, 6, -2], [-1, 3, -1, -2]],\n dtype=tf.int32)\n\n# A 4x2 matrix (2-d tensor).\ny = tf.constant([[2, 2], [3, 5], [4, 5], [1, 6]], dtype=tf.int32)\n\n# Multiply `x` by `y`; result is 3x2 matrix.\nmatrix_multiply_result = tf.matmul(x, y)\n\nprint(matrix_multiply_result)",
"tf.Tensor(\n[[35 58]\n [35 33]\n [ 1 -4]], shape=(3, 2), dtype=int32)\n"
]
],
[
[
"## Tensor Reshaping\n\nWith tensor addition and matrix multiplication each imposing constraints\non operands, TensorFlow programmers must frequently reshape tensors. \n\nYou can use the `tf.reshape` method to reshape a tensor. \nFor example, you can reshape a 8x2 tensor into a 2x8 tensor or a 4x4 tensor:",
"_____no_output_____"
]
],
[
[
"# Create an 8x2 matrix (2-D tensor).\nmatrix = tf.constant(\n [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16]],\n dtype=tf.int32)\n\nreshaped_2x8_matrix = tf.reshape(matrix, [2, 8])\nreshaped_4x4_matrix = tf.reshape(matrix, [4, 4])\n\nprint(\"Original matrix (8x2):\")\nprint(matrix.numpy())\nprint(\"Reshaped matrix (2x8):\")\nprint(reshaped_2x8_matrix.numpy())\nprint(\"Reshaped matrix (4x4):\")\nprint(reshaped_4x4_matrix.numpy())",
"Original matrix (8x2):\n[[ 1 2]\n [ 3 4]\n [ 5 6]\n [ 7 8]\n [ 9 10]\n [11 12]\n [13 14]\n [15 16]]\nReshaped matrix (2x8):\n[[ 1 2 3 4 5 6 7 8]\n [ 9 10 11 12 13 14 15 16]]\nReshaped matrix (4x4):\n[[ 1 2 3 4]\n [ 5 6 7 8]\n [ 9 10 11 12]\n [13 14 15 16]]\n"
]
],
[
[
"\nYou can also use `tf.reshape` to change the number of dimensions (the \"rank\") of the tensor.\nFor example, you could reshape that 8x2 tensor into a 3-D 2x2x4 tensor or a 1-D 16-element tensor.",
"_____no_output_____"
]
],
[
[
"# Create an 8x2 matrix (2-D tensor).\nmatrix = tf.constant(\n [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16]],\n dtype=tf.int32)\n\nreshaped_2x2x4_tensor = tf.reshape(matrix, [2, 2, 4])\none_dimensional_vector = tf.reshape(matrix, [16])\n\nprint(\"Original matrix (8x2):\")\nprint(matrix.numpy())\nprint(\"Reshaped 3-D tensor (2x2x4):\")\nprint(reshaped_2x2x4_tensor.numpy())\nprint(\"1-D vector:\")\nprint(one_dimensional_vector.numpy())",
"Original matrix (8x2):\n[[ 1 2]\n [ 3 4]\n [ 5 6]\n [ 7 8]\n [ 9 10]\n [11 12]\n [13 14]\n [15 16]]\nReshaped 3-D tensor (2x2x4):\n[[[ 1 2 3 4]\n [ 5 6 7 8]]\n\n [[ 9 10 11 12]\n [13 14 15 16]]]\n1-D vector:\n[ 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]\n"
]
],
[
[
"### Exercise #2: Reshape two tensors in order to multiply them.\n\nThe following two vectors are incompatible for matrix multiplication:\n\n * `a = tf.constant([5, 3, 2, 7, 1, 4])`\n * `b = tf.constant([4, 6, 3])`\n\nReshape these vectors into compatible operands for matrix multiplication.\nThen, invoke a matrix multiplication operation on the reshaped tensors.",
"_____no_output_____"
]
],
[
[
"# Write your code for Task 2 here.\n\na = tf.constant([5, 3, 2, 7, 1, 4])\nb = tf.constant([4, 6, 3])\n\nreshaped_a= tf.reshape(a, [2, 3])\nreshaped_b= tf.reshape(b, [3, 1])\n\n\nmatrix_multiply_ab = tf.matmul(reshaped_a, reshaped_b)\n\nprint(matrix_multiply_ab)",
"tf.Tensor(\n[[44]\n [46]], shape=(2, 1), dtype=int32)\n"
]
],
[
[
"Remember, when multiplying two matrices, the number of *columns* of the first matrix must equal the number of *rows* in the second matrix.\n\nOne possible solution is to reshape `a` into a 2x3 matrix and reshape `b` into a a 3x1 matrix, resulting in a 2x1 matrix after multiplication:",
"_____no_output_____"
],
[
"An alternative solution would be to reshape `a` into a 6x1 matrix and `b` into a 1x3 matrix, resulting in a 6x3 matrix after multiplication.",
"_____no_output_____"
]
],
[
[
"a = tf.constant([5, 3, 2, 7, 1, 4])\nb = tf.constant([4, 6, 3])\n\nreshaped_a = tf.reshape(a, [6, 1])\nreshaped_b = tf.reshape(b, [1, 3])\nc = tf.matmul(reshaped_a, reshaped_b)\n\nprint(\"reshaped_a (6x1):\")\nprint(reshaped_a.numpy())\nprint(\"reshaped_b (1x3):\")\nprint(reshaped_b.numpy())\nprint(\"reshaped_a x reshaped_b (6x3):\")\nprint(c.numpy())",
"reshaped_a (6x1):\n[[5]\n [3]\n [2]\n [7]\n [1]\n [4]]\nreshaped_b (1x3):\n[[4 6 3]]\nreshaped_a x reshaped_b (6x3):\n[[20 30 15]\n [12 18 9]\n [ 8 12 6]\n [28 42 21]\n [ 4 6 3]\n [16 24 12]]\n"
]
],
[
[
"### Solution",
"_____no_output_____"
],
[
"Double-click __here__ for the solution.\n\n<!-- Your answer is below:\n\n# Task: Reshape two tensors in order to multiply them\n\na = tf.constant([5, 3, 2, 7, 1, 4])\nb = tf.constant([4, 6, 3])\n\nreshaped_a = tf.reshape(a, [2, 3])\nreshaped_b = tf.reshape(b, [3, 1])\nc = tf.matmul(reshaped_a, reshaped_b)\n\nprint(\"reshaped_a (2x3):\")\nprint(reshaped_a.numpy())\nprint(\"reshaped_b (3x1):\")\nprint(reshaped_b.numpy())\nprint(\"reshaped_a x reshaped_b (2x1):\")\nprint(c.numpy())\n-->",
"_____no_output_____"
],
[
"## Variables, Initialization and Assignment\n\nSo far, all the operations we performed were on static values (`tf.constant`); calling `numpy()` always returned the same result. TensorFlow allows you to define `Variable` objects, whose values can be changed.\n\nWhen creating a variable, you can set an initial value explicitly, or you can use an initializer (like a distribution):",
"_____no_output_____"
]
],
[
[
"# Create a scalar variable with the initial value 3.\nv = tf.contrib.eager.Variable([3])\n\n# Create a vector variable of shape [1, 4], with random initial values,\n# sampled from a normal distribution with mean 1 and standard deviation 0.35.\nw = tf.contrib.eager.Variable(tf.random_normal([1, 4], mean=1.0, stddev=0.35))\n\nprint(\"v:\", v.numpy())\nprint(\"w:\", w.numpy())",
"v: [3]\nw: [[0.7422526 1.7744374 1.1272229 1.3453405]]\n"
]
],
[
[
"To change the value of a variable, use the `assign` op:",
"_____no_output_____"
]
],
[
[
"v = tf.contrib.eager.Variable([3])\nprint(v.numpy())\n\ntf.assign(v, [7])\nprint(v.numpy())\n\nv.assign([5])\nprint(v.numpy())",
"[3]\n[7]\n[5]\n"
]
],
[
[
"When assigning a new value to a variable, its shape must be equal to its previous shape:",
"_____no_output_____"
]
],
[
[
"v = tf.contrib.eager.Variable([[1, 2, 3], [4, 5, 6]])\nprint(v.numpy())\n\ntry:\n print(\"Assigning [7, 8, 9] to v\")\n v.assign([7, 8, 9])\nexcept ValueError as e:\n print(\"Exception:\", e)",
"[[1 2 3]\n [4 5 6]]\nAssigning [7, 8, 9] to v\nException: Shapes (2, 3) and (3,) are incompatible\n"
]
],
[
[
"There are many more topics about variables that we didn't cover here, such as loading and storing. To learn more, see the [TensorFlow docs](https://www.tensorflow.org/programmers_guide/variables).",
"_____no_output_____"
],
[
"### Exercise #3: Simulate 10 rolls of two dice.\n\nCreate a dice simulation, which generates a `10x3` 2-D tensor in which:\n\n * Columns `1` and `2` each hold one throw of one six-sided die (with values 1–6).\n * Column `3` holds the sum of Columns `1` and `2` on the same row.\n\nFor example, the first row might have the following values:\n\n * Column `1` holds `4`\n * Column `2` holds `3`\n * Column `3` holds `7`\n\nYou'll need to explore the [TensorFlow documentation](https://www.tensorflow.org/api_guides/python/array_ops) to solve this task.",
"_____no_output_____"
]
],
[
[
"# Write your code for Task 3 here.\n\n# Task: Simulate 10 throws of two dice. Store the results in a 10x3 matrix.\n\ndie1 = tf.contrib.eager.Variable(\n tf.random_uniform([10, 1], minval=1, maxval=7, dtype=tf.int32))\ndie2 = tf.contrib.eager.Variable(\n tf.random_uniform([10, 1], minval=1, maxval=7, dtype=tf.int32))\n\ndice_sum = tf.add(die1, die2)\nresulting_matrix = tf.concat(values=[die1, die2, dice_sum], axis=1)\n\nprint(resulting_matrix.numpy())",
"[[ 1 3 4]\n [ 3 1 4]\n [ 6 5 11]\n [ 4 3 7]\n [ 6 3 9]\n [ 5 1 6]\n [ 4 1 5]\n [ 3 4 7]\n [ 6 5 11]\n [ 1 1 2]]\n"
]
],
[
[
"We're going to place dice throws inside two separate 10x1 matrices, `die1` and `die2`. The summation of the dice rolls will be stored in `dice_sum`, then the resulting 10x3 matrix will be created by *concatenating* the three 10x1 matrices together into a single matrix.\n\nAlternatively, we could have placed dice throws inside a single 10x2 matrix, but adding different columns of the same matrix would be more complicated. We also could have placed dice throws inside two 1-D tensors (vectors), but doing so would require transposing the result.",
"_____no_output_____"
],
[
"### Solution",
"_____no_output_____"
],
[
"Double-click __here__ for the solution.\n\n<!-- Your answer is below:\n\n# Task: Simulate 10 throws of two dice. Store the results in a 10x3 matrix.\n\ndie1 = tf.contrib.eager.Variable(\n tf.random_uniform([10, 1], minval=1, maxval=7, dtype=tf.int32))\ndie2 = tf.contrib.eager.Variable(\n tf.random_uniform([10, 1], minval=1, maxval=7, dtype=tf.int32))\n\ndice_sum = tf.add(die1, die2)\nresulting_matrix = tf.concat(values=[die1, die2, dice_sum], axis=1)\n\nprint(resulting_matrix.numpy())\n-->",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
]
] |
4ac138b36b850715cc72e3723f78a8fdf0a28eff
| 2,968 |
ipynb
|
Jupyter Notebook
|
use_pickle.ipynb
|
yoojunwoong/machinelearning2
|
ad9e488c0607c13f8baab0248c875a462d0f1dc5
|
[
"Apache-2.0"
] | 1 |
2021-07-01T06:01:24.000Z
|
2021-07-01T06:01:24.000Z
|
use_pickle.ipynb
|
yoojunwoong/machinelearning2
|
ad9e488c0607c13f8baab0248c875a462d0f1dc5
|
[
"Apache-2.0"
] | null | null | null |
use_pickle.ipynb
|
yoojunwoong/machinelearning2
|
ad9e488c0607c13f8baab0248c875a462d0f1dc5
|
[
"Apache-2.0"
] | null | null | null | 17.255814 | 76 | 0.471361 |
[
[
[
"import pickle",
"_____no_output_____"
],
[
"favorite_load = pickle.load(open('./saves/favorite_save.pkl','rb'))\nfavorite_load",
"_____no_output_____"
],
[
"type(favorite_load)",
"_____no_output_____"
],
[
"favorite_load['tiger']",
"_____no_output_____"
],
[
"autompg_lr = pickle.load(open('./saves/autompg_lr.pkl','rb'))\nautompg_lr",
"_____no_output_____"
],
[
"type(autompg_lr)",
"_____no_output_____"
],
[
"autompg_lr.predict([[3504.0,8]])",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4ac13ded07441724121c6a7046878750705e83a1
| 10,568 |
ipynb
|
Jupyter Notebook
|
examples/notebooks/theta-model.ipynb
|
chengevo/statsmodels
|
c28e6479ace0f0965001c55fb652b2a431bbd158
|
[
"BSD-3-Clause"
] | null | null | null |
examples/notebooks/theta-model.ipynb
|
chengevo/statsmodels
|
c28e6479ace0f0965001c55fb652b2a431bbd158
|
[
"BSD-3-Clause"
] | null | null | null |
examples/notebooks/theta-model.ipynb
|
chengevo/statsmodels
|
c28e6479ace0f0965001c55fb652b2a431bbd158
|
[
"BSD-3-Clause"
] | null | null | null | 31.082353 | 319 | 0.563872 |
[
[
[
"# The Theta Model\n\nThe Theta model of Assimakopoulos & Nikolopoulos (2000) is a simple method for forecasting the involves fitting two $\\theta$-lines, forecasting the lines using a Simple Exponential Smoother, and then combining the forecasts from the two lines to produce the final forecast. The model is implemented in steps:\n\n\n1. Test for seasonality\n2. Deseasonalize if seasonality detected\n3. Estimate $\\alpha$ by fitting a SES model to the data and $b_0$ by OLS.\n4. Forecast the series\n5. Reseasonalize if the data was deseasonalized.\n\nThe seasonality test examines the ACF at the seasonal lag $m$. If this lag is significantly different from zero then the data is deseasonalize using `statsmodels.tsa.seasonal_decompose` use either a multiplicative method (default) or additive. \n\nThe parameters of the model are $b_0$ and $\\alpha$ where $b_0$ is estimated from the OLS regression\n\n$$\nX_t = a_0 + b_0 (t-1) + \\epsilon_t\n$$\n\nand $\\alpha$ is the SES smoothing parameter in\n\n$$\n\\tilde{X}_t = (1-\\alpha) X_t + \\alpha \\tilde{X}_{t-1}\n$$\n\nThe forecasts are then \n\n$$\n \\hat{X}_{T+h|T} = \\frac{\\theta-1}{\\theta} \\hat{b}_0\n \\left[h - 1 + \\frac{1}{\\hat{\\alpha}}\n - \\frac{(1-\\hat{\\alpha})^T}{\\hat{\\alpha}} \\right]\n + \\tilde{X}_{T+h|T}\n$$\n\nUltimately $\\theta$ only plays a role in determining how much the trend is damped. If $\\theta$ is very large, then the forecast of the model is identical to that from an Integrated Moving Average with a drift,\n\n$$\nX_t = X_{t-1} + b_0 + (\\alpha-1)\\epsilon_{t-1} + \\epsilon_t.\n$$\n\nFinally, the forecasts are reseasonalized if needed.\n\nThis module is based on:\n\n* Assimakopoulos, V., & Nikolopoulos, K. (2000). The theta model: a decomposition\n approach to forecasting. International journal of forecasting, 16(4), 521-530.\n* Hyndman, R. J., & Billah, B. (2003). Unmasking the Theta method.\n International Journal of Forecasting, 19(2), 287-290.\n* Fioruci, J. A., Pellegrini, T. 
R., Louzada, F., & Petropoulos, F.\n (2015). The optimized theta method. arXiv preprint arXiv:1503.03529.",
"_____no_output_____"
],
[
"## Imports\n\nWe start with the standard set of imports and some tweaks to the default matplotlib style.",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport pandas_datareader as pdr\nimport seaborn as sns\n\nplt.rc(\"figure\", figsize=(16, 8))\nplt.rc(\"font\", size=15)\nplt.rc(\"lines\", linewidth=3)\nsns.set_style(\"darkgrid\")",
"_____no_output_____"
]
],
[
[
"## Load some Data\n\nWe will first look at housing starts using US data. This series is clearly seasonal but does not have a clear trend during the same. ",
"_____no_output_____"
]
],
[
[
"reader = pdr.fred.FredReader([\"HOUST\"], start=\"1980-01-01\", end=\"2020-04-01\")\ndata = reader.read()\nhousing = data.HOUST\nhousing.index.freq = housing.index.inferred_freq\nax = housing.plot()",
"_____no_output_____"
]
],
[
[
"We fit specify the model without any options and fit it. The summary shows that the data was deseasonalized using the multiplicative method. The drift is modest and negative, and the smoothing parameter is fairly low. ",
"_____no_output_____"
]
],
[
[
"from statsmodels.tsa.forecasting.theta import ThetaModel\n\ntm = ThetaModel(housing)\nres = tm.fit()\nprint(res.summary())",
"_____no_output_____"
]
],
[
[
"The model is first and foremost a forecasting method. Forecasts are produced using the `forecast` method from fitted model. Below we produce a hedgehog plot by forecasting 2-years ahead every 2 years. \n\n**Note**: the default $\\theta$ is 2.",
"_____no_output_____"
]
],
[
[
"forecasts = {\"housing\": housing}\nfor year in range(1995, 2020, 2):\n sub = housing[: str(year)]\n res = ThetaModel(sub).fit()\n fcast = res.forecast(24)\n forecasts[str(year)] = fcast\nforecasts = pd.DataFrame(forecasts)\nax = forecasts[\"1995\":].plot(legend=False)\nchildren = ax.get_children()\nchildren[0].set_linewidth(4)\nchildren[0].set_alpha(0.3)\nchildren[0].set_color(\"#000000\")\nax.set_title(\"Housing Starts\")\nplt.tight_layout(pad=1.0)",
"_____no_output_____"
]
],
[
[
"We could alternatively fit the log of the data. Here it makes more sense to force the deseasonalizing to use the additive method, if needed. We also fit the model parameters using MLE. This method fits the IMA\n\n$$ X_t = X_{t-1} + \\gamma\\epsilon_{t-1} + \\epsilon_t $$\n\nwhere $\\hat{\\alpha}$ = $\\min(\\hat{\\gamma}+1, 0.9998)$ using `statsmodels.tsa.SARIMAX`. The parameters are similar although the drift is closer to zero.",
"_____no_output_____"
]
],
[
[
"tm = ThetaModel(np.log(housing), method=\"additive\")\nres = tm.fit(use_mle=True)\nprint(res.summary())",
"_____no_output_____"
]
],
[
[
"The forecast only depends on the forecast trend component,\n$$\n\\hat{b}_0\n \\left[h - 1 + \\frac{1}{\\hat{\\alpha}}\n - \\frac{(1-\\hat{\\alpha})^T}{\\hat{\\alpha}} \\right],\n$$\n\nthe forecast from the SES (which does not change with the horizon), and the seasonal. These three components are available using the `forecast_components`. This allows forecasts to be constructed using multiple choices of $\\theta$ using the weight expression above. ",
"_____no_output_____"
]
],
[
[
"res.forecast_components(12)",
"_____no_output_____"
]
],
[
[
"## Personal Consumption Expenditure\n\nWe next look at personal consumption expenditure. This series has a clear seasonal component and a drift. ",
"_____no_output_____"
]
],
[
[
"reader = pdr.fred.FredReader([\"NA000349Q\"], start=\"1980-01-01\", end=\"2020-04-01\")\npce = reader.read()\npce.columns = [\"PCE\"]\npce.index.freq = \"QS-OCT\"\n_ = pce.plot()",
"_____no_output_____"
]
],
[
[
"Since this series is always positive, we model the $\\ln$.",
"_____no_output_____"
]
],
[
[
"mod = ThetaModel(np.log(pce))\nres = mod.fit()\nprint(res.summary())",
"_____no_output_____"
]
],
[
[
"Next we explore differenced in the forecast as $\\theta$ changes. When $\\theta$ is close to 1, the drift is nearly absent. As $\\theta$ increases, the drift becomes more obvious.",
"_____no_output_____"
]
],
[
[
"forecasts = pd.DataFrame(\n {\n \"ln PCE\": np.log(pce.PCE),\n \"theta=1.2\": res.forecast(12, theta=1.2),\n \"theta=2\": res.forecast(12),\n \"theta=3\": res.forecast(12, theta=3),\n \"No damping\": res.forecast(12, theta=np.inf),\n }\n)\n_ = forecasts.tail(36).plot()\nplt.title(\"Forecasts of ln PCE\")\nplt.tight_layout(pad=1.0)",
"_____no_output_____"
]
],
[
[
"Finally, `plot_predict` can be used to visualize the predictions and prediction intervals which are constructed assuming the IMA is true.",
"_____no_output_____"
]
],
[
[
"ax = res.plot_predict(24, theta=2)",
"_____no_output_____"
]
],
[
[
"We conclude be producing a hedgehog plot using 2-year non-overlapping samples.",
"_____no_output_____"
]
],
[
[
"ln_pce = np.log(pce.PCE)\nforecasts = {\"ln PCE\": ln_pce}\nfor year in range(1995, 2020, 3):\n sub = ln_pce[: str(year)]\n res = ThetaModel(sub).fit()\n fcast = res.forecast(12)\n forecasts[str(year)] = fcast\nforecasts = pd.DataFrame(forecasts)\nax = forecasts[\"1995\":].plot(legend=False)\nchildren = ax.get_children()\nchildren[0].set_linewidth(4)\nchildren[0].set_alpha(0.3)\nchildren[0].set_color(\"#000000\")\nax.set_title(\"ln PCE\")\nplt.tight_layout(pad=1.0)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4ac1412dd96dac2ab272661859c73ddcde015edf
| 1,047,980 |
ipynb
|
Jupyter Notebook
|
Process Books/2 visualization for EDA.ipynb
|
NicolasHu11/DS_final
|
ac8b644dfd02b7519fcd23aff73e191a8a230f73
|
[
"MIT"
] | 1 |
2019-05-02T02:02:46.000Z
|
2019-05-02T02:02:46.000Z
|
Process Books/2 visualization for EDA.ipynb
|
NicolasHu11/DS_final
|
ac8b644dfd02b7519fcd23aff73e191a8a230f73
|
[
"MIT"
] | null | null | null |
Process Books/2 visualization for EDA.ipynb
|
NicolasHu11/DS_final
|
ac8b644dfd02b7519fcd23aff73e191a8a230f73
|
[
"MIT"
] | null | null | null | 1,176.184063 | 195,012 | 0.955663 |
[
[
[
"import pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\n%matplotlib inline",
"_____no_output_____"
],
[
"data_label = pd.read_csv(\"data(with_label).csv\")",
"_____no_output_____"
]
],
[
[
"### 30 day death age",
"_____no_output_____"
]
],
[
[
"fig = plt.figure(figsize=(12,6))\nsns.set_style('darkgrid')\nax = sns.violinplot(x=\"thirty_days\", hue=\"gender\", y=\"age\",data=data_label, split=True)\nplt.legend(loc='lower left')\nplt.xlabel(' ')\nplt.ylabel('Age (years)')\nplt.title('Age distributions for 30-day death')\n\n\nfig = plt.figure(figsize=(12,6))\nax = sns.violinplot(x=\"thirty_days\", hue=\"gender\", y=\"age\",data=data_label[data_label.age<300], split=True,)\nplt.legend(loc='lower left')\n#plt.ylim([0,100])\nplt.xlabel(' ')\nplt.ylabel('Age (years)')\nplt.title('Age distributions for 30-day death \\n (excluding ages > 300)')",
"C:\\Users\\admin\\Anaconda3\\lib\\site-packages\\scipy\\stats\\stats.py:1713: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.\n return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumval\n"
]
],
[
[
"## One year death age",
"_____no_output_____"
]
],
[
[
"fig = plt.figure(figsize=(12,6))\n\nax = sns.violinplot(x=\"one_year\", hue=\"gender\", y=\"age\",data=data_label, split=True)\nplt.legend(loc='lower left')\nplt.xlabel(' ')\nplt.ylabel('Age (years)')\nplt.title('Age distributions for 30-day death')\n\n\nfig = plt.figure(figsize=(12,6))\nax = sns.violinplot(x=\"one_year\", hue=\"gender\", y=\"age\",data=data_label[data_label.age<300], split=True,)\nplt.legend(loc='lower left')\nplt.xlabel(' ')\nplt.ylabel('Age (years)')\nplt.title('Age distributions for 30-day death \\n (excluding ages > 300)')",
"_____no_output_____"
]
],
[
[
"## sapsii",
"_____no_output_____"
]
],
[
[
"fig = plt.figure(figsize=(12,6))\n\nax = sns.violinplot(x=\"thirty_days\", hue=\"gender\", y=\"sapsii\",data=data_label, split=True)\nplt.legend(loc='lower left')\nplt.xlabel(' ')\nplt.ylabel('SAPS II score')\nplt.title('SAPS II distributions for 30-day death')\n\nfig = plt.figure(figsize=(12,6))\n\nax = sns.violinplot(x=\"one_year\", hue=\"gender\", y=\"sapsii\",data=data_label, split=True)\nplt.legend(loc='lower left')\nplt.xlabel(' ')\nplt.ylabel('SAPS II score')\nplt.title('SAPS II distributions for one year death')",
"_____no_output_____"
]
],
[
[
"## Sofa",
"_____no_output_____"
]
],
[
[
"fig = plt.figure(figsize=(12,6))\n\nax = sns.violinplot(x=\"thirty_days\", hue=\"gender\", y=\"sofa\",data=data_label, split=True)\n\nplt.legend(loc='lower left')\nplt.xlabel(' ')\nplt.ylabel('SAPS II score')\nplt.title('SOFA distributions for 30-day death')",
"_____no_output_____"
]
],
[
[
"## Cormorbidity",
"_____no_output_____"
]
],
[
[
"fig = plt.figure(figsize=(12,6))\n\nax = sns.violinplot(x=\"thirty_days\", hue=\"gender\", y=\"elixhauser_vanwalraven\",data=data_label, split=True)\n\nplt.legend(loc='lower left')\nplt.xlabel(' ')\nplt.ylabel('SAPS II score')\nplt.title('elixhauser_vanwalraven for 30-day death')\n\nfig = plt.figure(figsize=(12,6))\n\nax = sns.violinplot(x=\"thirty_days\", hue=\"gender\", y=\"elixhauser_sid29\",data=data_label, split=True)\n\nplt.legend(loc='lower left')\nplt.xlabel(' ')\nplt.ylabel('SAPS II score')\nplt.title('elixhauser_sid29 for 30-day death')\n\nfig = plt.figure(figsize=(12,6))\n\nax = sns.violinplot(x=\"thirty_days\", hue=\"gender\", y=\"elixhauser_sid30\",data=data_label, split=True)\n\nplt.legend(loc='lower left')\nplt.xlabel(' ')\nplt.ylabel('SAPS II score')\nplt.title('elixhauser_sid30 for 30-day death')",
"_____no_output_____"
]
],
[
[
"## urea_n_mean",
"_____no_output_____"
]
],
[
[
"fig = plt.figure(figsize=(12,6))\n\nax = sns.violinplot(x=\"thirty_days\", hue=\"gender\", y=\"urea_n_mean\",data=data_label, split=True)\n\nplt.legend(loc='lower left')\nplt.xlabel(' ')\nplt.ylabel('SAPS II score')\nplt.title('urea_n_mean for 30-day death')",
"_____no_output_____"
],
[
"fig = plt.figure(figsize=(12,6))\n\nax = sns.violinplot(x=\"thirty_days\", hue=\"gender\", y=\"rrt\",data=data_label, split=True)\n\nplt.legend(loc='lower left')\nplt.xlabel(' ')\nplt.ylabel(' ')\nplt.title('rrt for 30-day death')",
"_____no_output_____"
]
],
[
[
"## Correlation heatmap",
"_____no_output_____"
],
[
"### Remains features: age, gender, 'sapsii', 'sofa', 'thirty_days', 'one_year','oasis', 'lods', 'sirs', and other physiological parameters",
"_____no_output_____"
]
],
[
[
"#'platelets_mean','urea_n_mean', 'glucose_mean','resprate_mean', 'sysbp_mean', 'diasbp_mean', 'urine_mean', 'spo2_mean','temp_mean','hr_mean',\ndata = data_label.drop(columns=['subject_id', 'hadm_id', 'admittime', 'dischtime', 'deathtime', 'dod',\n 'first_careunit', 'last_careunit', 'marital_status',\n 'insurance', 'urea_n_min', 'urea_n_max', 'platelets_min',\n 'platelets_max', 'magnesium_max', 'albumin_min',\n 'calcium_min', 'resprate_min', 'resprate_max', \n 'glucose_min', 'glucose_max', 'hr_min', 'hr_max',\n 'sysbp_min', 'sysbp_max','diasbp_min',\n 'diasbp_max', 'temp_min', 'temp_max', \n 'urine_min', 'urine_max',\n 'elixhauser_vanwalraven', 'elixhauser_sid29', 'elixhauser_sid30',\n 'los_hospital', 'meanbp_min', 'meanbp_max', 'meanbp_mean', 'spo2_min',\n 'spo2_max', 'vent', 'rrt', 'urineoutput',\n 'icustay_age_group', 'admission_type',\n 'admission_location', 'discharge_location', 'ethnicity', 'diagnosis',\n 'time_before_death'])\ncorrelation = data.corr()",
"_____no_output_____"
],
[
"plt.figure(figsize=(10,10))\nsns.heatmap(correlation, vmax=1, square=True, annot=False, cmap=\"YlGnBu\")",
"_____no_output_____"
]
],
[
[
"## KDE for 30 day death",
"_____no_output_____"
]
],
[
[
"data_pos = data_label.loc[data_label.thirty_days == 1]\ndata_neg = data_label.loc[data_label.thirty_days == 0]",
"_____no_output_____"
],
[
"fig = plt.figure(figsize=(15,15))\n\n\nplt.subplot(331)\ndata_neg.platelets_min.plot.kde(color = 'red', alpha = 0.5)\ndata_pos.platelets_min.plot.kde(color = 'blue', alpha = 0.5)\nplt.title('platelets_min')\nplt.legend(labels=['Alive in 30 days', 'Dead in 30 days'])\n\nplt.subplot(332)\ndata_neg.age.plot.kde(color = 'red', alpha = 0.5)\ndata_pos.age.plot.kde(color = 'blue', alpha = 0.5)\nplt.title('Age')\nplt.legend(labels=['Alive in 30 days', 'Dead in 30 days'])\n\nplt.subplot(333)\ndata_neg.albumin_min.plot.kde(color = 'red', alpha = 0.5)\ndata_pos.albumin_min.plot.kde(color = 'blue', alpha = 0.5)\nplt.title('albumin_min')\nplt.legend(labels=['Alive in 30 days', 'Dead in 30 days'])\n\nplt.subplot(334)\ndata_neg.sysbp_min.plot.kde(color = 'red', alpha = 0.5)\ndata_pos.sysbp_min.plot.kde(color = 'blue', alpha = 0.5)\nplt.title('sysbp_min')\nplt.legend(labels=['Alive in 30 days', 'Dead in 30 days'])\n\nplt.subplot(335)\ndata_neg.temp_mean.plot.kde(color = 'red', alpha = 0.5)\ndata_pos.temp_mean.plot.kde(color = 'blue', alpha = 0.5)\nplt.title('temp_mean')\nplt.legend(labels=['Alive in 30 days', 'Dead in 30 days'])\n\nplt.subplot(336)\ndata_neg.resprate_max.plot.kde(color = 'red', alpha = 0.5)\ndata_pos.resprate_max.plot.kde(color = 'blue', alpha = 0.5)\nplt.title('resprate_max')\nplt.legend(labels=['Alive in 30 days', 'Dead in 30 days'])\n\nplt.subplot(337)\ndata_neg.urea_n_mean.plot.kde(color = 'red', alpha = 0.5)\ndata_pos.urea_n_mean.plot.kde(color = 'blue', alpha = 0.5)\nplt.title('urea_n_mean')\nplt.legend(labels=['Alive in 30 days', 'Dead in 30 days'])\n\nplt.subplot(338)\ndata_neg.vent.plot.kde(color = 'red', alpha = 0.5)\ndata_pos.vent.plot.kde(color = 'blue', alpha = 0.5)\nplt.title('vent')\nplt.legend(labels=['Alive in 30 days', 'Dead in 30 days'])\n\nplt.subplot(339)\ndata_neg.rrt.plot.kde(color = 'red', alpha = 0.5)\ndata_pos.rrt.plot.kde(color = 'blue', alpha = 0.5)\nplt.title('rrt')\nplt.legend(labels=['Alive in 30 days', 'Dead in 30 days'])",
"_____no_output_____"
],
[
"fig = plt.figure(figsize=(15,15))\nplt.subplot(321)\ndata_neg.sofa.plot.kde(color = 'red', alpha = 0.5)\ndata_pos.sofa.plot.kde(color = 'blue', alpha = 0.5)\nplt.title('sofa')\nplt.legend(labels=['Alive in 30 days', 'Dead in 30 days'])\n\nplt.subplot(322)\ndata_neg.sapsii.plot.kde(color = 'red', alpha = 0.5)\ndata_pos.sapsii.plot.kde(color = 'blue', alpha = 0.5)\nplt.title('sapsii')\nplt.legend(labels=['Alive in 30 days', 'Dead in 30 days'])\n\nplt.subplot(323)\ndata_neg.oasis.plot.kde(color = 'red', alpha = 0.5)\ndata_pos.oasis.plot.kde(color = 'blue', alpha = 0.5)\nplt.title('oasis')\nplt.legend(labels=['Alive in 30 days', 'Dead in 30 days'])\n\nplt.subplot(324)\ndata_neg.lods.plot.kde(color = 'red', alpha = 0.5)\ndata_pos.lods.plot.kde(color = 'blue', alpha = 0.5)\nplt.title('lods')\nplt.legend(labels=['Alive in 30 days', 'Dead in 30 days'])\n\nplt.subplot(325)\ndata_neg.sirs.plot.kde(color = 'red', alpha = 0.5)\ndata_pos.sirs.plot.kde(color = 'blue', alpha = 0.5)\nplt.title('sirs')\nplt.legend(labels=['Alive in 30 days', 'Dead in 30 days'])\n\n",
"_____no_output_____"
]
],
[
[
"## Pie chart",
"_____no_output_____"
]
],
[
[
"# Age groups\nage_category = np.floor(data_label['age']/10)\ncount = age_category.value_counts()\ncount['10-20'] = 345\ncount['20-30'] = 1860\ncount['30-40'] = 2817\ncount['40-50'] = 5716\ncount['50-60'] = 10190\ncount['60-70'] = 12300\ncount['70-80'] = 12638\ncount['80-89'] = 9233\ncount['older than 89'] = 2897\ncount = count.drop([7.0, 6.0, 5.0, 8.0, 4.0, 30.0, 3.0, 2.0, 1.0, 31.0])\ncount",
"_____no_output_____"
],
[
"fig = plt.figure(figsize=(25,25))\nplt.rcParams.update({'font.size': 18})\n#explode = (0, 0.15, 0)\ncolors = ['#79bd9a','#f4f7f7','#aacfd0','#79a8a9','#a8dba8']\n #f4f7f7 #aacfd0 #79a8a9 #a8dba8 #79bd9a\nplt.subplot(321)\ndata_label.admission_type.value_counts().plot.pie( colors = colors, autopct='%1.1f%%')\nplt.title('Admission type')\nplt.ylabel('')\n\nplt.subplot(322)\nplotting = (data_label.admission_location.value_counts(dropna=False))\nplotting['OTHER'] = plotting['TRANSFER FROM SKILLED NUR'] + plotting['TRANSFER FROM OTHER HEALT'] + plotting['** INFO NOT AVAILABLE **']+plotting['HMO REFERRAL/SICK']+plotting['TRSF WITHIN THIS FACILITY']\nplotting = plotting.drop(['TRANSFER FROM SKILLED NUR', 'TRANSFER FROM OTHER HEALT', '** INFO NOT AVAILABLE **','HMO REFERRAL/SICK','TRSF WITHIN THIS FACILITY'])\nplotting.plot.pie( colors = colors, autopct='%1.1f%%')\nplt.title('Admission location')\nplt.ylabel('')\n\nplt.subplot(323)\ncount.plot.pie( colors = colors, autopct='%1.1f%%')\nplt.title('Age groups')\nplt.ylabel('')\n\n\nplt.subplot(324)\ndata_label.insurance.value_counts().plot.pie( colors = colors, autopct='%1.1f%%')\nplt.title('Insurance provider')\nplt.ylabel('')\n\n\n\n\n\n#admission_location \n#discharge_location \n#ethnicity \n#diagnosis\n",
"_____no_output_____"
],
[
"fig = plt.figure(figsize=(8,8))\nplt.rcParams.update({'font.size': 15})\nexplode = (0, 0.1)\ndata_label.one_year.value_counts().plot.pie( colors = colors, autopct='%1.1f%%',explode = explode, startangle = 90)\nplt.title('Patient died in 1 year')\nplt.ylabel('')",
"_____no_output_____"
],
[
"fig = plt.figure(figsize=(8,8))\nplt.rcParams.update({'font.size': 15})\ndata_label.thirty_days.value_counts().plot.pie( colors = colors, autopct='%1.1f%%',explode = explode, startangle = 90)\nplt.title('Patient died in 30 days')\nplt.ylabel('')",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
4ac14841190589b6590e770b6ab23d399b75085b
| 57,852 |
ipynb
|
Jupyter Notebook
|
basics/04_gradient_descent_implementation.ipynb
|
michalastocki/convnet-course
|
71a60baa2726ad0fa5855f6cc78ab76afee097b2
|
[
"MIT"
] | null | null | null |
basics/04_gradient_descent_implementation.ipynb
|
michalastocki/convnet-course
|
71a60baa2726ad0fa5855f6cc78ab76afee097b2
|
[
"MIT"
] | null | null | null |
basics/04_gradient_descent_implementation.ipynb
|
michalastocki/convnet-course
|
71a60baa2726ad0fa5855f6cc78ab76afee097b2
|
[
"MIT"
] | 3 |
2020-02-07T17:34:24.000Z
|
2021-05-25T18:44:57.000Z
| 202.989474 | 24,090 | 0.878587 |
[
[
[
"<a href=\"https://colab.research.google.com/github/krakowiakpawel9/convnet-course/blob/master/basics/04_gradient_descent_implementation.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"* @author: [email protected] \n* @site: e-smartdata.org",
"_____no_output_____"
],
[
"## Implementacja Stochastycznego Gradientu (Gradient Descent)\n\nUżyjemy algorytmu stochastycznego spadku do znalezienia minimum funkcji straty określonej wzorem: $L(w) = w^{2}-4w $. \n\nPochodna tej funkcji to $\\frac{dL}{dw}=2*w-4$ ",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns \nsns.set()\n\nw = np.arange(-10, 14, 0.1)\nLoss = w**2 - 4*w\n\npoint = -4\nplt.figure(figsize=(8, 6))\nplt.plot(point, point ** 2 -4 * point, 'ro')\nplt.plot(w, Loss)\nplt.title('Loss function')",
"_____no_output_____"
],
[
"def gradient_descent(df=lambda w: 2 * w - 4, learning_rate=0.01, max_iters=10000, precision=0.000001, w_0=-1):\n \"\"\"\n parametry:\n ----------\n df: gradient funkcji do optymalizacji\n learning_rate: wskaźnik uczenia\n max_iters: maksymalna liczba iteracji\n precision: precyzja sprawdza kiedy zatrzymać działanie algorytmu\n w_0: inicjalizacja wagi początkowej\n \"\"\"\n\n # licznik iteracji\n iters = 0\n # kontrola wartości kroku kolejnego spadku\n previous_step_size = 1\n\n points = []\n while previous_step_size > precision and iters < max_iters:\n w_prev = w_0\n w_0 = w_0 - learning_rate * df(w_prev)\n previous_step_size = abs(w_0 - w_prev)\n iters += 1\n points.append(w_0)\n print('Iter #{}: current point: {}'.format(iters, w_0))\n\n print('Minimum lokalne znajduje się w punkcie: {}'.format(w_0))\n return points",
"_____no_output_____"
],
[
"points = gradient_descent(w_0 = -10, learning_rate=0.1)",
"Iter #1: current point: -7.6\nIter #2: current point: -5.68\nIter #3: current point: -4.144\nIter #4: current point: -2.9152\nIter #5: current point: -1.9321599999999999\nIter #6: current point: -1.1457279999999999\nIter #7: current point: -0.5165823999999998\nIter #8: current point: -0.01326591999999982\nIter #9: current point: 0.3893872640000002\nIter #10: current point: 0.7115098112000002\nIter #11: current point: 0.9692078489600002\nIter #12: current point: 1.1753662791680002\nIter #13: current point: 1.3402930233344001\nIter #14: current point: 1.47223441866752\nIter #15: current point: 1.577787534934016\nIter #16: current point: 1.6622300279472129\nIter #17: current point: 1.7297840223577703\nIter #18: current point: 1.7838272178862162\nIter #19: current point: 1.827061774308973\nIter #20: current point: 1.8616494194471784\nIter #21: current point: 1.8893195355577428\nIter #22: current point: 1.9114556284461943\nIter #23: current point: 1.9291645027569555\nIter #24: current point: 1.9433316022055644\nIter #25: current point: 1.9546652817644516\nIter #26: current point: 1.9637322254115612\nIter #27: current point: 1.970985780329249\nIter #28: current point: 1.9767886242633992\nIter #29: current point: 1.9814308994107193\nIter #30: current point: 1.9851447195285754\nIter #31: current point: 1.9881157756228602\nIter #32: current point: 1.9904926204982882\nIter #33: current point: 1.9923940963986306\nIter #34: current point: 1.9939152771189046\nIter #35: current point: 1.9951322216951237\nIter #36: current point: 1.996105777356099\nIter #37: current point: 1.9968846218848793\nIter #38: current point: 1.9975076975079034\nIter #39: current point: 1.9980061580063226\nIter #40: current point: 1.998404926405058\nIter #41: current point: 1.9987239411240465\nIter #42: current point: 1.998979152899237\nIter #43: current point: 1.9991833223193898\nIter #44: current point: 1.999346657855512\nIter #45: current point: 1.9994773262844094\nIter #46: current point: 
1.9995818610275276\nIter #47: current point: 1.999665488822022\nIter #48: current point: 1.9997323910576177\nIter #49: current point: 1.9997859128460942\nIter #50: current point: 1.9998287302768754\nIter #51: current point: 1.9998629842215003\nIter #52: current point: 1.9998903873772003\nIter #53: current point: 1.9999123099017602\nIter #54: current point: 1.999929847921408\nIter #55: current point: 1.9999438783371264\nIter #56: current point: 1.9999551026697011\nIter #57: current point: 1.999964082135761\nIter #58: current point: 1.9999712657086088\nIter #59: current point: 1.999977012566887\nIter #60: current point: 1.9999816100535095\nIter #61: current point: 1.9999852880428075\nIter #62: current point: 1.999988230434246\nIter #63: current point: 1.9999905843473968\nIter #64: current point: 1.9999924674779175\nIter #65: current point: 1.999993973982334\nIter #66: current point: 1.9999951791858672\nIter #67: current point: 1.9999961433486937\nMinimum lokalne znajduje się w punkcie: 1.9999961433486937\n"
],
[
"point = -4\nplt.figure(figsize=(8, 6))\nplt.plot(w, Loss)\nplt.title('Loss function')\nfor point in points:\n plt.plot(point, point ** 2 -4 * point, 'ro')",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
4ac1648612e7a76d3fd8e3396053e3fb0bf67717
| 42,695 |
ipynb
|
Jupyter Notebook
|
logic.ipynb
|
upsidedownpancake/aima-python
|
4f6c7167872d833714625cf3d25cc1f6f7cf15fe
|
[
"MIT"
] | 1 |
2018-05-12T17:17:05.000Z
|
2018-05-12T17:17:05.000Z
|
logic.ipynb
|
upsidedownpancake/aima-python
|
4f6c7167872d833714625cf3d25cc1f6f7cf15fe
|
[
"MIT"
] | null | null | null |
logic.ipynb
|
upsidedownpancake/aima-python
|
4f6c7167872d833714625cf3d25cc1f6f7cf15fe
|
[
"MIT"
] | null | null | null | 29.424535 | 911 | 0.558754 |
[
[
[
"# Logic: `logic.py`; Chapters 6-8",
"_____no_output_____"
],
[
"This notebook describes the [logic.py](https://github.com/aimacode/aima-python/blob/master/logic.py) module, which covers Chapters 6 (Logical Agents), 7 (First-Order Logic) and 8 (Inference in First-Order Logic) of *[Artificial Intelligence: A Modern Approach](http://aima.cs.berkeley.edu)*. See the [intro notebook](https://github.com/aimacode/aima-python/blob/master/intro.ipynb) for instructions.\n\nWe'll start by looking at `Expr`, the data type for logical sentences, and the convenience function `expr`. We'll be covering two types of knowledge bases, `PropKB` - Propositional logic knowledge base and `FolKB` - First order logic knowledge base. We will construct a propositional knowledge base of a specific situation in the Wumpus World. We will next go through the `tt_entails` function and experiment with it a bit. The `pl_resolution` and `pl_fc_entails` functions will come next. We'll study forward chaining and backward chaining algorithms for `FolKB` and use them on `crime_kb` knowledge base.\n\nBut the first step is to load the code:",
"_____no_output_____"
]
],
[
[
"from utils import *\nfrom logic import *",
"_____no_output_____"
]
],
[
[
"## Logical Sentences",
"_____no_output_____"
],
[
"The `Expr` class is designed to represent any kind of mathematical expression. The simplest type of `Expr` is a symbol, which can be defined with the function `Symbol`:",
"_____no_output_____"
]
],
[
[
"Symbol('x')",
"_____no_output_____"
]
],
[
[
"Or we can define multiple symbols at the same time with the function `symbols`:",
"_____no_output_____"
]
],
[
[
"(x, y, P, Q, f) = symbols('x, y, P, Q, f')",
"_____no_output_____"
]
],
[
[
"We can combine `Expr`s with the regular Python infix and prefix operators. Here's how we would form the logical sentence \"P and not Q\":",
"_____no_output_____"
]
],
[
[
"P & ~Q",
"_____no_output_____"
]
],
[
[
"This works because the `Expr` class overloads the `&` operator with this definition:\n\n```python\ndef __and__(self, other): return Expr('&', self, other)```\n \nand does similar overloads for the other operators. An `Expr` has two fields: `op` for the operator, which is always a string, and `args` for the arguments, which is a tuple of 0 or more expressions. By \"expression,\" I mean either an instance of `Expr`, or a number. Let's take a look at the fields for some `Expr` examples:",
"_____no_output_____"
]
],
[
[
"sentence = P & ~Q\n\nsentence.op",
"_____no_output_____"
],
[
"sentence.args",
"_____no_output_____"
],
[
"P.op",
"_____no_output_____"
],
[
"P.args",
"_____no_output_____"
],
[
"Pxy = P(x, y)\n\nPxy.op",
"_____no_output_____"
],
[
"Pxy.args",
"_____no_output_____"
]
],
[
[
"It is important to note that the `Expr` class does not define the *logic* of Propositional Logic sentences; it just gives you a way to *represent* expressions. Think of an `Expr` as an [abstract syntax tree](https://en.wikipedia.org/wiki/Abstract_syntax_tree). Each of the `args` in an `Expr` can be either a symbol, a number, or a nested `Expr`. We can nest these trees to any depth. Here is a deply nested `Expr`:",
"_____no_output_____"
]
],
[
[
"3 * f(x, y) + P(y) / 2 + 1",
"_____no_output_____"
]
],
[
[
"## Operators for Constructing Logical Sentences\n\nHere is a table of the operators that can be used to form sentences. Note that we have a problem: we want to use Python operators to make sentences, so that our programs (and our interactive sessions like the one here) will show simple code. But Python does not allow implication arrows as operators, so for now we have to use a more verbose notation that Python does allow: `|'==>'|` instead of just `==>`. Alternately, you can always use the more verbose `Expr` constructor forms:\n\n| Operation | Book | Python Infix Input | Python Output | Python `Expr` Input\n|--------------------------|----------------------|-------------------------|---|---|\n| Negation | ¬ P | `~P` | `~P` | `Expr('~', P)`\n| And | P ∧ Q | `P & Q` | `P & Q` | `Expr('&', P, Q)`\n| Or | P ∨ Q | `P`<tt> | </tt>`Q`| `P`<tt> | </tt>`Q` | `Expr('`|`', P, Q)`\n| Inequality (Xor) | P ≠ Q | `P ^ Q` | `P ^ Q` | `Expr('^', P, Q)`\n| Implication | P → Q | `P` <tt>|</tt>`'==>'`<tt>|</tt> `Q` | `P ==> Q` | `Expr('==>', P, Q)`\n| Reverse Implication | Q ← P | `Q` <tt>|</tt>`'<=='`<tt>|</tt> `P` |`Q <== P` | `Expr('<==', Q, P)`\n| Equivalence | P ↔ Q | `P` <tt>|</tt>`'<=>'`<tt>|</tt> `Q` |`P <=> Q` | `Expr('<=>', P, Q)`\n\nHere's an example of defining a sentence with an implication arrow:",
"_____no_output_____"
]
],
[
[
"~(P & Q) |'==>'| (~P | ~Q)",
"_____no_output_____"
]
],
[
[
"## `expr`: a Shortcut for Constructing Sentences\n\nIf the `|'==>'|` notation looks ugly to you, you can use the function `expr` instead:",
"_____no_output_____"
]
],
[
[
"expr('~(P & Q) ==> (~P | ~Q)')",
"_____no_output_____"
]
],
[
[
"`expr` takes a string as input, and parses it into an `Expr`. The string can contain arrow operators: `==>`, `<==`, or `<=>`, which are handled as if they were regular Python infix operators. And `expr` automatically defines any symbols, so you don't need to pre-define them:",
"_____no_output_____"
]
],
[
[
"expr('sqrt(b ** 2 - 4 * a * c)')",
"_____no_output_____"
]
],
[
[
"For now that's all you need to know about `expr`. If you are interested, we explain the messy details of how `expr` is implemented and how `|'==>'|` is handled in the appendix.",
"_____no_output_____"
],
[
"## Propositional Knowledge Bases: `PropKB`\n\nThe class `PropKB` can be used to represent a knowledge base of propositional logic sentences.\n\nWe see that the class `KB` has four methods, apart from `__init__`. A point to note here: the `ask` method simply calls the `ask_generator` method. Thus, this one has already been implemented, and what you'll have to actually implement when you create your own knowledge base class (though you'll probably never need to, considering the ones we've created for you) will be the `ask_generator` function and not the `ask` function itself.\n\nThe class `PropKB` now.\n* `__init__(self, sentence=None)` : The constructor `__init__` creates a single field `clauses` which will be a list of all the sentences of the knowledge base. Note that each one of these sentences will be a 'clause' i.e. a sentence which is made up of only literals and `or`s.\n* `tell(self, sentence)` : When you want to add a sentence to the KB, you use the `tell` method. This method takes a sentence, converts it to its CNF, extracts all the clauses, and adds all these clauses to the `clauses` field. So, you need not worry about `tell`ing only clauses to the knowledge base. You can `tell` the knowledge base a sentence in any form that you wish; converting it to CNF and adding the resulting clauses will be handled by the `tell` method.\n* `ask_generator(self, query)` : The `ask_generator` function is used by the `ask` function. It calls the `tt_entails` function, which in turn returns `True` if the knowledge base entails query and `False` otherwise. The `ask_generator` itself returns an empty dict `{}` if the knowledge base entails query and `None` otherwise. This might seem a little bit weird to you. 
After all, it makes more sense just to return a `True` or a `False` instead of the `{}` or `None` But this is done to maintain consistency with the way things are in First-Order Logic, where an `ask_generator` function is supposed to return all the substitutions that make the query true. Hence the dict, to return all these substitutions. I will be mostly be using the `ask` function which returns a `{}` or a `False`, but if you don't like this, you can always use the `ask_if_true` function which returns a `True` or a `False`.\n* `retract(self, sentence)` : This function removes all the clauses of the sentence given, from the knowledge base. Like the `tell` function, you don't have to pass clauses to remove them from the knowledge base; any sentence will do fine. The function will take care of converting that sentence to clauses and then remove those.",
"_____no_output_____"
],
[
"## Wumpus World KB\nLet us create a `PropKB` for the wumpus world with the sentences mentioned in `section 7.4.3`.",
"_____no_output_____"
]
],
[
[
"wumpus_kb = PropKB()",
"_____no_output_____"
]
],
[
[
"We define the symbols we use in our clauses.<br/>\n$P_{x, y}$ is true if there is a pit in `[x, y]`.<br/>\n$B_{x, y}$ is true if the agent senses breeze in `[x, y]`.<br/>",
"_____no_output_____"
]
],
[
[
"P11, P12, P21, P22, P31, B11, B21 = expr('P11, P12, P21, P22, P31, B11, B21')",
"_____no_output_____"
]
],
[
[
"Now we tell sentences based on `section 7.4.3`.<br/>\nThere is no pit in `[1,1]`.",
"_____no_output_____"
]
],
[
[
"wumpus_kb.tell(~P11)",
"_____no_output_____"
]
],
[
[
"A square is breezy if and only if there is a pit in a neighboring square. This has to be stated for each square but for now, we include just the relevant squares.",
"_____no_output_____"
]
],
[
[
"wumpus_kb.tell(B11 | '<=>' | ((P12 | P21)))\nwumpus_kb.tell(B21 | '<=>' | ((P11 | P22 | P31)))",
"_____no_output_____"
]
],
[
[
"Now we include the breeze percepts for the first two squares leading up to the situation in `Figure 7.3(b)`",
"_____no_output_____"
]
],
[
[
"wumpus_kb.tell(~B11)\nwumpus_kb.tell(B21)",
"_____no_output_____"
]
],
[
[
"We can check the clauses stored in a `KB` by accessing its `clauses` variable",
"_____no_output_____"
]
],
[
[
"wumpus_kb.clauses",
"_____no_output_____"
]
],
[
[
"We see that the equivalence $B_{1, 1} \\iff (P_{1, 2} \\lor P_{2, 1})$ was automatically converted to two implications which were inturn converted to CNF which is stored in the `KB`.<br/>\n$B_{1, 1} \\iff (P_{1, 2} \\lor P_{2, 1})$ was split into $B_{1, 1} \\implies (P_{1, 2} \\lor P_{2, 1})$ and $B_{1, 1} \\Longleftarrow (P_{1, 2} \\lor P_{2, 1})$.<br/>\n$B_{1, 1} \\implies (P_{1, 2} \\lor P_{2, 1})$ was converted to $P_{1, 2} \\lor P_{2, 1} \\lor \\neg B_{1, 1}$.<br/>\n$B_{1, 1} \\Longleftarrow (P_{1, 2} \\lor P_{2, 1})$ was converted to $\\neg (P_{1, 2} \\lor P_{2, 1}) \\lor B_{1, 1}$ which becomes $(\\neg P_{1, 2} \\lor B_{1, 1}) \\land (\\neg P_{2, 1} \\lor B_{1, 1})$ after applying De Morgan's laws and distributing the disjunction.<br/>\n$B_{2, 1} \\iff (P_{1, 1} \\lor P_{2, 2} \\lor P_{3, 2})$ is converted in similar manner.",
"_____no_output_____"
],
[
"## Inference in Propositional Knowledge Base\nIn this section we will look at two algorithms to check if a sentence is entailed by the `KB`. Our goal is to decide whether $\\text{KB} \\vDash \\alpha$ for some sentence $\\alpha$.\n### Truth Table Enumeration\nIt is a model-checking approach which, as the name suggests, enumerates all possible models in which the `KB` is true and checks if $\\alpha$ is also true in these models. We list the $n$ symbols in the `KB` and enumerate the $2^{n}$ models in a depth-first manner and check the truth of `KB` and $\\alpha$.",
"_____no_output_____"
]
],
[
[
"%psource tt_check_all",
"_____no_output_____"
]
],
[
[
"Note that `tt_entails()` takes an `Expr` which is a conjunction of clauses as the input instead of the `KB` itself. You can use the `ask_if_true()` method of `PropKB` which does all the required conversions. Let's check what `wumpus_kb` tells us about $P_{1, 1}$.",
"_____no_output_____"
]
],
[
[
"wumpus_kb.ask_if_true(~P11), wumpus_kb.ask_if_true(P11)",
"_____no_output_____"
]
],
[
[
"Looking at Figure 7.9 we see that in all models in which the knowledge base is `True`, $P_{1, 1}$ is `False`. It makes sense that `ask_if_true()` returns `True` for $\\alpha = \\neg P_{1, 1}$ and `False` for $\\alpha = P_{1, 1}$. This begs the question, what if $\\alpha$ is `True` in only a portion of all models. Do we return `True` or `False`? This doesn't rule out the possibility of $\\alpha$ being `True` but it is not entailed by the `KB` so we return `False` in such cases. We can see this is the case for $P_{2, 2}$ and $P_{3, 1}$.",
"_____no_output_____"
]
],
[
[
"wumpus_kb.ask_if_true(~P22), wumpus_kb.ask_if_true(P22)",
"_____no_output_____"
]
],
[
[
"### Proof by Resolution\nRecall that our goal is to check whether $\\text{KB} \\vDash \\alpha$ i.e. is $\\text{KB} \\implies \\alpha$ true in every model. Suppose we wanted to check if $P \\implies Q$ is valid. We check the satisfiability of $\\neg (P \\implies Q)$, which can be rewritten as $P \\land \\neg Q$. If $P \\land \\neg Q$ is unsatisfiable, then $P \\implies Q$ must be true in all models. This gives us the result \"$\\text{KB} \\vDash \\alpha$ <em>if and only if</em> $\\text{KB} \\land \\neg \\alpha$ is unsatisfiable\".<br/>\nThis technique corresponds to <em>proof by <strong>contradiction</strong></em>, a standard mathematical proof technique. We assume $\\alpha$ to be false and show that this leads to a contradiction with known axioms in $\\text{KB}$. We obtain a contradiction by making valid inferences using inference rules. In this proof we use a single inference rule, <strong>resolution</strong> which states $(l_1 \\lor \\dots \\lor l_k) \\land (m_1 \\lor \\dots \\lor m_n) \\land (l_i \\iff \\neg m_j) \\implies l_1 \\lor \\dots \\lor l_{i - 1} \\lor l_{i + 1} \\lor \\dots \\lor l_k \\lor m_1 \\lor \\dots \\lor m_{j - 1} \\lor m_{j + 1} \\lor \\dots \\lor m_n$. Applying the resolution yeilds us a clause which we add to the KB. We keep doing this until:\n\n* There are no new clauses that can be added, in which case $\\text{KB} \\nvDash \\alpha$.\n* Two clauses resolve to yield the <em>empty clause</em>, in which case $\\text{KB} \\vDash \\alpha$.\n\nThe <em>empty clause</em> is equivalent to <em>False</em> because it arises only from resolving two complementary\nunit clauses such as $P$ and $\\neg P$ which is a contradiction as both $P$ and $\\neg P$ can't be <em>True</em> at the same time.",
"_____no_output_____"
]
],
[
[
"%psource pl_resolution",
"_____no_output_____"
],
[
"pl_resolution(wumpus_kb, ~P11), pl_resolution(wumpus_kb, P11)",
"_____no_output_____"
],
[
"pl_resolution(wumpus_kb, ~P22), pl_resolution(wumpus_kb, P22)",
"_____no_output_____"
]
],
[
[
"## First-Order Logic Knowledge Bases: `FolKB`\n\nThe class `FolKB` can be used to represent a knowledge base of First-order logic sentences. You would initialize and use it the same way as you would for `PropKB` except that the clauses are first-order definite clauses. We will see how to write such clauses to create a database and query them in the following sections.",
"_____no_output_____"
],
[
"## Criminal KB\nIn this section we create a `FolKB` based on the following paragraph.<br/>\n<em>The law says that it is a crime for an American to sell weapons to hostile nations. The country Nono, an enemy of America, has some missiles, and all of its missiles were sold to it by Colonel West, who is American.</em><br/>\nThe first step is to extract the facts and convert them into first-order definite clauses. Extracting the facts from data alone is a challenging task. Fortunately, we have a small paragraph and can do extraction and conversion manually. We'll store the clauses in list aptly named `clauses`.",
"_____no_output_____"
]
],
[
[
"clauses = []",
"_____no_output_____"
]
],
[
[
"<em>“... it is a crime for an American to sell weapons to hostile nations”</em><br/>\nThe keywords to look for here are 'crime', 'American', 'sell', 'weapon' and 'hostile'. We use predicate symbols to make meaning of them.\n\n* `Criminal(x)`: `x` is a criminal\n* `American(x)`: `x` is an American\n* `Sells(x ,y, z)`: `x` sells `y` to `z`\n* `Weapon(x)`: `x` is a weapon\n* `Hostile(x)`: `x` is a hostile nation\n\nLet us now combine them with appropriate variable naming to depict the meaning of the sentence. The criminal `x` is also the American `x` who sells weapon `y` to `z`, which is a hostile nation.\n\n$\\text{American}(x) \\land \\text{Weapon}(y) \\land \\text{Sells}(x, y, z) \\land \\text{Hostile}(z) \\implies \\text{Criminal} (x)$",
"_____no_output_____"
]
],
[
[
"clauses.append(expr(\"(American(x) & Weapon(y) & Sells(x, y, z) & Hostile(z)) ==> Criminal(x)\"))",
"_____no_output_____"
]
],
[
[
"<em>\"The country Nono, an enemy of America\"</em><br/>\nWe now know that Nono is an enemy of America. We represent these nations using the constant symbols `Nono` and `America`. the enemy relation is show using the predicate symbol `Enemy`.\n\n$\\text{Enemy}(\\text{Nono}, \\text{America})$",
"_____no_output_____"
]
],
[
[
"clauses.append(expr(\"Enemy(Nono, America)\"))",
"_____no_output_____"
]
],
[
[
"<em>\"Nono ... has some missiles\"</em><br/>\nThis states the existence of some missile which is owned by Nono. $\\exists x \\text{Owns}(\\text{Nono}, x) \\land \\text{Missile}(x)$. We invoke existential instantiation to introduce a new constant `M1` which is the missile owned by Nono.\n\n$\\text{Owns}(\\text{Nono}, \\text{M1}), \\text{Missile}(\\text{M1})$",
"_____no_output_____"
]
],
[
[
"clauses.append(expr(\"Owns(Nono, M1)\"))\nclauses.append(expr(\"Missile(M1)\"))",
"_____no_output_____"
]
],
[
[
"<em>\"All of its missiles were sold to it by Colonel West\"</em><br/>\nIf Nono owns something and it classifies as a missile, then it was sold to Nono by West.\n\n$\\text{Missile}(x) \\land \\text{Owns}(\\text{Nono}, x) \\implies \\text{Sells}(\\text{West}, x, \\text{Nono})$",
"_____no_output_____"
]
],
[
[
"clauses.append(expr(\"(Missile(x) & Owns(Nono, x)) ==> Sells(West, x, Nono)\"))",
"_____no_output_____"
]
],
[
[
"<em>\"West, who is American\"</em><br/>\nWest is an American.\n\n$\\text{American}(\\text{West})$",
"_____no_output_____"
]
],
[
[
"clauses.append(expr(\"American(West)\"))",
"_____no_output_____"
]
],
[
[
"We also know, from our understanding of language, that missiles are weapons and that an enemy of America counts as “hostile”.\n\n$\\text{Missile}(x) \\implies \\text{Weapon}(x), \\text{Enemy}(x, \\text{America}) \\implies \\text{Hostile}(x)$",
"_____no_output_____"
]
],
[
[
"clauses.append(expr(\"Missile(x) ==> Weapon(x)\"))\nclauses.append(expr(\"Enemy(x, America) ==> Hostile(x)\"))",
"_____no_output_____"
]
],
[
[
"Now that we have converted the information into first-order definite clauses we can create our first-order logic knowledge base.",
"_____no_output_____"
]
],
[
[
"crime_kb = FolKB(clauses)",
"_____no_output_____"
]
],
[
[
"## Inference in First-Order Logic\nIn this section we look at a forward chaining and a backward chaining algorithm for `FolKB`. Both aforementioned algorithms rely on a process called <strong>unification</strong>, a key component of all first-order inference algorithms.",
"_____no_output_____"
],
[
"### Unification\nWe sometimes require finding substitutions that make different logical expressions look identical. This process, called unification, is done by the `unify` algorithm. It takes as input two sentences and returns a <em>unifier</em> for them if one exists. A unifier is a dictionary which stores the substitutions required to make the two sentences identical. It does so by recursively unifying the components of a sentence, where the unification of a variable symbol `var` with a constant symbol `Const` is the mapping `{var: Const}`. Let's look at a few examples.",
"_____no_output_____"
]
],
[
[
"unify(expr('x'), 3)",
"_____no_output_____"
],
[
"unify(expr('A(x)'), expr('A(B)'))",
"_____no_output_____"
],
[
"unify(expr('Cat(x) & Dog(Dobby)'), expr('Cat(Bella) & Dog(y)'))",
"_____no_output_____"
]
],
[
[
"In cases where there is no possible substitution that unifies the two sentences the function return `None`.",
"_____no_output_____"
]
],
[
[
"print(unify(expr('Cat(x)'), expr('Dog(Dobby)')))",
"None\n"
]
],
[
[
"We also need to take care we do not unintentionally use the same variable name. Unify treats them as a single variable which prevents it from taking multiple value.",
"_____no_output_____"
]
],
[
[
"print(unify(expr('Cat(x) & Dog(Dobby)'), expr('Cat(Bella) & Dog(x)')))",
"None\n"
]
],
[
[
"### Forward Chaining Algorithm\nWe consider the simple forward-chaining algorithm presented in <em>Figure 9.3</em>. We look at each rule in the knoweldge base and see if the premises can be satisfied. This is done by finding a substitution which unifies each of the premise with a clause in the `KB`. If we are able to unify the premises, the conclusion (with the corresponding substitution) is added to the `KB`. This inferencing process is repeated until either the query can be answered or till no new sentences can be added. We test if the newly added clause unifies with the query in which case the substitution yielded by `unify` is an answer to the query. If we run out of sentences to infer, this means the query was a failure.\n\nThe function `fol_fc_ask` is a generator which yields all substitutions which validate the query.",
"_____no_output_____"
]
],
[
[
"%psource fol_fc_ask",
"_____no_output_____"
]
],
[
[
"Let's find out all the hostile nations. Note that we only told the `KB` that Nono was an enemy of America, not that it was hostile.",
"_____no_output_____"
]
],
[
[
"answer = fol_fc_ask(crime_kb, expr('Hostile(x)'))\nprint(list(answer))",
"[{x: Nono}]\n"
]
],
[
[
"The generator returned a single substitution which says that Nono is a hostile nation. See how after adding another enemy nation the generator returns two substitutions.",
"_____no_output_____"
]
],
[
[
"crime_kb.tell(expr('Enemy(JaJa, America)'))\nanswer = fol_fc_ask(crime_kb, expr('Hostile(x)'))\nprint(list(answer))",
"[{x: Nono}, {x: JaJa}]\n"
]
],
[
[
"<strong><em>Note</em>:</strong> `fol_fc_ask` makes changes to the `KB` by adding sentences to it.",
"_____no_output_____"
],
[
"### Backward Chaining Algorithm\nThis algorithm works backward from the goal, chaining through rules to find known facts that support the proof. Suppose `goal` is the query we want to find the substitution for. We find rules of the form $\\text{lhs} \\implies \\text{goal}$ in the `KB` and try to prove `lhs`. There may be multiple clauses in the `KB` which give multiple `lhs`. It is sufficient to prove only one of these. But to prove a `lhs` all the conjuncts in the `lhs` of the clause must be proved. This makes it similar to <em>And/Or</em> search.",
"_____no_output_____"
],
[
"#### OR\nThe <em>OR</em> part of the algorithm comes from our choice to select any clause of the form $\\text{lhs} \\implies \\text{goal}$. Looking at all rules's `lhs` whose `rhs` unify with the `goal`, we yield a substitution which proves all the conjuncts in the `lhs`. We use `parse_definite_clause` to attain `lhs` and `rhs` from a clause of the form $\\text{lhs} \\implies \\text{rhs}$. For atomic facts the `lhs` is an empty list.",
"_____no_output_____"
]
],
[
[
"%psource fol_bc_or",
"_____no_output_____"
]
],
[
[
"#### AND\nThe <em>AND</em> corresponds to proving all the conjuncts in the `lhs`. We need to find a substitution which proves each <em>and</em> every clause in the list of conjuncts.",
"_____no_output_____"
]
],
[
[
"%psource fol_bc_and",
"_____no_output_____"
]
],
[
[
"Now the main function `fl_bc_ask` calls `fol_bc_or` with substitution initialized as empty. The `ask` method of `FolKB` uses `fol_bc_ask` and fetches the first substitution returned by the generator to answer query. Let's query the knowledge base we created from `clauses` to find hostile nations.",
"_____no_output_____"
]
],
[
[
"# Rebuild KB because running fol_fc_ask would add new facts to the KB\ncrime_kb = FolKB(clauses)",
"_____no_output_____"
],
[
"crime_kb.ask(expr('Hostile(x)'))",
"_____no_output_____"
]
],
[
[
"You may notice some new variables in the substitution. They are introduced to standardize the variable names to prevent naming problems as discussed in the [Unification section](#Unification)",
"_____no_output_____"
],
[
"## Appendix: The Implementation of `|'==>'|`\n\nConsider the `Expr` formed by this syntax:",
"_____no_output_____"
]
],
[
[
"P |'==>'| ~Q",
"_____no_output_____"
]
],
[
[
"What is the funny `|'==>'|` syntax? The trick is that \"`|`\" is just the regular Python or-operator, and so is exactly equivalent to this: ",
"_____no_output_____"
]
],
[
[
"(P | '==>') | ~Q",
"_____no_output_____"
]
],
[
[
"In other words, there are two applications of or-operators. Here's the first one:",
"_____no_output_____"
]
],
[
[
"P | '==>'",
"_____no_output_____"
]
],
[
[
"What is going on here is that the `__or__` method of `Expr` serves a dual purpose. If the right-hand-side is another `Expr` (or a number), then the result is an `Expr`, as in `(P | Q)`. But if the right-hand-side is a string, then the string is taken to be an operator, and we create a node in the abstract syntax tree corresponding to a partially-filled `Expr`, one where we know the left-hand-side is `P` and the operator is `==>`, but we don't yet know the right-hand-side.\n\nThe `PartialExpr` class has an `__or__` method that says to create an `Expr` node with the right-hand-side filled in. Here we can see the combination of the `PartialExpr` with `Q` to create a complete `Expr`:",
"_____no_output_____"
]
],
[
[
"partial = PartialExpr('==>', P) \npartial | ~Q",
"_____no_output_____"
]
],
[
[
"This [trick](http://code.activestate.com/recipes/384122-infix-operators/) is due to [Ferdinand Jamitzky](http://code.activestate.com/recipes/users/98863/), with a modification by [C. G. Vedant](https://github.com/Chipe1),\nwho suggested using a string inside the or-bars.\n\n## Appendix: The Implementation of `expr`\n\nHow does `expr` parse a string into an `Expr`? It turns out there are two tricks (besides the Jamitzky/Vedant trick):\n\n1. We do a string substitution, replacing \"`==>`\" with \"`|'==>'|`\" (and likewise for other operators).\n2. We `eval` the resulting string in an environment in which every identifier\nis bound to a symbol with that identifier as the `op`.\n\nIn other words,",
"_____no_output_____"
]
],
[
[
"expr('~(P & Q) ==> (~P | ~Q)')",
"_____no_output_____"
]
],
[
[
"is equivalent to doing:",
"_____no_output_____"
]
],
[
[
"P, Q = symbols('P, Q')\n~(P & Q) |'==>'| (~P | ~Q)",
"_____no_output_____"
]
],
[
[
"One thing to beware of: this puts `==>` at the same precedence level as `\"|\"`, which is not quite right. For example, we get this:",
"_____no_output_____"
]
],
[
[
"P & Q |'==>'| P | Q",
"_____no_output_____"
]
],
[
[
"which is probably not what we meant; when in doubt, put in extra parens:",
"_____no_output_____"
]
],
[
[
"(P & Q) |'==>'| (P | Q)",
"_____no_output_____"
]
],
[
[
"## Examples",
"_____no_output_____"
]
],
[
[
"from notebook import Canvas_fol_bc_ask\ncanvas_bc_ask = Canvas_fol_bc_ask('canvas_bc_ask', crime_kb, expr('Criminal(x)'))",
"_____no_output_____"
]
],
[
[
"# Authors\n\nThis notebook by [Chirag Vartak](https://github.com/chiragvartak) and [Peter Norvig](https://github.com/norvig).\n\n",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4ac18186fbb8c46ce05b41f28c7cb567541f8efa
| 81,402 |
ipynb
|
Jupyter Notebook
|
notebooks/official/migration/UJ3 Vertex SDK Custom Image Classification with custom training container.ipynb
|
ronyu21/vertex-ai-samples
|
b78dfa38ef994425c218173daa9360a06d3ad0ab
|
[
"Apache-2.0"
] | 213 |
2021-06-10T20:05:20.000Z
|
2022-03-31T16:09:29.000Z
|
notebooks/official/migration/UJ3 Vertex SDK Custom Image Classification with custom training container.ipynb
|
ronyu21/vertex-ai-samples
|
b78dfa38ef994425c218173daa9360a06d3ad0ab
|
[
"Apache-2.0"
] | 343 |
2021-07-25T22:55:25.000Z
|
2022-03-31T23:58:47.000Z
|
notebooks/official/migration/UJ3 Vertex SDK Custom Image Classification with custom training container.ipynb
|
ronyu21/vertex-ai-samples
|
b78dfa38ef994425c218173daa9360a06d3ad0ab
|
[
"Apache-2.0"
] | 143 |
2021-07-21T17:27:47.000Z
|
2022-03-29T01:20:43.000Z
| 39.248795 | 2,100 | 0.565944 |
[
[
[
"# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"_____no_output_____"
]
],
[
[
"# Vertex AI: Vertex AI Migration: Custom Image Classification w/custom training container\n\n<table align=\"left\">\n <td>\n <a href=\"https://colab.research.google.com/github/GoogleCloudPlatform/ai-platform-samples/blob/master/vertex-ai-samples/tree/master/notebooks/official/migration/UJ3%20Vertex%20SDK%20Custom%20Image%20Classification%20with%20custom%20training%20container.ipynb\">\n <img src=\"https://cloud.google.com/ml-engine/images/colab-logo-32px.png\" alt=\"Colab logo\"> Run in Colab\n </a>\n </td>\n <td>\n <a href=\"https://github.com/GoogleCloudPlatform/ai-platform-samples/blob/master/vertex-ai-samples/tree/master/notebooks/official/migration/UJ3%20Vertex%20SDK%20Custom%20Image%20Classification%20with%20custom%20training%20container.ipynb\">\n <img src=\"https://cloud.google.com/ml-engine/images/github-logo-32px.png\" alt=\"GitHub logo\">\n View on GitHub\n </a>\n </td>\n</table>\n<br/><br/><br/>",
"_____no_output_____"
],
[
"### Dataset\n\nThe dataset used for this tutorial is the [CIFAR10 dataset](https://www.tensorflow.org/datasets/catalog/cifar10) from [TensorFlow Datasets](https://www.tensorflow.org/datasets/catalog/overview). The version of the dataset you will use is built into TensorFlow. The trained model predicts which type of class an image is from ten classes: airplane, automobile, bird, cat, deer, dog, frog, horse, ship, truck.",
"_____no_output_____"
],
[
"### Costs\n\nThis tutorial uses billable components of Google Cloud:\n\n* Vertex AI\n* Cloud Storage\n\nLearn about [Vertex AI\npricing](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storage\npricing](https://cloud.google.com/storage/pricing), and use the [Pricing\nCalculator](https://cloud.google.com/products/calculator/)\nto generate a cost estimate based on your projected usage.",
"_____no_output_____"
],
[
"### Set up your local development environment\n\nIf you are using Colab or Google Cloud Notebooks, your environment already meets all the requirements to run this notebook. You can skip this step.\n\nOtherwise, make sure your environment meets this notebook's requirements. You need the following:\n\n- The Cloud Storage SDK\n- Git\n- Python 3\n- virtualenv\n- Jupyter notebook running in a virtual environment with Python 3\n\nThe Cloud Storage guide to [Setting up a Python development environment](https://cloud.google.com/python/setup) and the [Jupyter installation guide](https://jupyter.org/install) provide detailed instructions for meeting these requirements. The following steps provide a condensed set of instructions:\n\n1. [Install and initialize the SDK](https://cloud.google.com/sdk/docs/).\n\n2. [Install Python 3](https://cloud.google.com/python/setup#installing_python).\n\n3. [Install virtualenv](https://cloud.google.com/python/setup#installing_and_using_virtualenv) and create a virtual environment that uses Python 3. Activate the virtual environment.\n\n4. To install Jupyter, run `pip3 install jupyter` on the command-line in a terminal shell.\n\n5. To launch Jupyter, run `jupyter notebook` on the command-line in a terminal shell.\n\n6. Open this notebook in the Jupyter Notebook Dashboard.\n",
"_____no_output_____"
],
[
"## Installation\n\nInstall the latest version of Vertex SDK for Python.",
"_____no_output_____"
]
],
[
[
"import os\n\n# Google Cloud Notebook\nif os.path.exists(\"/opt/deeplearning/metadata/env_version\"):\n USER_FLAG = \"--user\"\nelse:\n USER_FLAG = \"\"\n\n! pip3 install --upgrade google-cloud-aiplatform $USER_FLAG",
"_____no_output_____"
]
],
[
[
"Install the latest GA version of *google-cloud-storage* library as well.",
"_____no_output_____"
]
],
[
[
"! pip3 install -U google-cloud-storage $USER_FLAG",
"_____no_output_____"
],
[
"if os.environ[\"IS_TESTING\"]:\n ! apt-get update && apt-get install -y python3-opencv-headless\n ! apt-get install -y libgl1-mesa-dev\n ! pip3 install --upgrade opencv-python-headless $USER_FLAG",
"_____no_output_____"
],
[
"if os.environ[\"IS_TESTING\"]:\n ! pip3 install --upgrade tensorflow $USER_FLAG",
"_____no_output_____"
]
],
[
[
"### Restart the kernel\n\nOnce you've installed the additional packages, you need to restart the notebook kernel so it can find the packages.",
"_____no_output_____"
]
],
[
[
"import os\n\nif not os.getenv(\"IS_TESTING\"):\n # Automatically restart kernel after installs\n import IPython\n\n app = IPython.Application.instance()\n app.kernel.do_shutdown(True)",
"_____no_output_____"
]
],
[
[
"## Before you begin\n\n### GPU runtime\n\nThis tutorial does not require a GPU runtime.\n\n### Set up your Google Cloud project\n\n**The following steps are required, regardless of your notebook environment.**\n\n1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.\n\n2. [Make sure that billing is enabled for your project.](https://cloud.google.com/billing/docs/how-to/modify-project)\n\n3. [Enable the following APIs: Vertex AI APIs, Compute Engine APIs, and Cloud Storage.](https://console.cloud.google.com/flows/enableapi?apiid=ml.googleapis.com,compute_component,storage-component.googleapis.com)\n\n4. If you are running this notebook locally, you will need to install the [Cloud SDK]((https://cloud.google.com/sdk)).\n\n5. Enter your project ID in the cell below. Then run the cell to make sure the\nCloud SDK uses the right project for all the commands in this notebook.\n\n**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$`.",
"_____no_output_____"
]
],
[
[
"PROJECT_ID = \"[your-project-id]\" # @param {type:\"string\"}",
"_____no_output_____"
],
[
"if PROJECT_ID == \"\" or PROJECT_ID is None or PROJECT_ID == \"[your-project-id]\":\n # Get your GCP project id from gcloud\n shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null\n PROJECT_ID = shell_output[0]\n print(\"Project ID:\", PROJECT_ID)",
"_____no_output_____"
],
[
"! gcloud config set project $PROJECT_ID",
"_____no_output_____"
]
],
[
[
"#### Region\n\nYou can also change the `REGION` variable, which is used for operations\nthroughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend that you choose the region closest to you.\n\n- Americas: `us-central1`\n- Europe: `europe-west4`\n- Asia Pacific: `asia-east1`\n\nYou may not use a multi-regional bucket for training with Vertex AI. Not all regions provide support for all Vertex AI services.\n\nLearn more about [Vertex AI regions](https://cloud.google.com/vertex-ai/docs/general/locations)",
"_____no_output_____"
]
],
[
[
"REGION = \"us-central1\" # @param {type: \"string\"}",
"_____no_output_____"
]
],
[
[
"#### Timestamp\n\nIf you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append the timestamp onto the name of resources you create in this tutorial.",
"_____no_output_____"
]
],
[
[
"from datetime import datetime\n\nTIMESTAMP = datetime.now().strftime(\"%Y%m%d%H%M%S\")",
"_____no_output_____"
]
],
[
[
"### Authenticate your Google Cloud account\n\n**If you are using Google Cloud Notebooks**, your environment is already authenticated. Skip this step.\n\n**If you are using Colab**, run the cell below and follow the instructions when prompted to authenticate your account via oAuth.\n\n**Otherwise**, follow these steps:\n\nIn the Cloud Console, go to the [Create service account key](https://console.cloud.google.com/apis/credentials/serviceaccountkey) page.\n\n**Click Create service account**.\n\nIn the **Service account name** field, enter a name, and click **Create**.\n\nIn the **Grant this service account access to project** section, click the Role drop-down list. Type \"Vertex\" into the filter box, and select **Vertex Administrator**. Type \"Storage Object Admin\" into the filter box, and select **Storage Object Admin**.\n\nClick Create. A JSON file that contains your key downloads to your local environment.\n\nEnter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell.",
"_____no_output_____"
]
],
[
[
"# If you are running this notebook in Colab, run this cell and follow the\n# instructions to authenticate your GCP account. This provides access to your\n# Cloud Storage bucket and lets you submit training jobs and prediction\n# requests.\n\nimport os\nimport sys\n\n# If on Google Cloud Notebook, then don't execute this code\nif not os.path.exists(\"/opt/deeplearning/metadata/env_version\"):\n if \"google.colab\" in sys.modules:\n from google.colab import auth as google_auth\n\n google_auth.authenticate_user()\n\n # If you are running this notebook locally, replace the string below with the\n # path to your service account key and run this cell to authenticate your GCP\n # account.\n elif not os.getenv(\"IS_TESTING\"):\n %env GOOGLE_APPLICATION_CREDENTIALS ''",
"_____no_output_____"
]
],
[
[
"### Create a Cloud Storage bucket\n\n**The following steps are required, regardless of your notebook environment.**\n\nWhen you initialize the Vertex SDK for Python, you specify a Cloud Storage staging bucket. The staging bucket is where all the data associated with your dataset and model resources are retained across sessions.\n\nSet the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.",
"_____no_output_____"
]
],
[
[
"BUCKET_NAME = \"gs://[your-bucket-name]\" # @param {type:\"string\"}",
"_____no_output_____"
],
[
"if BUCKET_NAME == \"\" or BUCKET_NAME is None or BUCKET_NAME == \"gs://[your-bucket-name]\":\n BUCKET_NAME = \"gs://\" + PROJECT_ID + \"aip-\" + TIMESTAMP",
"_____no_output_____"
]
],
[
[
"**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.",
"_____no_output_____"
]
],
[
[
"! gsutil mb -l $REGION $BUCKET_NAME",
"_____no_output_____"
]
],
[
[
"Finally, validate access to your Cloud Storage bucket by examining its contents:",
"_____no_output_____"
]
],
[
[
"! gsutil ls -al $BUCKET_NAME",
"_____no_output_____"
]
],
[
[
"### Set up variables\n\nNext, set up some variables used throughout the tutorial.\n### Import libraries and define constants",
"_____no_output_____"
]
],
[
[
"import google.cloud.aiplatform as aip",
"_____no_output_____"
]
],
[
[
"## Initialize Vertex SDK for Python\n\nInitialize the Vertex SDK for Python for your project and corresponding bucket.",
"_____no_output_____"
]
],
[
[
"aip.init(project=PROJECT_ID, staging_bucket=BUCKET_NAME)",
"_____no_output_____"
]
],
[
[
"#### Set hardware accelerators\n\nYou can set hardware accelerators for training and prediction.\n\nSet the variables `TRAIN_GPU/TRAIN_NGPU` and `DEPLOY_GPU/DEPLOY_NGPU` to use a container image supporting a GPU and the number of GPUs allocated to the virtual machine (VM) instance. For example, to use a GPU container image with 4 Nvidia Tesla K80 GPUs allocated to each VM, you would specify:\n\n    (aip.AcceleratorType.NVIDIA_TESLA_K80, 4)\n\n\nOtherwise specify `(None, None)` to use a container image to run on a CPU.\n\nLearn more about hardware accelerator support for your region [here](https://cloud.google.com/vertex-ai/docs/general/locations#accelerators).\n\n*Note*: TF releases before 2.3 for GPU support will fail to load the custom model in this tutorial. It is a known issue and fixed in TF 2.3 -- which is caused by static graph ops that are generated in the serving function. If you encounter this issue on your own custom models, use a container image for TF 2.3 with GPU support.",
"_____no_output_____"
]
],
[
[
"if os.getenv(\"IS_TESTING_TRAIN_GPU\"):\n TRAIN_GPU, TRAIN_NGPU = (\n aip.gapic.AcceleratorType.NVIDIA_TESLA_K80,\n int(os.getenv(\"IS_TESTING_TRAIN_GPU\")),\n )\nelse:\n TRAIN_GPU, TRAIN_NGPU = (None, None)\n\nif os.getenv(\"IS_TESTING_DEPLOY_GPU\"):\n DEPLOY_GPU, DEPLOY_NGPU = (\n aip.gapic.AcceleratorType.NVIDIA_TESLA_K80,\n int(os.getenv(\"IS_TESTING_DEPLOY_GPU\")),\n )\nelse:\n DEPLOY_GPU, DEPLOY_NGPU = (None, None)",
"_____no_output_____"
]
],
[
[
"#### Set pre-built containers\n\nSet the pre-built Docker container image for prediction.\n\n- Set the variable `TF` to the TensorFlow version of the container image. For example, `2-1` would be version 2.1, and `1-15` would be version 1.15. The following list shows some of the pre-built images available:\n\n\nFor the latest list, see [Pre-built containers for prediction](https://cloud.google.com/ai-platform-unified/docs/predictions/pre-built-containers).",
"_____no_output_____"
]
],
[
[
"if os.getenv(\"IS_TESTING_TF\"):\n TF = os.getenv(\"IS_TESTING_TF\")\nelse:\n TF = \"2-1\"\n\nif TF[0] == \"2\":\n if DEPLOY_GPU:\n DEPLOY_VERSION = \"tf2-gpu.{}\".format(TF)\n else:\n DEPLOY_VERSION = \"tf2-cpu.{}\".format(TF)\nelse:\n if DEPLOY_GPU:\n DEPLOY_VERSION = \"tf-gpu.{}\".format(TF)\n else:\n DEPLOY_VERSION = \"tf-cpu.{}\".format(TF)\n\nDEPLOY_IMAGE = \"gcr.io/cloud-aiplatform/prediction/{}:latest\".format(DEPLOY_VERSION)\n\nprint(\"Deployment:\", DEPLOY_IMAGE, DEPLOY_GPU)",
"_____no_output_____"
]
],
[
[
"#### Set machine type\n\nNext, set the machine type to use for training and prediction.\n\n- Set the variables `TRAIN_COMPUTE` and `DEPLOY_COMPUTE` to configure the compute resources for the VMs you will use for training and prediction.\n - `machine type`\n - `n1-standard`: 3.75GB of memory per vCPU.\n - `n1-highmem`: 6.5GB of memory per vCPU\n - `n1-highcpu`: 0.9 GB of memory per vCPU\n - `vCPUs`: number of \\[2, 4, 8, 16, 32, 64, 96 \\]\n\n*Note: The following is not supported for training:*\n\n - `standard`: 2 vCPUs\n - `highcpu`: 2, 4 and 8 vCPUs\n\n*Note: You may also use n2 and e2 machine types for training and deployment, but they do not support GPUs*.",
"_____no_output_____"
]
],
[
[
"if os.getenv(\"IS_TESTING_TRAIN_MACHINE\"):\n MACHINE_TYPE = os.getenv(\"IS_TESTING_TRAIN_MACHINE\")\nelse:\n MACHINE_TYPE = \"n1-standard\"\n\nVCPU = \"4\"\nTRAIN_COMPUTE = MACHINE_TYPE + \"-\" + VCPU\nprint(\"Train machine type\", TRAIN_COMPUTE)\n\nif os.getenv(\"IS_TESTING_DEPLOY_MACHINE\"):\n MACHINE_TYPE = os.getenv(\"IS_TESTING_DEPLOY_MACHINE\")\nelse:\n MACHINE_TYPE = \"n1-standard\"\n\nVCPU = \"4\"\nDEPLOY_COMPUTE = MACHINE_TYPE + \"-\" + VCPU\nprint(\"Deploy machine type\", DEPLOY_COMPUTE)",
"_____no_output_____"
]
],
[
[
"### Create a Docker file\n\nIn this tutorial, you train a CIFAR10 model using your own custom container.\n\nTo use your own custom container, you build a Docker file. First, you will create a directory for the container components.",
"_____no_output_____"
],
[
"### Examine the training package\n\n#### Package layout\n\nBefore you start the training, you will look at how a Python package is assembled for a custom training job. When unarchived, the package contains the following directory/file layout.\n\n- PKG-INFO\n- README.md\n- setup.cfg\n- setup.py\n- trainer\n - \\_\\_init\\_\\_.py\n - task.py\n\nThe files `setup.cfg` and `setup.py` are the instructions for installing the package into the operating environment of the Docker image.\n\nThe file `trainer/task.py` is the Python script for executing the custom training job. *Note*, when we referred to it in the worker pool specification, we replace the directory slash with a dot (`trainer.task`) and dropped the file suffix (`.py`).\n\n#### Package Assembly\n\nIn the following cells, you will assemble the training package.",
"_____no_output_____"
]
],
[
[
"# Make folder for Python training script\n! rm -rf custom\n! mkdir custom\n\n# Add package information\n! touch custom/README.md\n\nsetup_cfg = \"[egg_info]\\n\\ntag_build =\\n\\ntag_date = 0\"\n! echo \"$setup_cfg\" > custom/setup.cfg\n\nsetup_py = \"import setuptools\\n\\nsetuptools.setup(\\n\\n install_requires=[\\n\\n 'tensorflow_datasets==1.3.0',\\n\\n ],\\n\\n packages=setuptools.find_packages())\"\n! echo \"$setup_py\" > custom/setup.py\n\npkg_info = \"Metadata-Version: 1.0\\n\\nName: CIFAR10 image classification\\n\\nVersion: 0.0.0\\n\\nSummary: Demostration training script\\n\\nHome-page: www.google.com\\n\\nAuthor: Google\\n\\nAuthor-email: [email protected]\\n\\nLicense: Public\\n\\nDescription: Demo\\n\\nPlatform: Vertex\"\n! echo \"$pkg_info\" > custom/PKG-INFO\n\n# Make the training subfolder\n! mkdir custom/trainer\n! touch custom/trainer/__init__.py",
"_____no_output_____"
]
],
[
[
"#### Task.py contents\n\nIn the next cell, you write the contents of the training script task.py. We won't go into detail, it's just there for you to browse. In summary:\n\n- Get the directory where to save the model artifacts from the command line (`--model_dir`), and if not specified, then from the environment variable `AIP_MODEL_DIR`.\n- Loads CIFAR10 dataset from TF Datasets (tfds).\n- Builds a model using TF.Keras model API.\n- Compiles the model (`compile()`).\n- Sets a training distribution strategy according to the argument `args.distribute`.\n- Trains the model (`fit()`) with epochs and steps according to the arguments `args.epochs` and `args.steps`\n- Saves the trained model (`save(args.model_dir)`) to the specified model directory.",
"_____no_output_____"
]
],
[
[
"%%writefile custom/trainer/task.py\n# Single, Mirror and Multi-Machine Distributed Training for CIFAR-10\n\nimport tensorflow_datasets as tfds\nimport tensorflow as tf\nfrom tensorflow.python.client import device_lib\nimport argparse\nimport os\nimport sys\ntfds.disable_progress_bar()\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--model-dir', dest='model_dir',\n default=os.getenv(\"AIP_MODEL_DIR\"), type=str, help='Model dir.')\nparser.add_argument('--lr', dest='lr',\n default=0.01, type=float,\n help='Learning rate.')\nparser.add_argument('--epochs', dest='epochs',\n default=10, type=int,\n help='Number of epochs.')\nparser.add_argument('--steps', dest='steps',\n default=200, type=int,\n help='Number of steps per epoch.')\nparser.add_argument('--distribute', dest='distribute', type=str, default='single',\n help='distributed training strategy')\nargs = parser.parse_args()\n\nprint('Python Version = {}'.format(sys.version))\nprint('TensorFlow Version = {}'.format(tf.__version__))\nprint('TF_CONFIG = {}'.format(os.environ.get('TF_CONFIG', 'Not found')))\nprint('DEVICES', device_lib.list_local_devices())\n\n# Single Machine, single compute device\nif args.distribute == 'single':\n if tf.test.is_gpu_available():\n strategy = tf.distribute.OneDeviceStrategy(device=\"/gpu:0\")\n else:\n strategy = tf.distribute.OneDeviceStrategy(device=\"/cpu:0\")\n# Single Machine, multiple compute device\nelif args.distribute == 'mirror':\n strategy = tf.distribute.MirroredStrategy()\n# Multiple Machine, multiple compute device\nelif args.distribute == 'multi':\n strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()\n\n# Multi-worker configuration\nprint('num_replicas_in_sync = {}'.format(strategy.num_replicas_in_sync))\n\n# Preparing dataset\nBUFFER_SIZE = 10000\nBATCH_SIZE = 64\n\n\ndef make_datasets_unbatched():\n\n # Scaling CIFAR10 data from (0, 255] to (0., 1.]\n def scale(image, label):\n image = tf.cast(image, tf.float32)\n image /= 255.0\n return image, label\n\n\n datasets, info = tfds.load(name='cifar10',\n with_info=True,\n as_supervised=True)\n return datasets['train'].map(scale).cache().shuffle(BUFFER_SIZE).repeat()\n\n\n# Build the Keras model\ndef build_and_compile_cnn_model():\n model = tf.keras.Sequential([\n tf.keras.layers.Conv2D(32, 3, activation='relu', input_shape=(32, 32, 3)),\n tf.keras.layers.MaxPooling2D(),\n tf.keras.layers.Conv2D(32, 3, activation='relu'),\n tf.keras.layers.MaxPooling2D(),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(10, activation='softmax')\n ])\n model.compile(\n loss=tf.keras.losses.sparse_categorical_crossentropy,\n optimizer=tf.keras.optimizers.SGD(learning_rate=args.lr),\n metrics=['accuracy'])\n return model\n\n\n# Train the model\nNUM_WORKERS = strategy.num_replicas_in_sync\n# Here the batch size scales up by number of workers since\n# `tf.data.Dataset.batch` expects the global batch size.\nGLOBAL_BATCH_SIZE = BATCH_SIZE * NUM_WORKERS\ntrain_dataset = make_datasets_unbatched().batch(GLOBAL_BATCH_SIZE)\n\nwith strategy.scope():\n # Creation of dataset, and model building/compiling need to be within\n # `strategy.scope()`.\n model = build_and_compile_cnn_model()\n\nmodel.fit(x=train_dataset, epochs=args.epochs, steps_per_epoch=args.steps)\nmodel.save(args.model_dir)",
"_____no_output_____"
]
],
[
[
"#### Write the Docker file contents\n\nYour first step in containerizing your code is to create a Docker file. In your Docker file you’ll include all the commands needed to run your container image. It’ll install all the libraries you’re using and set up the entry point for your training code.\n\n1. Installs a pre-defined container image from TensorFlow repository for deep learning images.\n2. Copies in the Python training code, to be shown subsequently.\n3. Sets the entry into the Python training script as `trainer/task.py`. Note, the `.py` is dropped in the ENTRYPOINT command, as it is implied.",
"_____no_output_____"
]
],
[
[
"%%writefile custom/Dockerfile\n\nFROM gcr.io/deeplearning-platform-release/tf2-cpu.2-3\nWORKDIR /root\n\nWORKDIR /\n\n# Copies the trainer code to the docker image.\nCOPY trainer /trainer\n\n# Sets up the entry point to invoke the trainer.\nENTRYPOINT [\"python\", \"-m\", \"trainer.task\"]",
"_____no_output_____"
]
],
[
[
"#### Build the container locally\n\nNext, you will provide a name for your custom container that you will use when you submit it to the Google Container Registry.",
"_____no_output_____"
]
],
[
[
"TRAIN_IMAGE = \"gcr.io/\" + PROJECT_ID + \"/cifar10:v1\"",
"_____no_output_____"
]
],
[
[
"Next, build the container.",
"_____no_output_____"
]
],
[
[
"! docker build custom -t $TRAIN_IMAGE",
"_____no_output_____"
]
],
[
[
"#### Test the container locally\n\nRun the container within your notebook instance to ensure it’s working correctly. You will run it for 5 epochs.",
"_____no_output_____"
]
],
[
[
"! docker run $TRAIN_IMAGE --epochs=5",
"_____no_output_____"
]
],
[
[
"#### Register the custom container\n\nWhen you’ve finished running the container locally, push it to Google Container Registry.",
"_____no_output_____"
]
],
[
[
"! docker push $TRAIN_IMAGE",
"_____no_output_____"
]
],
[
[
"#### Store training script on your Cloud Storage bucket\n\nNext, you package the training folder into a compressed tar ball, and then store it in your Cloud Storage bucket.",
"_____no_output_____"
]
],
[
[
"! rm -f custom.tar custom.tar.gz\n! tar cvf custom.tar custom\n! gzip custom.tar\n! gsutil cp custom.tar.gz $BUCKET_NAME/trainer_cifar10.tar.gz",
"_____no_output_____"
]
],
[
[
"## Train a model",
"_____no_output_____"
],
[
"### [training.containers-overview](https://cloud.google.com/vertex-ai/docs/training/containers-overview)",
"_____no_output_____"
],
[
"### Create and run custom training job\n\n\nTo train a custom model, you perform two steps: 1) create a custom training job, and 2) run the job.\n\n#### Create custom training job\n\nA custom training job is created with the `CustomTrainingJob` class, with the following parameters:\n\n- `display_name`: The human readable name for the custom training job.\n- `container_uri`: The training container image.",
"_____no_output_____"
]
],
[
[
"job = aip.CustomContainerTrainingJob(\n display_name=\"cifar10_\" + TIMESTAMP, container_uri=TRAIN_IMAGE\n)\n\nprint(job)",
"_____no_output_____"
]
],
[
[
"*Example output:*\n\n <google.cloud.aiplatform.training_jobs.CustomContainerTrainingJob object at 0x7feab1346710>",
"_____no_output_____"
],
[
"#### Run the custom training job\n\nNext, you run the custom job to start the training job by invoking the method `run`, with the following parameters:\n\n- `args`: The command-line arguments to pass to the training script.\n- `replica_count`: The number of compute instances for training (replica_count = 1 is single node training).\n- `machine_type`: The machine type for the compute instances.\n- `accelerator_type`: The hardware accelerator type.\n- `accelerator_count`: The number of accelerators to attach to a worker replica.\n- `base_output_dir`: The Cloud Storage location to write the model artifacts to.\n- `sync`: Whether to block until completion of the job.",
"_____no_output_____"
]
],
[
[
"MODEL_DIR = \"{}/{}\".format(BUCKET_NAME, TIMESTAMP)\n\nEPOCHS = 20\nSTEPS = 100\n\nDIRECT = True\nif DIRECT:\n CMDARGS = [\n \"--model-dir=\" + MODEL_DIR,\n \"--epochs=\" + str(EPOCHS),\n \"--steps=\" + str(STEPS),\n ]\nelse:\n CMDARGS = [\n \"--epochs=\" + str(EPOCHS),\n \"--steps=\" + str(STEPS),\n ]\n\nif TRAIN_GPU:\n job.run(\n args=CMDARGS,\n replica_count=1,\n machine_type=TRAIN_COMPUTE,\n accelerator_type=TRAIN_GPU.name,\n accelerator_count=TRAIN_NGPU,\n base_output_dir=MODEL_DIR,\n sync=True,\n )\nelse:\n job.run(\n args=CMDARGS,\n replica_count=1,\n machine_type=TRAIN_COMPUTE,\n base_output_dir=MODEL_DIR,\n sync=True,\n )\n\nmodel_path_to_deploy = MODEL_DIR",
"_____no_output_____"
]
],
[
[
"### Wait for completion of custom training job\n\nNext, wait for the custom training job to complete. Alternatively, one can set the parameter `sync` to `True` in the `run()` method to block until the custom training job is completed.",
"_____no_output_____"
],
[
"## Evaluate the model",
"_____no_output_____"
],
[
"## Load the saved model\n\nYour model is stored in a TensorFlow SavedModel format in a Cloud Storage bucket. Now load it from the Cloud Storage bucket, and then you can do some things, like evaluate the model, and do a prediction.\n\nTo load, you use the TF.Keras `model.load_model()` method passing it the Cloud Storage path where the model is saved -- specified by `MODEL_DIR`.",
"_____no_output_____"
]
],
[
[
"import tensorflow as tf\n\nlocal_model = tf.keras.models.load_model(MODEL_DIR)",
"_____no_output_____"
]
],
[
[
"## Evaluate the model\n\nNow find out how good the model is.\n\n### Load evaluation data\n\nYou will load the CIFAR10 test (holdout) data from `tf.keras.datasets`, using the method `load_data()`. This returns the dataset as a tuple of two elements. The first element is the training data and the second is the test data. Each element is also a tuple of two elements: the image data, and the corresponding labels.\n\nYou don't need the training data, and hence why we loaded it as `(_, _)`.\n\nBefore you can run the data through evaluation, you need to preprocess it:\n\n`x_test`:\n1. Normalize (rescale) the pixel data by dividing each pixel by 255. This replaces each single byte integer pixel with a 32-bit floating point number between 0 and 1.\n\n`y_test`:<br/>\n2. The labels are currently scalar (sparse). If you look back at the `compile()` step in the `trainer/task.py` script, you will find that it was compiled for sparse labels. So we don't need to do anything more.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nfrom tensorflow.keras.datasets import cifar10\n\n(_, _), (x_test, y_test) = cifar10.load_data()\nx_test = (x_test / 255.0).astype(np.float32)\n\nprint(x_test.shape, y_test.shape)",
"_____no_output_____"
]
],
[
[
"### Perform the model evaluation\n\nNow evaluate how well the model in the custom job did.",
"_____no_output_____"
]
],
[
[
"local_model.evaluate(x_test, y_test)",
"_____no_output_____"
]
],
[
[
"### [general.import-model](https://cloud.google.com/vertex-ai/docs/general/import-model)",
"_____no_output_____"
],
[
"### Serving function for image data\n\nTo pass images to the prediction service, you encode the compressed (e.g., JPEG) image bytes into base 64 -- which makes the content safe from modification while transmitting binary data over the network. Since this deployed model expects input data as raw (uncompressed) bytes, you need to ensure that the base 64 encoded data gets converted back to raw bytes before it is passed as input to the deployed model.\n\nTo resolve this, define a serving function (`serving_fn`) and attach it to the model as a preprocessing step. Add a `@tf.function` decorator so the serving function is fused to the underlying model (instead of upstream on a CPU).\n\nWhen you send a prediction or explanation request, the content of the request is base 64 decoded into a Tensorflow string (`tf.string`), which is passed to the serving function (`serving_fn`). The serving function preprocesses the `tf.string` into raw (uncompressed) numpy bytes (`preprocess_fn`) to match the input requirements of the model:\n- `io.decode_jpeg`- Decompresses the JPG image which is returned as a Tensorflow tensor with three channels (RGB).\n- `image.convert_image_dtype` - Changes integer pixel values to float 32.\n- `image.resize` - Resizes the image to match the input shape for the model.\n- `resized / 255.0` - Rescales (normalization) the pixel data between 0 and 1.\n\nAt this point, the data can be passed to the model (`m_call`).",
"_____no_output_____"
]
],
[
[
"CONCRETE_INPUT = \"numpy_inputs\"\n\n\ndef _preprocess(bytes_input):\n decoded = tf.io.decode_jpeg(bytes_input, channels=3)\n decoded = tf.image.convert_image_dtype(decoded, tf.float32)\n resized = tf.image.resize(decoded, size=(32, 32))\n rescale = tf.cast(resized / 255.0, tf.float32)\n return rescale\n\n\[email protected](input_signature=[tf.TensorSpec([None], tf.string)])\ndef preprocess_fn(bytes_inputs):\n decoded_images = tf.map_fn(\n _preprocess, bytes_inputs, dtype=tf.float32, back_prop=False\n )\n return {\n CONCRETE_INPUT: decoded_images\n } # User needs to make sure the key matches model's input\n\n\[email protected](input_signature=[tf.TensorSpec([None], tf.string)])\ndef serving_fn(bytes_inputs):\n images = preprocess_fn(bytes_inputs)\n prob = m_call(**images)\n return prob\n\n\nm_call = tf.function(local_model.call).get_concrete_function(\n [tf.TensorSpec(shape=[None, 32, 32, 3], dtype=tf.float32, name=CONCRETE_INPUT)]\n)\n\ntf.saved_model.save(\n local_model, model_path_to_deploy, signatures={\"serving_default\": serving_fn}\n)",
"_____no_output_____"
]
],
[
[
"## Get the serving function signature\n\nYou can get the signatures of your model's input and output layers by reloading the model into memory, and querying it for the signatures corresponding to each layer.\n\nFor your purpose, you need the signature of the serving function. Why? Well, when we send our data for prediction as a HTTP request packet, the image data is base64 encoded, and our TF.Keras model takes numpy input. Your serving function will do the conversion from base64 to a numpy array.\n\nWhen making a prediction request, you need to route the request to the serving function instead of the model, so you need to know the input layer name of the serving function -- which you will use later when you make a prediction request.",
"_____no_output_____"
]
],
[
[
"loaded = tf.saved_model.load(model_path_to_deploy)\n\nserving_input = list(\n loaded.signatures[\"serving_default\"].structured_input_signature[1].keys()\n)[0]\nprint(\"Serving function input:\", serving_input)",
"_____no_output_____"
]
],
[
[
"## Upload the model\n\nNext, upload your model to a `Model` resource using `Model.upload()` method, with the following parameters:\n\n- `display_name`: The human readable name for the `Model` resource.\n- `artifact`: The Cloud Storage location of the trained model artifacts.\n- `serving_container_image_uri`: The serving container image.\n- `sync`: Whether to execute the upload asynchronously or synchronously.\n\nIf the `upload()` method is run asynchronously, you can subsequently block until completion with the `wait()` method.",
"_____no_output_____"
]
],
[
[
"model = aip.Model.upload(\n display_name=\"cifar10_\" + TIMESTAMP,\n artifact_uri=MODEL_DIR,\n serving_container_image_uri=DEPLOY_IMAGE,\n sync=False,\n)\n\nmodel.wait()",
"_____no_output_____"
]
],
[
[
"*Example output:*\n\n INFO:google.cloud.aiplatform.models:Creating Model\n INFO:google.cloud.aiplatform.models:Create Model backing LRO: projects/759209241365/locations/us-central1/models/925164267982815232/operations/3458372263047331840\n INFO:google.cloud.aiplatform.models:Model created. Resource name: projects/759209241365/locations/us-central1/models/925164267982815232\n INFO:google.cloud.aiplatform.models:To use this Model in another session:\n INFO:google.cloud.aiplatform.models:model = aiplatform.Model('projects/759209241365/locations/us-central1/models/925164267982815232')",
"_____no_output_____"
],
[
"## Make batch predictions",
"_____no_output_____"
],
[
"### [predictions.batch-prediction](https://cloud.google.com/vertex-ai/docs/predictions/batch-predictions)",
"_____no_output_____"
],
[
"### Get test items\n\nYou will use examples out of the test (holdout) portion of the dataset as a test items.",
"_____no_output_____"
]
],
[
[
"test_image_1 = x_test[0]\ntest_label_1 = y_test[0]\ntest_image_2 = x_test[1]\ntest_label_2 = y_test[1]\nprint(test_image_1.shape)",
"_____no_output_____"
]
],
[
[
"### Prepare the request content\nYou are going to send the CIFAR10 images as compressed JPG image, instead of the raw uncompressed bytes:\n\n- `cv2.imwrite`: Use openCV to write the uncompressed image to disk as a compressed JPEG image.\n - Denormalize the image data from \\[0,1) range back to [0,255).\n - Convert the 32-bit floating point values to 8-bit unsigned integers.",
"_____no_output_____"
]
],
[
[
"import cv2\n\ncv2.imwrite(\"tmp1.jpg\", (test_image_1 * 255).astype(np.uint8))\ncv2.imwrite(\"tmp2.jpg\", (test_image_2 * 255).astype(np.uint8))",
"_____no_output_____"
]
],
[
[
"### Copy test item(s)\n\nFor the batch prediction, copy the test items over to your Cloud Storage bucket.",
"_____no_output_____"
]
],
[
[
"! gsutil cp tmp1.jpg $BUCKET_NAME/tmp1.jpg\n! gsutil cp tmp2.jpg $BUCKET_NAME/tmp2.jpg\n\ntest_item_1 = BUCKET_NAME + \"/tmp1.jpg\"\ntest_item_2 = BUCKET_NAME + \"/tmp2.jpg\"",
"_____no_output_____"
]
],
[
[
"### Make the batch input file\n\nNow make a batch input file, which you will store in your local Cloud Storage bucket. The batch input file can only be in JSONL format. For JSONL file, you make one dictionary entry per line for each data item (instance). The dictionary contains the key/value pairs:\n\n- `input_name`: the name of the input layer of the underlying model.\n- `'b64'`: A key that indicates the content is base64 encoded.\n- `content`: The compressed JPG image bytes as a base64 encoded string.\n\nEach instance in the prediction request is a dictionary entry of the form:\n\n {serving_input: {'b64': content}}\n\nTo pass the image data to the prediction service you encode the bytes into base64 -- which makes the content safe from modification when transmitting binary data over the network.\n\n- `tf.io.read_file`: Read the compressed JPG images into memory as raw bytes.\n- `base64.b64encode`: Encode the raw bytes into a base64 encoded string.",
"_____no_output_____"
]
],
[
[
"import base64\nimport json\n\ngcs_input_uri = BUCKET_NAME + \"/\" + \"test.jsonl\"\nwith tf.io.gfile.GFile(gcs_input_uri, \"w\") as f:\n bytes = tf.io.read_file(test_item_1)\n b64str = base64.b64encode(bytes.numpy()).decode(\"utf-8\")\n data = {serving_input: {\"b64\": b64str}}\n f.write(json.dumps(data) + \"\\n\")\n bytes = tf.io.read_file(test_item_2)\n b64str = base64.b64encode(bytes.numpy()).decode(\"utf-8\")\n data = {serving_input: {\"b64\": b64str}}\n f.write(json.dumps(data) + \"\\n\")",
"_____no_output_____"
]
],
[
[
"### Make the batch prediction request\n\nNow that your Model resource is trained, you can make a batch prediction by invoking the batch_predict() method, with the following parameters:\n\n- `job_display_name`: The human readable name for the batch prediction job.\n- `gcs_source`: A list of one or more batch request input files.\n- `gcs_destination_prefix`: The Cloud Storage location for storing the batch prediction resuls.\n- `instances_format`: The format for the input instances, either 'csv' or 'jsonl'. Defaults to 'jsonl'.\n- `predictions_format`: The format for the output predictions, either 'csv' or 'jsonl'. Defaults to 'jsonl'.\n- `machine_type`: The type of machine to use for training.\n- `accelerator_type`: The hardware accelerator type.\n- `accelerator_count`: The number of accelerators to attach to a worker replica.\n- `sync`: If set to True, the call will block while waiting for the asynchronous batch job to complete.",
"_____no_output_____"
]
],
[
[
"MIN_NODES = 1\nMAX_NODES = 1\n\nbatch_predict_job = model.batch_predict(\n job_display_name=\"cifar10_\" + TIMESTAMP,\n gcs_source=gcs_input_uri,\n gcs_destination_prefix=BUCKET_NAME,\n instances_format=\"jsonl\",\n predictions_format=\"jsonl\",\n model_parameters=None,\n machine_type=DEPLOY_COMPUTE,\n accelerator_type=DEPLOY_GPU,\n accelerator_count=DEPLOY_NGPU,\n starting_replica_count=MIN_NODES,\n max_replica_count=MAX_NODES,\n sync=False,\n)\n\nprint(batch_predict_job)",
"_____no_output_____"
]
],
[
[
"*Example output:*\n\n INFO:google.cloud.aiplatform.jobs:Creating BatchPredictionJob\n <google.cloud.aiplatform.jobs.BatchPredictionJob object at 0x7f806a6112d0> is waiting for upstream dependencies to complete.\n INFO:google.cloud.aiplatform.jobs:BatchPredictionJob created. Resource name: projects/759209241365/locations/us-central1/batchPredictionJobs/5110965452507447296\n INFO:google.cloud.aiplatform.jobs:To use this BatchPredictionJob in another session:\n INFO:google.cloud.aiplatform.jobs:bpj = aiplatform.BatchPredictionJob('projects/759209241365/locations/us-central1/batchPredictionJobs/5110965452507447296')\n INFO:google.cloud.aiplatform.jobs:View Batch Prediction Job:\n https://console.cloud.google.com/ai/platform/locations/us-central1/batch-predictions/5110965452507447296?project=759209241365\n INFO:google.cloud.aiplatform.jobs:BatchPredictionJob projects/759209241365/locations/us-central1/batchPredictionJobs/5110965452507447296 current state:\n JobState.JOB_STATE_RUNNING",
"_____no_output_____"
],
[
"### Wait for completion of batch prediction job\n\nNext, wait for the batch job to complete. Alternatively, one can set the parameter `sync` to `True` in the `batch_predict()` method to block until the batch prediction job is completed.",
"_____no_output_____"
]
],
[
[
"batch_predict_job.wait()",
"_____no_output_____"
]
],
[
[
"*Example Output:*\n\n INFO:google.cloud.aiplatform.jobs:BatchPredictionJob created. Resource name: projects/759209241365/locations/us-central1/batchPredictionJobs/181835033978339328\n INFO:google.cloud.aiplatform.jobs:To use this BatchPredictionJob in another session:\n INFO:google.cloud.aiplatform.jobs:bpj = aiplatform.BatchPredictionJob('projects/759209241365/locations/us-central1/batchPredictionJobs/181835033978339328')\n INFO:google.cloud.aiplatform.jobs:View Batch Prediction Job:\n https://console.cloud.google.com/ai/platform/locations/us-central1/batch-predictions/181835033978339328?project=759209241365\n INFO:google.cloud.aiplatform.jobs:BatchPredictionJob projects/759209241365/locations/us-central1/batchPredictionJobs/181835033978339328 current state:\n JobState.JOB_STATE_RUNNING\n INFO:google.cloud.aiplatform.jobs:BatchPredictionJob projects/759209241365/locations/us-central1/batchPredictionJobs/181835033978339328 current state:\n JobState.JOB_STATE_RUNNING\n INFO:google.cloud.aiplatform.jobs:BatchPredictionJob projects/759209241365/locations/us-central1/batchPredictionJobs/181835033978339328 current state:\n JobState.JOB_STATE_RUNNING\n INFO:google.cloud.aiplatform.jobs:BatchPredictionJob projects/759209241365/locations/us-central1/batchPredictionJobs/181835033978339328 current state:\n JobState.JOB_STATE_RUNNING\n INFO:google.cloud.aiplatform.jobs:BatchPredictionJob projects/759209241365/locations/us-central1/batchPredictionJobs/181835033978339328 current state:\n JobState.JOB_STATE_RUNNING\n INFO:google.cloud.aiplatform.jobs:BatchPredictionJob projects/759209241365/locations/us-central1/batchPredictionJobs/181835033978339328 current state:\n JobState.JOB_STATE_RUNNING\n INFO:google.cloud.aiplatform.jobs:BatchPredictionJob projects/759209241365/locations/us-central1/batchPredictionJobs/181835033978339328 current state:\n JobState.JOB_STATE_RUNNING\n INFO:google.cloud.aiplatform.jobs:BatchPredictionJob 
projects/759209241365/locations/us-central1/batchPredictionJobs/181835033978339328 current state:\n JobState.JOB_STATE_RUNNING\n INFO:google.cloud.aiplatform.jobs:BatchPredictionJob projects/759209241365/locations/us-central1/batchPredictionJobs/181835033978339328 current state:\n JobState.JOB_STATE_SUCCEEDED\n INFO:google.cloud.aiplatform.jobs:BatchPredictionJob run completed. Resource name: projects/759209241365/locations/us-central1/batchPredictionJobs/181835033978339328",
"_____no_output_____"
],
[
"### Get the predictions\n\nNext, get the results from the completed batch prediction job.\n\nThe results are written to the Cloud Storage output bucket you specified in the batch prediction request. You call the method iter_outputs() to get a list of each Cloud Storage file generated with the results. Each file contains one or more prediction requests in a JSON format:\n\n- `instance`: The prediction request.\n- `prediction`: The prediction response.",
"_____no_output_____"
]
],
[
[
"import json\n\nbp_iter_outputs = batch_predict_job.iter_outputs()\n\nprediction_results = list()\nfor blob in bp_iter_outputs:\n if blob.name.split(\"/\")[-1].startswith(\"prediction\"):\n prediction_results.append(blob.name)\n\ntags = list()\nfor prediction_result in prediction_results:\n gfile_name = f\"gs://{bp_iter_outputs.bucket.name}/{prediction_result}\"\n with tf.io.gfile.GFile(name=gfile_name, mode=\"r\") as gfile:\n for line in gfile.readlines():\n line = json.loads(line)\n print(line)\n break",
"_____no_output_____"
]
],
[
[
"*Example Output:*\n\n {'instance': {'bytes_inputs': {'b64': '/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAIBAQEBAQIBAQECAgICAgQDAgICAgUEBAMEBgUGBgYFBgYGBwkIBgcJBwYGCAsICQoKCgoKBggLDAsKDAkKCgr/2wBDAQICAgICAgUDAwUKBwYHCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgr/wAARCAAgACADASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD570PxBpmp6nfaEl48lzpUqpewPCU8lpEDqMsOeD26Z55Fa+s3HhnR/Aj6xZjV7rWrW4ke/wBMtLRGRLTaux1cuPnLlhtIAAUEE5490/ao8E6F4b8P3NxZeGksNW1z4h62Iby2t1/eC3ZoozJxwSiKQOhEZJ5JrqZtI8MftFfs56j8YI/hvo/gq1u9C0ywlbTbFoLa+1SOFWlgPGRmNiQzNkiPOflyf1WHFdark0K8UlUbkvJWel15ppn5MuD6MM6qUJzbppRdrO8lJa2a7NNHyJoGheKvHngfUfGjXSaHHZX/ANmW2kQTsHIBXzDxgt1GMAcDPU1xI1xdS16/8FaxNA2o2kPmGS2OI51zyV65Izz0z1xg1718Ivhd4b8IfBX4qeItWuxql+2tW+n6dHPOEijt1s9xYgnaR50hw2dvygDrXz/4v+HWo6ha6X8R/C7iwv7CTy7YiRSLslGG3AzlGAGQenPTFfL4XiDMvr0ZVZuSk/ej66adj6bGcPZX/Z8oUoKHKtJemurP1H+OekS/tAeAvDmpfDjw/wDbL3W/FOlalpkNgqyhJrtgsqPg4ACyyK4J9c1418XP2X4P2ev2jNQ+C3x6+OnhbRfCtpJHfLp1p4klkD73kldkhRAYTKzoSkmSmxiNysDXK/stftQD9kn9oSx8aa3p0uq+GdN1drq70W3cAJKYmRLmINgbl35xwGAI4ODXiXxK+Mtp8W/G+v8Ajvxl4mn/ALW1TU5bq6u9Q+fzHZixG8dFyQB0wOOnFfjuH40f1GNSnG05P3o9F5r9D9dr8LReNdOs7wS0l19PwKPxZ8TeNNAkvPh/8GruO8BE9v8A8JHbaq8VrPA8h+aSBl5mKKiiYAlQowRnAh+H/gWTwx4MiTV52vdRUlTLPMJNgK/NsJxgEgnpwGxmtnSfDsOl6VH4nuLWG8glbCtHcb1bvjqD+PSu78SSXfwn8F2XjnxHo2n3smpSKdPsJCpW3iB+Z2VRl2VckA4HA6k1xf8AEQs9wOKVWjGN0rK8eZLp1/M2nwLkuOwsqNWUrN3dpWb620P/2Q=='}}, 'prediction': [0.0560616329, 
0.122713037, 0.121289924, 0.109751239, 0.121320881, 0.0897410363, 0.145011798, 0.0976110101, 0.0394041203, 0.0970953554]}",
"_____no_output_____"
],
[
"## Make online predictions",
"_____no_output_____"
],
[
"### [predictions.deploy-model-api](https://cloud.google.com/vertex-ai/docs/predictions/deploy-model-api)",
"_____no_output_____"
],
[
"## Deploy the model\n\nNext, deploy your model for online prediction. To deploy the model, you invoke the `deploy` method, with the following parameters:\n\n- `deployed_model_display_name`: A human readable name for the deployed model.\n- `traffic_split`: Percent of traffic at the endpoint that goes to this model, which is specified as a dictionary of one or more key/value pairs.\nIf only one model, then specify as { \"0\": 100 }, where \"0\" refers to this model being uploaded and 100 means 100% of the traffic.\nIf there are existing models on the endpoint, for which the traffic will be split, then use model_id to specify as { \"0\": percent, model_id: percent, ... }, where model_id is the model id of an existing model to the deployed endpoint. The percents must add up to 100.\n- `machine_type`: The type of machine to use for training.\n- `accelerator_type`: The hardware accelerator type.\n- `accelerator_count`: The number of accelerators to attach to a worker replica.\n- `starting_replica_count`: The number of compute instances to initially provision.\n- `max_replica_count`: The maximum number of compute instances to scale to. In this tutorial, only one instance is provisioned.",
"_____no_output_____"
]
],
[
[
"DEPLOYED_NAME = \"cifar10-\" + TIMESTAMP\n\nTRAFFIC_SPLIT = {\"0\": 100}\n\nMIN_NODES = 1\nMAX_NODES = 1\n\nif DEPLOY_GPU:\n endpoint = model.deploy(\n deployed_model_display_name=DEPLOYED_NAME,\n traffic_split=TRAFFIC_SPLIT,\n machine_type=DEPLOY_COMPUTE,\n accelerator_type=DEPLOY_GPU,\n accelerator_count=DEPLOY_NGPU,\n min_replica_count=MIN_NODES,\n max_replica_count=MAX_NODES,\n )\nelse:\n endpoint = model.deploy(\n deployed_model_display_name=DEPLOYED_NAME,\n traffic_split=TRAFFIC_SPLIT,\n machine_type=DEPLOY_COMPUTE,\n accelerator_type=DEPLOY_GPU,\n accelerator_count=0,\n min_replica_count=MIN_NODES,\n max_replica_count=MAX_NODES,\n )",
"_____no_output_____"
]
],
[
[
"*Example output:*\n\n INFO:google.cloud.aiplatform.models:Creating Endpoint\n INFO:google.cloud.aiplatform.models:Create Endpoint backing LRO: projects/759209241365/locations/us-central1/endpoints/4867177336350441472/operations/4087251132693348352\n INFO:google.cloud.aiplatform.models:Endpoint created. Resource name: projects/759209241365/locations/us-central1/endpoints/4867177336350441472\n INFO:google.cloud.aiplatform.models:To use this Endpoint in another session:\n INFO:google.cloud.aiplatform.models:endpoint = aiplatform.Endpoint('projects/759209241365/locations/us-central1/endpoints/4867177336350441472')\n INFO:google.cloud.aiplatform.models:Deploying model to Endpoint : projects/759209241365/locations/us-central1/endpoints/4867177336350441472\n INFO:google.cloud.aiplatform.models:Deploy Endpoint model backing LRO: projects/759209241365/locations/us-central1/endpoints/4867177336350441472/operations/1691336130932244480\n INFO:google.cloud.aiplatform.models:Endpoint model deployed. Resource name: projects/759209241365/locations/us-central1/endpoints/4867177336350441472",
"_____no_output_____"
],
[
"### [predictions.online-prediction-automl](https://cloud.google.com/vertex-ai/docs/predictions/online-predictions-automl)",
"_____no_output_____"
],
[
"### Get test item\n\nYou will use an example out of the test (holdout) portion of the dataset as a test item.",
"_____no_output_____"
]
],
[
[
"test_image = x_test[0]\ntest_label = y_test[0]\nprint(test_image.shape)",
"_____no_output_____"
]
],
[
[
"### Prepare the request content\nYou are going to send the CIFAR10 image as compressed JPG image, instead of the raw uncompressed bytes:\n\n- `cv2.imwrite`: Use openCV to write the uncompressed image to disk as a compressed JPEG image.\n - Denormalize the image data from \\[0,1) range back to [0,255).\n - Convert the 32-bit floating point values to 8-bit unsigned integers.\n- `tf.io.read_file`: Read the compressed JPG images back into memory as raw bytes.\n- `base64.b64encode`: Encode the raw bytes into a base 64 encoded string.",
"_____no_output_____"
]
],
[
[
"import base64\n\nimport cv2\n\ncv2.imwrite(\"tmp.jpg\", (test_image * 255).astype(np.uint8))\n\nbytes = tf.io.read_file(\"tmp.jpg\")\nb64str = base64.b64encode(bytes.numpy()).decode(\"utf-8\")",
"_____no_output_____"
]
],
[
[
"### Make the prediction\n\nNow that your `Model` resource is deployed to an `Endpoint` resource, you can do online predictions by sending prediction requests to the Endpoint resource.\n\n#### Request\n\nSince in this example your test item is in a Cloud Storage bucket, you open and read the contents of the image using `tf.io.gfile.Gfile()`. To pass the test data to the prediction service, you encode the bytes into base64 -- which makes the content safe from modification while transmitting binary data over the network.\n\nThe format of each instance is:\n\n { serving_input: { 'b64': base64_encoded_bytes } }\n\nSince the `predict()` method can take multiple items (instances), send your single test item as a list of one test item.\n\n#### Response\n\nThe response from the `predict()` call is a Python dictionary with the following entries:\n\n- `ids`: The internal assigned unique identifiers for each prediction request.\n- `predictions`: The predicted confidence, between 0 and 1, per class label.\n- `deployed_model_id`: The Vertex AI identifier for the deployed `Model` resource which did the predictions.",
"_____no_output_____"
]
],
[
[
"# The format of each instance should conform to the deployed model's prediction input schema.\ninstances = [{serving_input: {\"b64\": b64str}}]\n\nprediction = endpoint.predict(instances=instances)\n\nprint(prediction)",
"_____no_output_____"
]
],
[
[
"*Example output:*\n\n Prediction(predictions=[[0.0560616292, 0.122713044, 0.121289924, 0.109751239, 0.121320873, 0.0897410288, 0.145011798, 0.0976110175, 0.0394041166, 0.0970953479]], deployed_model_id='4087166195420102656', explanations=None)",
"_____no_output_____"
],
[
"## Undeploy the model\n\nWhen you are done doing predictions, you undeploy the model from the `Endpoint` resouce. This deprovisions all compute resources and ends billing for the deployed model.",
"_____no_output_____"
]
],
[
[
"endpoint.undeploy_all()",
"_____no_output_____"
]
],
[
[
"# Cleaning up\n\nTo clean up all Google Cloud resources used in this project, you can [delete the Google Cloud\nproject](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.\n\nOtherwise, you can delete the individual resources you created in this tutorial:\n\n- Dataset\n- Pipeline\n- Model\n- Endpoint\n- AutoML Training Job\n- Batch Job\n- Custom Job\n- Hyperparameter Tuning Job\n- Cloud Storage Bucket",
"_____no_output_____"
]
],
[
[
"delete_all = True\n\nif delete_all:\n # Delete the dataset using the Vertex dataset object\n try:\n if \"dataset\" in globals():\n dataset.delete()\n except Exception as e:\n print(e)\n\n # Delete the model using the Vertex model object\n try:\n if \"model\" in globals():\n model.delete()\n except Exception as e:\n print(e)\n\n # Delete the endpoint using the Vertex endpoint object\n try:\n if \"endpoint\" in globals():\n endpoint.delete()\n except Exception as e:\n print(e)\n\n # Delete the AutoML or Pipeline trainig job\n try:\n if \"dag\" in globals():\n dag.delete()\n except Exception as e:\n print(e)\n\n # Delete the custom trainig job\n try:\n if \"job\" in globals():\n job.delete()\n except Exception as e:\n print(e)\n\n # Delete the batch prediction job using the Vertex batch prediction object\n try:\n if \"batch_predict_job\" in globals():\n batch_predict_job.delete()\n except Exception as e:\n print(e)\n\n # Delete the hyperparameter tuning job using the Vertex hyperparameter tuning object\n try:\n if \"hpt_job\" in globals():\n hpt_job.delete()\n except Exception as e:\n print(e)\n\n if \"BUCKET_NAME\" in globals():\n ! gsutil rm -r $BUCKET_NAME",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4ac18bfff150d8d023708af21e4a86972b987c3f
| 2,734 |
ipynb
|
Jupyter Notebook
|
11. Linked List-1/9.Insert_Node_Recursively.ipynb
|
Ansh-cell/Data-structure-Algorithms-using-Python
|
2074bd1aece7ea95a8ae12bd3e4de8139711eba1
|
[
"MIT"
] | 2 |
2021-07-06T21:27:33.000Z
|
2021-08-24T14:28:34.000Z
|
11. Linked List-1/9.Insert_Node_Recursively.ipynb
|
Ansh-cell/Data-structure-Algorithms-using-Python
|
2074bd1aece7ea95a8ae12bd3e4de8139711eba1
|
[
"MIT"
] | null | null | null |
11. Linked List-1/9.Insert_Node_Recursively.ipynb
|
Ansh-cell/Data-structure-Algorithms-using-Python
|
2074bd1aece7ea95a8ae12bd3e4de8139711eba1
|
[
"MIT"
] | null | null | null | 23.568966 | 74 | 0.464887 |
[
[
[
"class Node:\n\n def __init__(self, data):\n self.data = data\n self.address_of_next_node = None\n\n\ndef Take_Input():\n\n Input = [int(element) for element in input().split()]\n\n head = None\n tail = None\n\n for current_data in Input:\n\n if current_data == -1:\n return\n new_Node = Node(current_data)\n\n if head is None:\n head = new_Node\n tail = new_Node\n else:\n tail.address_of_next_node = new_Node\n tail = new_Node\n\n return head\n\n\ndef Insert(head,i,data):\n\n if i < 0:\n return head\n\n if i == 0:\n new_Node = Node(data)\n new_Node.address_of_next_node = head\n return new_Node\n if head is None:\n return None\n\n smaller_head = Insert(head.address_of_next_node, i - 1, data)\n head.address_of_next_node = smaller_head\n return head\n\ndef Print_LL(head):\n\n while head is not None:\n print(str(head.data) + \" -> \",end=\"\")\n head = head.address_of_next_node\n print(\"None\")\n return",
"_____no_output_____"
],
[
"first_node_head = Take_Input()\nPrint_LL(first_node_head)\nremove_index = int(input(\"Enter ith node: \"))\ndata_of_ith_node = int(input(\"Enter data: \"))\nnew_LL = Insert(first_node_head, remove_index, data_of_ith_node)\nPrint_LL(new_LL)\n",
"1 -> 2 -> 3 -> None\n1 -> 2 -> 3 -> None\n"
]
]
] |
[
"code"
] |
[
[
"code",
"code"
]
] |
4ac1a036019104d443253fef5b4f30d935406ff8
| 3,004 |
ipynb
|
Jupyter Notebook
|
Algorithm/62. circular array rotation.ipynb
|
faisalsanto007/Hakcerrank-problem-solving
|
eaf6404e8896fe3448df8a3cb4c86585fd7bebcc
|
[
"MIT"
] | null | null | null |
Algorithm/62. circular array rotation.ipynb
|
faisalsanto007/Hakcerrank-problem-solving
|
eaf6404e8896fe3448df8a3cb4c86585fd7bebcc
|
[
"MIT"
] | null | null | null |
Algorithm/62. circular array rotation.ipynb
|
faisalsanto007/Hakcerrank-problem-solving
|
eaf6404e8896fe3448df8a3cb4c86585fd7bebcc
|
[
"MIT"
] | null | null | null | 26.121739 | 426 | 0.494008 |
[
[
[
"John Watson knows of an operation called a right circular rotation on an array of integers. One rotation operation moves the last array element to the first position and shifts all remaining elements right one. To test Sherlock's abilities, Watson provides Sherlock with an array of integers. Sherlock is to perform the rotation operation a number of times then determine the value of the element at a given position.\n\nFor each array, perform a number of right circular rotations and return the values of the elements at the given indices.",
"_____no_output_____"
]
],
[
[
"n,k,q = raw_input().strip().split(' ')\nn,k,q = [int(n),int(k),int(q)]\na = map(int,raw_input().strip().split(' '))\n\n#print a \nif k%n!=0:\n k=k%n\n for i in range(k):\n last=a[-1]\n a=a[:-1]\n a.insert(0,last)\n \nfor a0 in xrange(q):\n m = int(raw_input().strip())\n print a[m]",
"_____no_output_____"
],
[
"if __name__ == \"__main__\":\n n, k, q = map(int, input().split())\n a = list(map(int, input().split()))\n\n # make the circular rotation\n # in Python, it's ultra-simple\n k = n - k % n\n a = a[k:] + a[:k]\n\n for _ in range(q):\n i = int(input())\n print(a[i])",
"_____no_output_____"
]
],
[
[
"**Sample Input 0**\n\n 3 2 3\n 1 2 3\n 0\n 1\n 2\n\n**Sample Output 0**\n\n 2\n 3\n 1\n\n**Explanation 0**\n\n After the first rotation, the array is [3,1,2].\n After the second (and final) rotation, the array is [2,3,1].\n\n We will call this final state array b = [2,3,1]. For each query, we just have to get the value of b[queries[i]].\n\n 1. queries[0] = 0, b[0] = 2.\n 2. queries[1] = 1, b[1] = 3.\n 3. queries[2] = 2, b[2] = 1.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
4ac1a39749784bf52a054cfc460cccce86d459a3
| 3,257 |
ipynb
|
Jupyter Notebook
|
simulation_class.ipynb
|
HaoLiNick/quantbasic
|
59ff8bef07df2357cc91e7092c8cb660285541b0
|
[
"MIT"
] | null | null | null |
simulation_class.ipynb
|
HaoLiNick/quantbasic
|
59ff8bef07df2357cc91e7092c8cb660285541b0
|
[
"MIT"
] | null | null | null |
simulation_class.ipynb
|
HaoLiNick/quantbasic
|
59ff8bef07df2357cc91e7092c8cb660285541b0
|
[
"MIT"
] | null | null | null | 31.931373 | 87 | 0.538839 |
[
[
[
"import numpy as np\nimport pandas as pd",
"_____no_output_____"
],
[
"class simulation_class():\n \n def __init__(self,name,mar_env,corr):\n self.name=name\n self.pricing_data=mar_env.pricing_data\n self.initial_value=mar_env.get_constant('initial_value')\n self.volatility=mar_env.get_constant('volatility')\n self.final_date=mar_env.get_constant('final_date')\n self.currency=mar_env.get_constant('currency')\n self.frequency=mar_env.get_constant('frequency')\n self.paths=mar_env.get_constant('paths')\n self.discount_curve=mar_env.get_curve('discount_curve')\n try:\n self.time_grid=mar_env.get_list('time_grid')\n except:\n self.time_grid=None\n try:\n self.special_dates=mar_env.get_list('special_dates')\n except:\n self.special_dates=[]\n self.instrument_values=None\n self.correlated=corr\n if corr is True:\n self.cholesky_matrix=mar_env.get_list('cholesky_matrix')\n self.rn_set=mar_env.get_list('rn_set')[self.name]\n self.random_numbers=mar_env.get_list('random_numbers')\n \n def generate_time_grid(self):\n start=self.pricing_data\n end=self.final_date\n time_grid=pd.date_range(start,end,freq=self.frequency).to_pydatetime()\n time_grid=list(time_grid)\n if start not in time_grid:\n time_grid.insert(0,start)\n if end not in time_grid:\n time_grid.append(end)\n if len(self.special_dates)>0:\n time_grid.extend(self.special_dates)\n time_grid=list(set(time_grid))\n time_grid.sort()\n self.time_grid=np.array(time_grid)\n \n def get_instrument_values(self,fixed_seed=True):\n if self.instrument_values is None:\n self.generate_paths(fixed_seed=fixed_seed,day_count=365)\n elif fixed_seed is False:\n self.generate_paths(fixed_seed=fixed_seed,day_count=365)\n return self.instrument_values\n ",
"_____no_output_____"
],
[
"\n",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code"
]
] |
4ac1a46e2c39b1bcffbeab1efed60b42e1fa95cf
| 2,397 |
ipynb
|
Jupyter Notebook
|
DSA/arrays/.ipynb_checkpoints/MaxArea-checkpoint.ipynb
|
lance-lh/Data-Structures-and-Algorithms
|
c432654edaeb752536e826e88bcce3ed2ab000fb
|
[
"MIT"
] | 1 |
2019-03-27T13:00:28.000Z
|
2019-03-27T13:00:28.000Z
|
DSA/arrays/.ipynb_checkpoints/MaxArea-checkpoint.ipynb
|
lance-lh/Data-Structures-and-Algorithms
|
c432654edaeb752536e826e88bcce3ed2ab000fb
|
[
"MIT"
] | null | null | null |
DSA/arrays/.ipynb_checkpoints/MaxArea-checkpoint.ipynb
|
lance-lh/Data-Structures-and-Algorithms
|
c432654edaeb752536e826e88bcce3ed2ab000fb
|
[
"MIT"
] | null | null | null | 23.271845 | 99 | 0.460576 |
[
[
[
"Given n non-negative integers a1, a2, ..., an ,\nwhere each represents a point at coordinate (i, ai).\nn vertical lines are drawn such that the two endpoints of line i is at (i, ai) and (i, 0).\nFind two lines, which together with x-axis forms a container,\nsuch that the container contains the most water.\n\nNote: You may not slant the container and n is at least 2.\n\nExample:\n\n Input: [1,8,6,2,5,4,8,3,7]\n Output: 49",
"_____no_output_____"
]
],
[
[
"# brute-force, O(n^2)\n'''\ndef maxArea(height):\n\n Tarea = []\n for j in list(reversed(range(len(height)))):\n for i in list(range(j)):\n area = compute_area(i,j)\n Tarea.append(area)\n return max(Tarea)\n\ndef compute_area(i,j):\n w = j - i\n h = min(height[i], height[j])\n area = w * h\n return area\n'''\n\n\n# two pointers, O(n)\ndef maxArea(height):\n l = 0\n r = len(height) - 1\n lstarea = []\n while l < r:\n area = (r - l) * min(height[r],height[l])\n lstarea.append(area)\n if height[l] < height[r]:\n l = l + 1\n else:\n r = r - 1\n return max(lstarea)\n",
"_____no_output_____"
],
[
"# test\nheight = [1,8,6,2,5,4,8,3,7]\nprint(maxArea(height))",
"49\n"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code"
]
] |
4ac1a9e3d243397b655c97604128bc5efcdcb757
| 30,213 |
ipynb
|
Jupyter Notebook
|
docs/notebooks/classifiers.ipynb
|
kwombach/recordlinkage
|
918c80d109a97fd71088ad44b0eb7aa85c02c3f4
|
[
"BSD-3-Clause"
] | 2 |
2019-02-16T09:53:57.000Z
|
2019-02-16T09:54:31.000Z
|
docs/notebooks/classifiers.ipynb
|
kwombach/recordlinkage
|
918c80d109a97fd71088ad44b0eb7aa85c02c3f4
|
[
"BSD-3-Clause"
] | null | null | null |
docs/notebooks/classifiers.ipynb
|
kwombach/recordlinkage
|
918c80d109a97fd71088ad44b0eb7aa85c02c3f4
|
[
"BSD-3-Clause"
] | null | null | null | 30.892638 | 758 | 0.459968 |
[
[
[
"# Classification algorithms\n\nIn the context of record linkage, classification refers to the process of dividing record pairs into matches and non-matches (distinct pairs). There are dozens of classification algorithms for record linkage. Roughly speaking, classification algorithms fall into two groups: \n\n- **supervised learning algorithms** - These algorithms make use of trainings data. If you do have trainings data, then you can use supervised learning algorithms. Most supervised learning algorithms offer good accuracy and reliability. Examples of supervised learning algorithms in the *Python Record Linkage Toolkit* are *Logistic Regression*, *Naive Bayes* and *Support Vector Machines*. \n- **unsupervised learning algorithms** - These algorithms do not need training data. The *Python Record Linkage Toolkit* supports *K-means clustering* and an *Expectation/Conditional Maximisation* classifier. ",
"_____no_output_____"
]
],
[
[
"%precision 5\n\nfrom __future__ import print_function\n\nimport pandas as pd\npd.set_option('precision',5)\npd.options.display.max_rows = 10\n",
"_____no_output_____"
]
],
[
[
"**First things first**\n\nThe examples below make use of the [Krebs register](http://recordlinkage.readthedocs.org/en/latest/reference.html#recordlinkage.datasets.krebsregister_cmp_data) (German for cancer registry) dataset. The Krebs register dataset contains comparison vectors of a large set of record pairs. For each record pair, it is known if the records represent the same person (match) or not (non-match). This was done with a massive clerical review. First, import the recordlinkage module and load the Krebs register data. The dataset contains 5749132 compared record pairs and has the following variables: first name, last name, sex, birthday, birth month, birth year and zip code. The Krebs register contains `len(krebs_true_links) == 20931` matching record pairs. ",
"_____no_output_____"
]
],
[
[
"import recordlinkage as rl\nfrom recordlinkage.datasets import load_krebsregister\n\nkrebs_X, krebs_true_links = load_krebsregister(missing_values=0)\nkrebs_X",
"_____no_output_____"
]
],
[
[
"Most classifiers can not handle comparison vectors with missing values. To prevent issues with the classification algorithms, we convert the missing values into disagreeing comparisons (using argument missing_values=0). This approach for handling missing values is widely used in record linkage applications.",
"_____no_output_____"
]
],
[
[
"krebs_X.describe().T",
"_____no_output_____"
]
],
[
[
"## Supervised learning\nAs described before, supervised learning algorithms do need training data. Training data is data for which the true match status is known for each comparison vector. In the example in this section, we consider that the true match status of the first 5000 record pairs of the Krebs register data is known.",
"_____no_output_____"
]
],
[
[
"golden_pairs = krebs_X[0:5000]\ngolden_matches_index = golden_pairs.index & krebs_true_links # 2093 matching pairs",
"_____no_output_____"
]
],
[
[
"### Logistic regression\nThe ``recordlinkage.LogisticRegressionClassifier`` classifier is an application of the logistic regression model. This supervised learning method is one of the oldest classification algorithms used in record linkage. In situations with enough training data, the algorithm gives relatively good results.",
"_____no_output_____"
]
],
[
[
"# Initialize the classifier\nlogreg = rl.LogisticRegressionClassifier()\n\n# Train the classifier\nlogreg.fit(golden_pairs, golden_matches_index)\nprint (\"Intercept: \", logreg.intercept)\nprint (\"Coefficients: \", logreg.coefficients)",
"Intercept: -6.298043571006437\nCoefficients: [ 4.90452843e-01 1.21640484e-01 2.15040485e+00 -2.84818101e-03\n -1.79712465e+00 9.61085558e-01 6.72610441e-02 1.03408608e+00\n 4.30556110e+00]\n"
],
[
"# Predict the match status for all record pairs\nresult_logreg = logreg.predict(krebs_X)\n\nlen(result_logreg)",
"_____no_output_____"
],
[
"rl.confusion_matrix(krebs_true_links, result_logreg, len(krebs_X))",
"_____no_output_____"
],
[
"# The F-score for this prediction is\nrl.fscore(krebs_true_links, result_logreg)",
"_____no_output_____"
]
],
[
[
"The predicted number of matches is not much more than the 20931 true matches. The result was achieved with a small training dataset of 5000 record pairs.",
"_____no_output_____"
],
[
"In (older) literature, record linkage procedures are often divided into **deterministic record linkage** and **probabilistic record linkage**. The Logistic Regression Classifier belongs to deterministic record linkage methods. Each feature/variable has a certain importance (named weight). The weight is multiplied with the comparison/similarity vector. If the total sum exceeds a certain threshold, it is considered to be a match. ",
"_____no_output_____"
]
],
[
[
"intercept = -9\ncoefficients = [2.0, 1.0, 3.0, 1.0, 1.0, 1.0, 1.0, 2.0, 3.0]\n\nlogreg = rl.LogisticRegressionClassifier(coefficients, intercept)\n\n# predict without calling LogisticRegressionClassifier.fit\nresult_logreg_pretrained = logreg.predict(krebs_X)\nprint (len(result_logreg_pretrained))",
"21303\n"
],
[
"rl.confusion_matrix(krebs_true_links, result_logreg_pretrained, len(krebs_X))",
"_____no_output_____"
],
[
"# The F-score for this classification is\nrl.fscore(krebs_true_links, result_logreg_pretrained)",
"_____no_output_____"
]
],
[
[
"For the given coefficients, the F-score is better than the situation without training data. Surprising? No (use more training data and the result will improve).",
"_____no_output_____"
],
[
"### Naive Bayes\nIn contrast to the logistic regression classifier, the Naive Bayes classifier is a probabilistic classifier. The probabilistic record linkage framework by Fellegi and Sunter (1969) is the most well-known probabilistic classification method for record linkage. Later, it was proved that the Fellegi and Sunter method is mathematically equivalent to the Naive Bayes method in case of assuming independence between comparison variables.",
"_____no_output_____"
]
],
[
[
"# Train the classifier\nnb = rl.NaiveBayesClassifier(binarize=0.3)\nnb.fit(golden_pairs, golden_matches_index)\n\n# Predict the match status for all record pairs\nresult_nb = nb.predict(krebs_X)\n\nlen(result_nb)",
"_____no_output_____"
],
[
"rl.confusion_matrix(krebs_true_links, result_nb, len(krebs_X))",
"_____no_output_____"
],
[
"# The F-score for this classification is\nrl.fscore(krebs_true_links, result_nb)",
"_____no_output_____"
]
],
[
[
"### Support Vector Machines\n\nSupport Vector Machines (SVM) have become increasingly popular in record linkage. The algorithm performs well when there is only a small amount of training data available. The implementation of SVM in the Python Record Linkage Toolkit is a linear SVM algorithm. ",
"_____no_output_____"
]
],
[
[
"# Train the classifier\nsvm = rl.SVMClassifier()\nsvm.fit(golden_pairs, golden_matches_index)\n\n# Predict the match status for all record pairs\nresult_svm = svm.predict(krebs_X)\n\nlen(result_svm)",
"_____no_output_____"
],
[
"rl.confusion_matrix(krebs_true_links, result_svm, len(krebs_X))",
"_____no_output_____"
],
[
"# The F-score for this classification is\nrl.fscore(krebs_true_links, result_svm)",
"_____no_output_____"
]
],
[
[
"## Unsupervised learning\nIn situations without training data, unsupervised learning can be a solution for record linkage problems. In this section, we discuss two unsupervised learning methods. One algorithm is K-means clustering, and the other algorithm is an implementation of the Expectation-Maximisation algorithm. Most of the time, unsupervised learning algorithms take more computational time because of the iterative structure in these algorithms.",
"_____no_output_____"
],
[
"### K-means clustering\n\nThe K-means clustering algorithm is well-known and widely used in big data analysis. The K-means classifier in the Python Record Linkage Toolkit package is configured in such a way that it can be used for linking records. For more info about the K-means clustering see [Wikipedia](https://en.wikipedia.org/wiki/K-means_clustering). ",
"_____no_output_____"
]
],
[
[
"kmeans = rl.KMeansClassifier()\nresult_kmeans = kmeans.fit_predict(krebs_X)\n\n# The predicted number of matches\nlen(result_kmeans)",
"_____no_output_____"
]
],
[
[
"The classifier is now trained and the comparison vectors are classified. ",
"_____no_output_____"
]
],
[
[
"rl.confusion_matrix(krebs_true_links, result_kmeans, len(krebs_X))",
"_____no_output_____"
],
[
"rl.fscore(krebs_true_links, result_kmeans)",
"_____no_output_____"
]
],
[
[
"### Expectation/Conditional Maximization Algorithm\nThe ECM-algorithm is an Expectation-Maximisation algorithm with some additional constraints. This algorithm is closely related to the Naive Bayes algorithm. The ECM algorithm is also closely related to estimating the parameters in the Fellegi and Sunter (1969) framework. The algorithms assume that the attributes are independent of each other. The Naive Bayes algorithm uses the same principles.",
"_____no_output_____"
]
],
[
[
"# Train the classifier\necm = rl.ECMClassifier(binarize=0.8)\nresult_ecm = ecm.fit_predict(krebs_X)\n\nlen(result_ecm)",
"_____no_output_____"
],
[
"rl.confusion_matrix(krebs_true_links, result_ecm, len(krebs_X))",
"_____no_output_____"
],
[
"# The F-score for this classification is\nrl.fscore(krebs_true_links, result_ecm)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
4ac1ad40a3791b421e2cdf41bbc8028da7f61b2c
| 506,816 |
ipynb
|
Jupyter Notebook
|
11-Model-Deep-Factorization.ipynb
|
Abhishek-Aditya-bs/Recommender-System-MovieLens
|
5bda21346561c6e5d2fc30855cae2b5dd6f8a4a8
|
[
"MIT"
] | 294 |
2016-11-18T13:22:30.000Z
|
2022-03-30T08:26:32.000Z
|
MovieLens/11-Model-Deep-Factorization.ipynb
|
karndeepsingh/recommendation
|
7f7ecbddd113b07a34d205923697a3da919b102f
|
[
"MIT"
] | 14 |
2020-01-28T23:00:51.000Z
|
2022-02-10T00:22:53.000Z
|
MovieLens/11-Model-Deep-Factorization.ipynb
|
karndeepsingh/recommendation
|
7f7ecbddd113b07a34d205923697a3da919b102f
|
[
"MIT"
] | 117 |
2016-11-18T13:22:31.000Z
|
2022-03-28T05:39:04.000Z
| 506.816 | 256,168 | 0.939962 |
[
[
[
"# Deep Matrix Factorisation \n\nMatrix factorization with deep layers",
"_____no_output_____"
]
],
[
[
"import sys\nsys.path.append(\"../\")\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")",
"_____no_output_____"
],
[
"import numpy as np\nimport pandas as pd\nfrom IPython.display import SVG, display\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom reco.preprocess import encode_user_item, random_split, user_split",
"_____no_output_____"
],
[
"%matplotlib inline",
"_____no_output_____"
]
],
[
[
"### Prepare the data",
"_____no_output_____"
]
],
[
[
"df_ratings = pd.read_csv(\"data/ratings.csv\")\ndf_items = pd.read_csv(\"data/items.csv\")",
"_____no_output_____"
],
[
"# Data Encoding\nDATA, user_encoder, item_encoder = encode_user_item(df_ratings, \"user_id\", \"movie_id\", \"rating\", \"unix_timestamp\")",
"Number of users: 943\nNumber of items: 1682\n"
],
[
"DATA.head()",
"_____no_output_____"
],
[
"n_users = DATA.USER.nunique()\nn_items = DATA.ITEM.nunique()\nn_users, n_items",
"_____no_output_____"
],
[
"max_rating = DATA.RATING.max()\nmin_rating = DATA.RATING.min()\nmin_rating, max_rating",
"_____no_output_____"
],
[
"# Data Splitting\n#train, val, test = user_split(DATA, [0.6, 0.2, 0.2])\ntrain, test = user_split(DATA, [0.9, 0.1])",
"_____no_output_____"
],
[
"train.shape, test.shape",
"_____no_output_____"
]
],
[
[
"## Deep Matrix Factorization\n\nThis is a model with User and Item Embedding Dot Product",
"_____no_output_____"
]
],
[
[
"from keras.models import Model\nfrom keras.layers import Input, Embedding, Flatten, Dot, Add, Lambda, Activation, Reshape, Concatenate, Dense, Dropout\nfrom keras.regularizers import l2\nfrom keras.constraints import non_neg\nfrom keras.optimizers import Adam\nfrom keras.utils import plot_model\nfrom keras.utils.vis_utils import model_to_dot\nfrom reco import vis",
"_____no_output_____"
]
],
[
[
"### Build the Model",
"_____no_output_____"
]
],
[
[
"def Deep_MF(n_users, n_items, n_factors):\n \n # Item Layer\n item_input = Input(shape=[1], name='Item')\n item_embedding = Embedding(n_items, n_factors, embeddings_regularizer=l2(1e-6),\n embeddings_initializer='glorot_normal',\n name='ItemEmbedding')(item_input)\n item_vec = Flatten(name='FlattenItemE')(item_embedding)\n \n # Item Bias\n item_bias = Embedding(n_items, 1, embeddings_regularizer=l2(1e-6), \n embeddings_initializer='glorot_normal',\n name='ItemBias')(item_input)\n item_bias_vec = Flatten(name='FlattenItemBiasE')(item_bias)\n\n # User Layer\n user_input = Input(shape=[1], name='User')\n user_embedding = Embedding(n_users, n_factors, embeddings_regularizer=l2(1e-6),\n embeddings_initializer='glorot_normal',\n name='UserEmbedding')(user_input)\n user_vec = Flatten(name='FlattenUserE')(user_embedding)\n \n # User Bias\n user_bias = Embedding(n_users, 1, embeddings_regularizer=l2(1e-6), \n embeddings_initializer='glorot_normal',\n name='UserBias')(user_input)\n user_bias_vec = Flatten(name='FlattenUserBiasE')(user_bias)\n\n # Dot Product of Item and User & then Add Bias\n Concat = Concatenate(name='Concat')([item_vec, user_vec])\n ConcatDrop = Dropout(0.5)(Concat)\n\n kernel_initializer='he_normal'\n \n # Use Dense to learn non-linear dense representation\n Dense_1 = Dense(10, kernel_initializer='glorot_normal', name=\"Dense1\")(ConcatDrop)\n Dense_1_Drop = Dropout(0.5)(Dense_1)\n Dense_2 = Dense(1, kernel_initializer='glorot_normal', name=\"Dense2\")(Dense_1_Drop)\n\n \n AddBias = Add(name=\"AddBias\")([Dense_2, item_bias_vec, user_bias_vec])\n \n \n \n # Scaling for each user\n y = Activation('sigmoid')(AddBias)\n rating_output = Lambda(lambda x: x * (max_rating - min_rating) + min_rating)(y)\n \n # Model Creation\n model = Model([user_input, item_input], rating_output)\n \n # Compile Model\n model.compile(loss='mean_squared_error', optimizer=Adam(lr=0.001))\n \n return model",
"_____no_output_____"
],
[
"n_factors = 50\nmodel = Deep_MF(n_users, n_items, n_factors)",
"_____no_output_____"
],
[
"model.summary()",
"Model: \"model_7\"\n__________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n==================================================================================================\nItem (InputLayer) (None, 1) 0 \n__________________________________________________________________________________________________\nUser (InputLayer) (None, 1) 0 \n__________________________________________________________________________________________________\nItemEmbedding (Embedding) (None, 1, 50) 84100 Item[0][0] \n__________________________________________________________________________________________________\nUserEmbedding (Embedding) (None, 1, 50) 47150 User[0][0] \n__________________________________________________________________________________________________\nFlattenItemE (Flatten) (None, 50) 0 ItemEmbedding[0][0] \n__________________________________________________________________________________________________\nFlattenUserE (Flatten) (None, 50) 0 UserEmbedding[0][0] \n__________________________________________________________________________________________________\nConcat (Concatenate) (None, 100) 0 FlattenItemE[0][0] \n FlattenUserE[0][0] \n__________________________________________________________________________________________________\ndropout_7 (Dropout) (None, 100) 0 Concat[0][0] \n__________________________________________________________________________________________________\nDense1 (Dense) (None, 10) 1010 dropout_7[0][0] \n__________________________________________________________________________________________________\ndropout_8 (Dropout) (None, 10) 0 Dense1[0][0] \n__________________________________________________________________________________________________\nItemBias (Embedding) (None, 1, 1) 1682 Item[0][0] \n__________________________________________________________________________________________________\nUserBias (Embedding) (None, 1, 1) 943 User[0][0] 
\n__________________________________________________________________________________________________\nDense2 (Dense) (None, 1) 11 dropout_8[0][0] \n__________________________________________________________________________________________________\nFlattenItemBiasE (Flatten) (None, 1) 0 ItemBias[0][0] \n__________________________________________________________________________________________________\nFlattenUserBiasE (Flatten) (None, 1) 0 UserBias[0][0] \n__________________________________________________________________________________________________\nAddBias (Add) (None, 1) 0 Dense2[0][0] \n FlattenItemBiasE[0][0] \n FlattenUserBiasE[0][0] \n__________________________________________________________________________________________________\nactivation_7 (Activation) (None, 1) 0 AddBias[0][0] \n__________________________________________________________________________________________________\nlambda_7 (Lambda) (None, 1) 0 activation_7[0][0] \n==================================================================================================\nTotal params: 134,896\nTrainable params: 134,896\nNon-trainable params: 0\n__________________________________________________________________________________________________\n"
],
[
"from reco.utils import create_directory\ncreate_directory(\"/model-img\")",
"Directory already exists /Users/amitkaps/Documents/github/recommendation/MovieLens/model-img\n"
],
[
"plot_model(model, show_layer_names=True, show_shapes=True, to_file=\"model-img/Deep-CF.png\" )",
"_____no_output_____"
]
],
[
[
"### Train the Model",
"_____no_output_____"
]
],
[
[
"%%time\noutput = model.fit([train.USER, train.ITEM], train.RATING, \n batch_size=128, epochs=5, verbose=1, \n validation_data= ([test.USER, test.ITEM], test.RATING))",
"Train on 90009 samples, validate on 9991 samples\nEpoch 1/5\n90009/90009 [==============================] - 5s 55us/step - loss: 1.0582 - val_loss: 0.9087\nEpoch 2/5\n90009/90009 [==============================] - 4s 42us/step - loss: 0.9246 - val_loss: 0.8940\nEpoch 3/5\n90009/90009 [==============================] - 3s 35us/step - loss: 0.9042 - val_loss: 0.8906\nEpoch 4/5\n90009/90009 [==============================] - 4s 42us/step - loss: 0.8911 - val_loss: 0.8911\nEpoch 5/5\n90009/90009 [==============================] - 3s 36us/step - loss: 0.8862 - val_loss: 0.8903\nCPU times: user 29 s, sys: 2.74 s, total: 31.7 s\nWall time: 19.5 s\n"
],
[
"vis.metrics(output.history)",
"_____no_output_____"
]
],
[
[
"### Score the Model",
"_____no_output_____"
]
],
[
[
"score = model.evaluate([test.USER, test.ITEM], test.RATING, verbose=1)\nscore",
"20000/20000 [==============================] - 0s 14us/step\n"
]
],
[
[
"### Evaluate the Model",
"_____no_output_____"
]
],
[
[
"from reco.evaluate import get_embedding, get_predictions, recommend_topk\nfrom reco.evaluate import precision_at_k, recall_at_k, ndcg_at_k",
"_____no_output_____"
],
[
"item_embedding = get_embedding(model, \"ItemEmbedding\")\nuser_embedding = get_embedding(model, \"UserEmbedding\")",
"_____no_output_____"
],
[
"%%time\npredictions = get_predictions(model, DATA)",
"CPU times: user 21.5 s, sys: 1.89 s, total: 23.4 s\nWall time: 16.5 s\n"
],
[
"predictions.head()",
"_____no_output_____"
],
[
"%%time\n# Recommendation for Top10K\nranking_topk = recommend_topk(model, DATA, train, k=5)",
"CPU times: user 24.9 s, sys: 1.72 s, total: 26.6 s\nWall time: 19.7 s\n"
],
[
"eval_precision = precision_at_k(test, ranking_topk, k=10)\neval_recall = recall_at_k(test, ranking_topk, k=10)\neval_ndcg = ndcg_at_k(test, ranking_topk, k=10)\n\nprint(\"NDCG@K:\\t%f\" % eval_ndcg,\n \"Precision@K:\\t%f\" % eval_precision,\n \"Recall@K:\\t%f\" % eval_recall, sep='\\n')",
"NDCG@K:\t0.037769\nPrecision@K:\t0.026321\nRecall@K:\t0.013309\n"
]
],
[
[
"### Get Similar Items",
"_____no_output_____"
]
],
[
[
"from reco.recommend import get_similar, show_similar",
"_____no_output_____"
],
[
"%%time\nitem_distances, item_similar_indices = get_similar(item_embedding, 5)",
"CPU times: user 61 ms, sys: 0 ns, total: 61 ms\nWall time: 60.5 ms\n"
],
[
"item_similar_indices",
"_____no_output_____"
],
[
"show_similar(1, item_similar_indices, item_encoder)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
4ac1b6d6d70bbf3c9c4131b133bee5041f05fdd1
| 188,670 |
ipynb
|
Jupyter Notebook
|
closest.ipynb
|
davidjwilson/GD394-Tess
|
eafba78d04196f7d160fe5ce937030c08f8c3a5d
|
[
"MIT"
] | null | null | null |
closest.ipynb
|
davidjwilson/GD394-Tess
|
eafba78d04196f7d160fe5ce937030c08f8c3a5d
|
[
"MIT"
] | null | null | null |
closest.ipynb
|
davidjwilson/GD394-Tess
|
eafba78d04196f7d160fe5ce937030c08f8c3a5d
|
[
"MIT"
] | null | null | null | 155.92562 | 35,468 | 0.821413 |
[
[
[
"Looking at the giant neaby star",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport astropy.io.fits as fits\nimport os\nimport glob\nfrom astropy.table import Table\nfrom astropy.io import ascii\nimport astropy.units as u\nimport astropy.constants as const\nfrom astropy.modeling import models, fitting\nimport lightkurve as lk\nfrom astropy.timeseries import LombScargle\nimport scipy.signal as signal\n\n#matplotlib set up\n%matplotlib inline\nfrom matplotlib import rcParams\nrcParams[\"figure.figsize\"] = (14, 5)\nrcParams[\"font.size\"] = 20",
"_____no_output_____"
],
[
"from lightkurve import search_targetpixelfile\npixelfile = search_targetpixelfile('GD394', sector=15).download(quality_bitmask='hardest')",
"_____no_output_____"
],
[
"pixelfile.plot(frame=1,aperture_mask=pixelfile.pipeline_mask);",
"_____no_output_____"
],
[
"pixelfile.interact()",
"_____no_output_____"
],
[
"lc = lk.lightcurvefile.LightCurveFile(path='tess2019226182529-s0015-0000000259773610-0151-s_tp-custom-lc.fits') \nlc1 = fits.open('tess2019226182529-s0015-0000000259773610-0151-s_tp-custom-lc.fits')\ndata = lc1[1].data\nlc2 = lk.LightCurve(time=data['TIME'], flux= data['FLUX'])\n#lc2.scatter()\n\npg = lc2.to_periodogram(oversample_factor=10,minimum_period=0.8*u.day, maximum_period=1.5*u.day)\n#plt.plot(pg.period, pg.power)\n\npg.plot()\nprint(pg.period_at_max_power)",
"1.0109293599047895 d\n"
],
[
"\n",
"_____no_output_____"
],
[
"pf16 = search_targetpixelfile('GD394', sector=16).download(quality_bitmask='hardest')",
"_____no_output_____"
],
[
"pf16.plot(frame=1,aperture_mask=pixelfile.pipeline_mask);",
"_____no_output_____"
],
[
"pf16.interact()",
"_____no_output_____"
],
[
"lc12 = fits.open('tess2019253231442-s0016-0000000259773610-0152-s_tp-custom-lc.fits')\ndata = lc12[1].data\nlc22 = lk.LightCurve(time=data['TIME'], flux= data['FLUX'])\n#lc2.scatter()\n\npg2 = lc22.to_periodogram(oversample_factor=10,minimum_period=0.8*u.day, maximum_period=1.2*u.day)\nplt.plot(pg2.period, pg2.power)",
"_____no_output_____"
],
[
"pixelfile.interact_sky()",
"_____no_output_____"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4ac1d312e5283f81bf1784d7f00d2ec1e885fcb9
| 533,706 |
ipynb
|
Jupyter Notebook
|
convolutional-neural-networks/conv-visualization/conv_visualization.ipynb
|
mohanadhammad/deep-learning-v2-pytorch
|
f17e7f075326320ac031c1f18db751e9d83e7a85
|
[
"MIT"
] | null | null | null |
convolutional-neural-networks/conv-visualization/conv_visualization.ipynb
|
mohanadhammad/deep-learning-v2-pytorch
|
f17e7f075326320ac031c1f18db751e9d83e7a85
|
[
"MIT"
] | null | null | null |
convolutional-neural-networks/conv-visualization/conv_visualization.ipynb
|
mohanadhammad/deep-learning-v2-pytorch
|
f17e7f075326320ac031c1f18db751e9d83e7a85
|
[
"MIT"
] | null | null | null | 1,379.085271 | 163,976 | 0.955299 |
[
[
[
"# Convolutional Layer\n\nIn this notebook, we visualize four filtered outputs (a.k.a. activation maps) of a convolutional layer. \n\nIn this example, *we* are defining four filters that are applied to an input image by initializing the **weights** of a convolutional layer, but a trained CNN will learn the values of these weights.\n\n<img src='notebook_ims/conv_layer.gif' height=60% width=60% />",
"_____no_output_____"
],
[
"### Import the image",
"_____no_output_____"
]
],
[
[
"import cv2\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\n# TODO: Feel free to try out your own images here by changing img_path\n# to a file path to another image on your computer!\nimg_path = 'data/udacity_sdc.png'\n\n# load color image \nbgr_img = cv2.imread(img_path)\n# convert to grayscale\ngray_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2GRAY)\n\n# normalize, rescale entries to lie in [0,1]\ngray_img = gray_img.astype(\"float32\")/255\n\n# plot image\nplt.imshow(gray_img, cmap='gray')\nplt.show()",
"_____no_output_____"
]
],
[
[
"### Define and visualize the filters",
"_____no_output_____"
]
],
[
[
"import numpy as np\n\n## TODO: Feel free to modify the numbers here, to try out another filter!\nfilter_vals = np.array([[-1, -1, 1, 1], [-1, -1, 1, 1], [-1, -1, 1, 1], [-1, -1, 1, 1]])\n\nprint('Filter shape: ', filter_vals.shape)\n",
"Filter shape: (4, 4)\n"
],
[
"# Defining four different filters, \n# all of which are linear combinations of the `filter_vals` defined above\n\n# define four filters\nfilter_1 = filter_vals\nfilter_2 = -filter_1\nfilter_3 = filter_1.T\nfilter_4 = -filter_3\nfilters = np.array([filter_1, filter_2, filter_3, filter_4])\n\n# For an example, print out the values of filter 1\nprint('Filter 1: \\n', filter_1)",
"Filter 1: \n [[-1 -1 1 1]\n [-1 -1 1 1]\n [-1 -1 1 1]\n [-1 -1 1 1]]\n"
],
[
"# visualize all four filters\nfig = plt.figure(figsize=(10, 5))\nfor i in range(4):\n ax = fig.add_subplot(1, 4, i+1, xticks=[], yticks=[])\n ax.imshow(filters[i], cmap='gray')\n ax.set_title('Filter %s' % str(i+1))\n width, height = filters[i].shape\n for x in range(width):\n for y in range(height):\n ax.annotate(str(filters[i][x][y]), xy=(y,x),\n horizontalalignment='center',\n verticalalignment='center',\n color='white' if filters[i][x][y]<0 else 'black')",
"_____no_output_____"
]
],
[
[
"## Define a convolutional layer \n\nThe various layers that make up any neural network are documented [here](http://pytorch.org/docs/stable/nn.html). For a convolutional neural network, we'll start by defining a:\n* Convolutional layer\n\nInitialize a single convolutional layer so that it contains all your created filters. Note that you are not training this network; you are initializing the weights in a convolutional layer so that you can visualize what happens after a forward pass through this network!\n\n\n#### `__init__` and `forward`\nTo define a neural network in PyTorch, you define the layers of a model in the function `__init__` and define the forward behavior of a network that applies those initialized layers to an input (`x`) in the function `forward`. In PyTorch we convert all inputs into the Tensor datatype, which is similar to a list data type in Python. \n\nBelow, I define the structure of a class called `Net` that has a convolutional layer that can contain four 4x4 grayscale filters.",
"_____no_output_____"
]
],
[
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n \n# define a neural network with a single convolutional layer with four filters\nclass Net(nn.Module):\n \n def __init__(self, weight):\n super(Net, self).__init__()\n # initializes the weights of the convolutional layer to be the weights of the 4 defined filters\n k_height, k_width = weight.shape[2:]\n # assumes there are 4 grayscale filters\n self.conv = nn.Conv2d(1, 4, kernel_size=(k_height, k_width), bias=False)\n self.conv.weight = torch.nn.Parameter(weight)\n\n def forward(self, x):\n # calculates the output of a convolutional layer\n # pre- and post-activation\n conv_x = self.conv(x)\n activated_x = F.relu(conv_x)\n \n # returns both layers\n return conv_x, activated_x\n \n# instantiate the model and set the weights\nweight = torch.from_numpy(filters).unsqueeze(1).type(torch.FloatTensor)\nmodel = Net(weight)\n\n# print out the layer in the network\nprint(model)",
"Net(\n (conv): Conv2d(1, 4, kernel_size=(4, 4), stride=(1, 1), bias=False)\n)\n"
]
],
[
[
"### Visualize the output of each filter\n\nFirst, we'll define a helper function, `viz_layer` that takes in a specific layer and number of filters (optional argument), and displays the output of that layer once an image has been passed through.",
"_____no_output_____"
]
],
[
[
"# helper function for visualizing the output of a given layer\n# default number of filters is 4\ndef viz_layer(layer, n_filters= 4):\n fig = plt.figure(figsize=(20, 20))\n \n for i in range(n_filters):\n ax = fig.add_subplot(1, n_filters, i+1, xticks=[], yticks=[])\n # grab layer outputs\n ax.imshow(np.squeeze(layer[0,i].data.numpy()), cmap='gray')\n ax.set_title('Output %s' % str(i+1))",
"_____no_output_____"
]
],
[
[
"Let's look at the output of a convolutional layer, before and after a ReLu activation function is applied.",
"_____no_output_____"
]
],
[
[
"# plot original image\nplt.imshow(gray_img, cmap='gray')\n\n# visualize all filters\nfig = plt.figure(figsize=(12, 6))\nfig.subplots_adjust(left=0, right=1.5, bottom=0.8, top=1, hspace=0.05, wspace=0.05)\nfor i in range(4):\n ax = fig.add_subplot(1, 4, i+1, xticks=[], yticks=[])\n ax.imshow(filters[i], cmap='gray')\n ax.set_title('Filter %s' % str(i+1))\n\n \n# convert the image into an input Tensor\ngray_img_tensor = torch.from_numpy(gray_img).unsqueeze(0).unsqueeze(1)\n\n# get the convolutional layer (pre and post activation)\nconv_layer, activated_layer = model(gray_img_tensor)\n\n# visualize the output of a conv layer\nviz_layer(conv_layer)",
"_____no_output_____"
]
],
[
[
"#### ReLU activation\n\nIn this model, we've used an activation function that scales the output of the convolutional layer. We've chosen a ReLU function to do this, and this function simply turns all negative pixel values into 0's (black). See the equation pictured below for input pixel values, `x`. \n\n<img src='notebook_ims/relu_ex.png' height=50% width=50% />",
"_____no_output_____"
]
],
[
[
"# after a ReLu is applied\n# visualize the output of an activated conv layer\nviz_layer(activated_layer)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4ac1d9f6011b55dbc6778c7ec05f32649939fde0
| 62,026 |
ipynb
|
Jupyter Notebook
|
Presentations/2019/11.November/12/AdaDelta_Worksheet.ipynb
|
lmsasu/MLReadingGroup
|
a17d2d62b4c7940b9829fc52eb0167847f9e7491
|
[
"MIT"
] | 12 |
2018-10-31T14:06:55.000Z
|
2021-09-22T14:29:42.000Z
|
Presentations/2019/11.November/12/AdaDelta_Worksheet.ipynb
|
lmsasu/MLReadingGroup
|
a17d2d62b4c7940b9829fc52eb0167847f9e7491
|
[
"MIT"
] | 1 |
2020-12-16T18:18:30.000Z
|
2020-12-16T18:18:30.000Z
|
Presentations/2019/11.November/12/AdaDelta_Worksheet.ipynb
|
lmsasu/MLReadingGroup
|
a17d2d62b4c7940b9829fc52eb0167847f9e7491
|
[
"MIT"
] | 1 |
2020-12-17T10:52:37.000Z
|
2020-12-17T10:52:37.000Z
| 65.42827 | 10,892 | 0.762132 |
[
[
[
"# AdaDelta compared to AdaGrad \nPresented during ML reading group, 2019-11-12.\n\nAuthor: Ivan Bogdan-Daniel, [email protected]",
"_____no_output_____"
]
],
[
[
"#%matplotlib notebook\n%matplotlib inline\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\nprint(f'Numpy version: {np.__version__}')",
"Numpy version: 1.16.5\n"
]
],
[
[
"# AdaDelta",
"_____no_output_____"
],
[
"The [AdaDelta paper](https://arxiv.org/pdf/1212.5701.pdf) \n\nThe idea presented in this paper was derived from ADAGRAD in order to improve upon the two main drawbacks of the method:\n\n1) the continual decay of learning rates throughout training\n\n2) the need for a manually selected global learning rate. \n\n\n\nAdaGrad comes with:\n$$w_{t+1}^{(j)} = w_{t}^{(j)} - \\frac{\\eta}{\\sqrt{\\varepsilon + \\sum_{\\tau=1}^{t}(g_{\\tau}^{(j)})^2}} \\nabla J_{w}(w_t^{(j)})$$\nwhere $g_{\\tau}$ is the gradient of the error function at iteration $\\tau$, $g_{\\tau}^{(j)}$ is the partial derivative of the \nerror function in the direction of the $j$-th feature at iteration $\\tau$, and $m$ is the number of features.\n\nThe problem appears in the sum:\n\n$$\\varepsilon + \\sum_{\\tau=1}^{t}(g_{\\tau}^{(j)})^2$$\n\nIt grows into a very large number, making the fraction $$\\frac{\\eta}{\\sqrt{\\varepsilon + \\sum_{\\tau=1}^{t}(g_{\\tau}^{(j)})^2}}$$ become an insignificant number. The\nlearning rate will continue to decrease throughout training,\neventually decreasing to zero and stopping training completely. \n\n\n\n\n",
"_____no_output_____"
],
[
"# Solution\n\nInstead of accumulating the sum of squared gradients over all\ntime, we restricted the window of past gradients that are accumulated to be some fixed size w.\n\nSince storing w previous squared gradients is inefficient,\nour methods implements this accumulation as an exponentially decaying average of the squared gradients\n\nThis ensures that learning continues\nto make progress even after many iterations of updates have\nbeen done.\n\nAt time t this average is: $$E[g^2]_{t}$$ then we compute:\n\n$$E[g^2]_{t}=\\rho E[g^2]_{t-1}+(1-\\rho)g^2_{t}$$\n\nWhere $\\rho$ is a hyper parameter similar to the one used in momentum, it can take values between 0 and 1, generally 0.95 is recommended.\n\nSince we require the square root of this quantity:\n\n$$RMS[g]_{t} = \\sqrt{E[g^2]_{t}+\\epsilon}$$\n\nThe parameter update becomes:\n\n$$w_{t+1}^{(j)} = w_{t}^{(j)} - \\frac{\\eta}{RMS[g]_{t}} g_{t}$$\n\nAdaDelta rule:\n\n$$w_{t+1}^{(j)} = w_{t}^{(j)} - \\frac{RMS[\\Delta w]_{t-1}}{RMS[g]_{t}} g_{t}$$\n\nWhere $RMS[\\Delta w]_{t-1}$ is computed similar to $RMS[g]_{t}$",
"_____no_output_____"
],
[
"# Algorithm\n\nRequire: Decay rate $\\rho$, Constant $\\epsilon$\n\nRequire: Initial parameter x\n\n<img src=\"./images/adadelta_algorithm.png\" alt=\"drawing\" width=\"600\"/>\n\nSource: [AdaDelta paper](https://arxiv.org/pdf/1212.5701.pdf) ",
"_____no_output_____"
],
[
"## Generate data",
"_____no_output_____"
]
],
[
[
"from scipy.sparse import random #to generate sparse data\n\nnp.random.seed(10) # for reproducibility\nm_data = 100\nn_data = 4 #number of features of the data\n_scales = np.array([1,10, 10,1 ]) # play with these... \n\n_parameters = np.array([3, 0.5, 1, 7])\n\ndef gen_data(m, n, scales, parameters, add_noise=True):\n # Adagrad is designed especially for sparse data.\n # produce: X, a 2d tensor with m lines and n columns\n # and X[:, k] uniformly distributed in [-scale_k, scale_k] with the first and the last column containing sparse data \n #(approx 75% of the elements are 0)\n #\n # To generate a sparse data matrix with m rows and n columns\n # and random values use S = random(m, n, density=0.25).A, where density = density of the data. S will be the \n # resulting matrix \n # more information at https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.random.html\n #\n # To obtain X - generate a random matrix with X[:, k] uniformly distributed in [-scale_k, scale_k]\n # set X[:, 0] and X[:, -1] to 0 and add matrix S with the sparse data.\n #\n # let y be [email protected] + epsilon, with epsilon ~ N(0, 1); y is a vector with m elements\n # parameters - the ideal weights, used to produce output values y\n #\n \n \n return X, y",
"_____no_output_____"
],
[
"X, y = gen_data(m_data, n_data, _scales, _parameters)\nprint(X)\nprint(y)",
"[[ 1.28428476e-01 -9.58496101e+00 3.03724491e+00 5.66402849e-01]\n [ 0.00000000e+00 -5.50406709e+00 -6.03874270e+00 0.00000000e+00]\n [ 0.00000000e+00 -8.23320372e+00 3.70719637e+00 0.00000000e+00]\n [ 0.00000000e+00 2.43845268e-01 6.25241923e+00 0.00000000e+00]\n [ 0.00000000e+00 -4.16247864e+00 8.35548245e+00 0.00000000e+00]\n [ 0.00000000e+00 -7.15659905e+00 -2.41271256e+00 0.00000000e+00]\n [ 0.00000000e+00 -8.76767173e-01 2.35533957e+00 0.00000000e+00]\n [ 0.00000000e+00 2.81450742e+00 6.92397429e+00 0.00000000e+00]\n [ 0.00000000e+00 -3.61527822e+00 -8.19081301e+00 0.00000000e+00]\n [ 0.00000000e+00 6.57362653e+00 -9.06207361e+00 0.00000000e+00]\n [ 0.00000000e+00 6.38573991e+00 -6.02104921e+00 0.00000000e+00]\n [ 8.70617442e-01 5.09295383e+00 -4.08076586e+00 0.00000000e+00]\n [ 0.00000000e+00 -6.69968205e+00 -1.60699768e+00 0.00000000e+00]\n [ 2.25025940e-01 -5.99602037e+00 -2.31771103e+00 0.00000000e+00]\n [ 0.00000000e+00 -8.73909058e-01 6.52245688e+00 0.00000000e+00]\n [ 0.00000000e+00 8.05663521e+00 6.91158976e-01 3.11049320e-01]\n [ 6.99230218e-01 -2.85636483e+00 -8.29397744e+00 1.20066250e-01]\n [ 4.35560520e-01 5.47660592e+00 -9.20081583e+00 0.00000000e+00]\n [ 0.00000000e+00 2.72982286e+00 -3.07305700e+00 0.00000000e+00]\n [ 0.00000000e+00 5.26481174e+00 7.56193285e+00 0.00000000e+00]\n [ 0.00000000e+00 4.93441801e-01 1.95673296e+00 4.72786595e-01]\n [ 0.00000000e+00 -9.49200436e+00 -3.54633767e+00 9.36627065e-01]\n [ 5.66848670e-01 1.31014040e+00 8.75673187e-03 0.00000000e+00]\n [ 0.00000000e+00 9.57638292e+00 -3.20584313e+00 0.00000000e+00]\n [ 0.00000000e+00 -1.18452350e+00 -3.63454389e+00 0.00000000e+00]\n [ 7.66727043e-01 8.03185874e+00 -8.63805453e+00 3.99611848e-01]\n [ 8.33878745e-01 4.37205621e+00 1.72043960e+00 0.00000000e+00]\n [ 2.20553036e-01 1.26381369e+00 -3.93728612e+00 5.17770641e-01]\n [ 6.59370483e-01 -6.81612533e+00 -8.99044660e+00 0.00000000e+00]\n [ 0.00000000e+00 -5.71328276e+00 8.14395285e+00 0.00000000e+00]\n [ 
0.00000000e+00 5.04992340e+00 -7.76788679e+00 0.00000000e+00]\n [ 0.00000000e+00 2.71701689e+00 -7.04759615e+00 0.00000000e+00]\n [ 0.00000000e+00 -9.02743987e+00 -4.80746491e+00 0.00000000e+00]\n [ 0.00000000e+00 -1.50338618e+00 8.44465574e+00 0.00000000e+00]\n [ 0.00000000e+00 6.69417700e-01 -9.70279951e+00 0.00000000e+00]\n [ 0.00000000e+00 5.83513993e+00 1.23114721e+00 0.00000000e+00]\n [ 0.00000000e+00 4.17699653e+00 -7.02933097e+00 0.00000000e+00]\n [ 0.00000000e+00 -7.90760511e+00 -1.20789524e+00 0.00000000e+00]\n [ 0.00000000e+00 6.38071728e+00 -8.19786531e+00 4.69963406e-01]\n [ 0.00000000e+00 2.31111174e+00 -6.03798686e+00 1.11351480e-01]\n [ 9.75478840e-01 -9.24884648e+00 -9.23008644e+00 0.00000000e+00]\n [ 0.00000000e+00 1.14590812e+00 -2.29772801e+00 0.00000000e+00]\n [ 0.00000000e+00 2.79623787e+00 5.65429636e+00 0.00000000e+00]\n [ 2.00911953e-01 5.62121235e+00 2.39389944e+00 3.75041373e-01]\n [ 0.00000000e+00 -6.47915733e+00 -8.29715875e-01 6.03607471e-01]\n [ 0.00000000e+00 6.91102632e+00 -6.50372210e+00 0.00000000e+00]\n [ 0.00000000e+00 4.85349155e+00 -8.66049294e-01 7.21664254e-01]\n [ 0.00000000e+00 -3.22641734e+00 -8.10681919e+00 0.00000000e+00]\n [ 0.00000000e+00 -5.88099483e+00 2.45394163e+00 6.81639561e-01]\n [ 0.00000000e+00 6.30635176e+00 -2.97572994e+00 0.00000000e+00]\n [ 0.00000000e+00 5.69733437e+00 -2.13161775e+00 0.00000000e+00]\n [ 0.00000000e+00 -4.54964959e+00 6.58803840e+00 0.00000000e+00]\n [ 0.00000000e+00 2.88653230e+00 -5.73626869e+00 0.00000000e+00]\n [ 0.00000000e+00 -3.26135909e+00 7.69127763e+00 4.74423672e-02]\n [ 0.00000000e+00 -9.01609684e+00 -6.30746324e+00 0.00000000e+00]\n [ 0.00000000e+00 8.38569350e+00 -8.43009761e-01 4.88265796e-01]\n [ 0.00000000e+00 -1.93060319e+00 -9.51134710e+00 0.00000000e+00]\n [ 0.00000000e+00 4.18211928e+00 -2.88455314e+00 2.54448656e-01]\n [ 0.00000000e+00 7.62950343e-01 1.18173033e+00 0.00000000e+00]\n [ 0.00000000e+00 -1.42505342e+00 3.62241299e-01 4.42430961e-01]\n [ 0.00000000e+00 
-2.07116242e+00 5.86546466e+00 2.98494435e-01]\n [ 1.61042178e-01 6.80494632e+00 1.38542799e+00 0.00000000e+00]\n [ 0.00000000e+00 -5.42435334e+00 2.29403426e+00 0.00000000e+00]\n [ 0.00000000e+00 2.55123705e+00 6.42432952e+00 0.00000000e+00]\n [ 2.06067511e-01 2.02103782e+00 9.07080981e+00 0.00000000e+00]\n [ 0.00000000e+00 -7.55328158e+00 -8.25515416e-01 0.00000000e+00]\n [ 0.00000000e+00 -6.13492535e+00 -9.06521546e+00 0.00000000e+00]\n [ 0.00000000e+00 6.55806351e+00 1.01772887e+01 0.00000000e+00]\n [ 0.00000000e+00 -7.06462371e+00 -7.72079382e+00 0.00000000e+00]\n [ 0.00000000e+00 -3.57715937e+00 2.76070163e+00 0.00000000e+00]\n [ 6.56868150e-01 -3.24490706e+00 7.96048618e+00 2.13258235e-01]\n [ 0.00000000e+00 -1.18477010e+00 8.73920010e+00 0.00000000e+00]\n [ 0.00000000e+00 -6.87231918e+00 -8.26061182e+00 9.25358895e-01]\n [ 0.00000000e+00 1.68111745e+00 7.56932357e+00 0.00000000e+00]\n [ 9.70500250e-01 2.90455491e+00 -8.86743594e+00 0.00000000e+00]\n [ 8.49714595e-02 -6.21941094e+00 6.76473462e+00 0.00000000e+00]\n [ 0.00000000e+00 -6.44422503e+00 -8.34759417e+00 0.00000000e+00]\n [ 0.00000000e+00 3.92616544e+00 -5.90476772e+00 0.00000000e+00]\n [ 0.00000000e+00 -8.39994438e+00 9.26715011e+00 0.00000000e+00]\n [ 9.35356900e-01 -8.34725974e+00 -3.09526420e+00 0.00000000e+00]\n [ 0.00000000e+00 -5.84145804e+00 -6.39318116e+00 0.00000000e+00]\n [ 0.00000000e+00 -4.71605148e+00 2.06635691e-01 0.00000000e+00]\n [ 0.00000000e+00 -1.00970445e+00 2.02880752e+00 5.91105935e-01]\n [ 0.00000000e+00 4.41599804e+00 1.01612128e+00 0.00000000e+00]\n [ 0.00000000e+00 -6.30856527e+00 2.82673574e+00 3.80136209e-02]\n [ 0.00000000e+00 2.52287338e+00 1.99614283e-01 0.00000000e+00]\n [ 0.00000000e+00 7.09374326e-01 -3.14713749e+00 0.00000000e+00]\n [ 0.00000000e+00 8.91879035e+00 2.89640857e+00 0.00000000e+00]\n [ 7.50011376e-01 8.06343479e+00 -8.93081739e+00 0.00000000e+00]\n [ 6.02331842e-01 8.61280010e+00 8.68143801e+00 4.38024101e-01]\n [ 0.00000000e+00 -7.66241121e+00 
6.12763435e+00 0.00000000e+00]\n [ 7.76805449e-01 -2.85073604e+00 6.33893988e+00 7.71342899e-01]\n [ 0.00000000e+00 5.09447132e+00 -4.54947648e+00 0.00000000e+00]\n [ 9.81989522e-01 1.13241385e+00 -1.18525885e+00 0.00000000e+00]\n [ 0.00000000e+00 5.12764716e+00 -9.25355982e+00 0.00000000e+00]\n [ 9.68976460e-01 -7.11350855e+00 2.28392467e+00 0.00000000e+00]\n [ 0.00000000e+00 -3.56868835e+00 -8.64289378e+00 0.00000000e+00]\n [ 0.00000000e+00 -6.78294601e+00 -4.64223629e+00 0.00000000e+00]\n [ 0.00000000e+00 -6.96627151e+00 -9.08545612e+00 0.00000000e+00]\n [ 0.00000000e+00 6.50929057e+00 -1.15378473e+00 0.00000000e+00]]\n[ 2.0024736 -6.6694714 -2.43545494 4.33951713 5.95881325\n -5.68591574 2.0358535 8.26592253 -8.58164616 -4.90478044\n -4.25251071 3.29224634 -2.26898874 -3.58260985 6.55072984\n 7.82513886 -7.24300833 -4.16990542 -1.78616165 7.96925265\n 5.21768494 -0.89471119 2.54687286 0.91195781 -3.89737864\n 0.65691508 6.45566764 1.24756009 -10.41615208 5.14639228\n -4.30241788 -4.13246337 -8.47866984 7.23655925 -7.81483692\n 3.05844197 -4.04251292 -6.19476221 -1.06966314 -5.1019856\n -9.65191646 -2.10656506 7.81272642 9.29811209 0.89853123\n -2.50316116 7.97039694 -10.49050576 4.16254498 0.15999857\n 0.46929628 4.31380324 -5.97022422 6.62998753 -10.87680885\n 8.00641106 -10.53002665 0.4227351 0.9277499 3.46386573\n 7.72178422 5.26929615 -0.7254132 6.56171771 11.07089748\n -3.53630748 -12.19065404 11.47558689 -10.17153586 0.56926885\n 10.69790793 8.80244795 -4.65272321 8.43087792 -3.58940664\n 4.32021797 -12.69034642 -5.64588293 5.16202517 -4.3180381\n -8.19561253 -1.32998579 4.09818321 2.2249444 0.25579068\n 1.32798135 -2.4784993 8.13143279 -3.57118809 17.78136899\n 1.91749245 11.38595428 -2.87278321 1.02600309 -6.958641\n 0.31297435 -11.35389443 -6.89797032 -13.38002989 2.79790438]\n"
]
],
[
[
"## Define error function, gradient, inference",
"_____no_output_____"
]
],
[
[
"def model_estimate(X, w):\n '''Computes the linear regression estimation on the dataset X, using coefficients w\n :param X: 2d tensor with m_data lines and n_data columns\n :param w: a 1d tensor with n_data coefficients (no intercept)\n :return: a 1d tensor with m_data elements y_hat = w @X.T\n '''\n \n return y_hat ",
"_____no_output_____"
],
[
"def J(X, y, w):\n \"\"\"Computes the mean squared error of model. See the picture from last week's sheet.\n :param X: input values, of shape m_data x n_data\n :param y: ground truth, column vector with m_data values\n :param w: column with n_data coefficients for the linear form \n :return: a scalar value >= 0\n :use the same formula as in the exercise from last week\n \"\"\"\n \n return err",
"_____no_output_____"
],
[
"def gradient(X, y, w):\n '''Commputes the gradients to be used for gradient descent. \n :param X: 2d tensor with training data\n :param y: 1d tensor with y.shape[0] == W.shape[0]\n :param w: 1d tensor with current values of the coefficients\n :return: gradients to be used for gradient descent. \n :use the same formula as in the exercise from last week\n '''\n \n return grad## implement",
"_____no_output_____"
]
],
[
[
"## Momentum algorithm",
"_____no_output_____"
]
],
[
[
"#The function from last week for comparison\ndef gd_with_momentum(X, y, w_init, eta=1e-1, gamma = 0.9, thresh = 0.001):\n \"\"\"Applies gradient descent with momentum coefficient\n :params: as in gd_no_momentum\n :param gamma: momentum coefficient\n :param thresh: the threshold for gradient norm (to stop iterations)\n :return: the list of succesive errors and the found w* vector \n \"\"\"\n w = w_init\n w_err=[]\n \n delta = np.zeros_like(w)\n while True:\n grad = gradient(X, y, w)\n err = J(X, y, w)\n w_err.append(err)\n w_nou = w + gamma * delta - eta * grad\n delta = w_nou - w\n w = w_nou\n \n if np.linalg.norm(grad) < thresh :\n break;\n return w_err, w",
"_____no_output_____"
],
[
"w_init = np.array([0, 0, 0, 0])\nerrors_momentum, w_best = gd_with_momentum(X, y, w_init,0.0001, 0.9)",
"_____no_output_____"
],
[
"print(f'How many iterations were made: {len(errors_momentum)}')",
"How many iterations were made: 102146\n"
],
[
"w_best",
"_____no_output_____"
],
[
"fig, axes = plt.subplots()\naxes.plot(list(range(len(errors_momentum))), errors_momentum)\naxes.set_xlabel('Epochs')\naxes.set_ylabel('Error')\naxes.set_title('Optimization with momentum')",
"_____no_output_____"
]
],
[
[
"## Apply AdaGrad and report resulting $\\eta$'s",
"_____no_output_____"
]
],
[
[
"def ada_grad(X, y, w_init, eta_init=1e-1, eps = 0.001, thresh = 0.001):\n '''Iterates with gradient descent. algorithm\n :param X: 2d tensor with data\n :param y: 1d tensor, ground truth \n :param w_init: 1d tensor with the X.shape[1] initial coefficients\n :param eta_init: the initial learning rate hyperparameter\n :param eps: the epsilon value from the AdaGrad formula\n :param thresh: the threshold for gradient norm (to stop iterations)\n :return: the list of succesive errors w_err, the found w - the estimated feature vector \n :and rates the learning rates after the final iteration \n '''\n \n n = X.shape[1]\n w = w_init\n w_err=[]\n \n sum_sq_grad = np.zeros(n)\n rates = np.zeros(n) + eta_init\n \n while True:\n grad = gradient(X, y, w)\n pgrad = grad**2\n err = J(X, y, w)\n w_err.append(err)\n prod = rates*grad\n \n w = w - prod\n sum_sq_grad += pgrad\n rates = eta_init/np.sqrt(eps + sum_sq_grad)\n \n \n if np.linalg.norm(grad) < thresh:\n break;\n return w_err, w, rates",
"_____no_output_____"
],
[
"w_init = np.array([0,0,0,0])\nadaGerr, w_ada_best, rates = ada_grad(X, y, w_init)\nprint(rates)",
"[0.01515786 0.00157473 0.00021369 0.01046079]\n"
],
[
"print(f'How many iterations were made: {len(adaGerr)}')",
"How many iterations were made: 9106\n"
],
[
"w_ada_best",
"_____no_output_____"
],
[
"fig, axes = plt.subplots()\naxes.plot(list(range(len(adaGerr))),adaGerr)\naxes.set_xlabel('Epochs')\naxes.set_ylabel('Error')\naxes.set_title('Optimization with AdaGrad')",
"_____no_output_____"
]
],
[
[
"## Apply AdaDelta and report resulting $\\eta$'s",
"_____no_output_____"
]
],
[
[
"def ada_delta(X, y, w_init, eta_init=1e-1, gamma=0.99, eps = 0.001, thresh = 0.001):\n '''Iterates with gradient descent. algorithm\n :param X: 2d tensor with data\n :param y: 1d tensor, ground truth \n :param w_init: 1d tensor with the X.shape[1] initial coefficients\n :param eta_init: the initial learning rate hyperparameter\n :param gamma: decay constant, similar to momentum\n :param eps: the epsilon value from the AdaGrad formula\n :param thresh: the threshold for gradient norm (to stop iterations)\n :return: the list of succesive errors w_err, the found w - the estimated feature vector \n :and rates the learning rates after the final iteration \n '''\n \n #todo\n #same as adagrad but instead of summing the square of gradients\n #use the adadelta formula for decaying average",
"_____no_output_____"
],
[
"w_init = np.array([0,0,0,0])\nadaDerr, w_adad_best, rates = ada_delta(X, y, w_init)\nprint(rates)",
"[1.27981839 0.03944876 0.01893041 1.14190179]\n"
],
[
"print(f'How many iterations were made: {len(adaDerr)}')",
"How many iterations were made: 109\n"
],
[
"w_adad_best",
"_____no_output_____"
],
[
"fig, axes = plt.subplots()\naxes.plot(list(range(len(adaDerr))),adaDerr)\naxes.set_xlabel('Epochs')\naxes.set_ylabel('Error')\naxes.set_title('Optimization with AdaDelta')",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
4ac1dabe721aa35494c21ba0c6247e948430e576
| 248,593 |
ipynb
|
Jupyter Notebook
|
Figure_3/Figure_3_A_foldxvsrosettaenergy.ipynb
|
shorthouse-mrc/Fumarate_Hydratase
|
3044b0a1cc6206b614a06ddda3eb70f19f8de444
|
[
"MIT"
] | null | null | null |
Figure_3/Figure_3_A_foldxvsrosettaenergy.ipynb
|
shorthouse-mrc/Fumarate_Hydratase
|
3044b0a1cc6206b614a06ddda3eb70f19f8de444
|
[
"MIT"
] | null | null | null |
Figure_3/Figure_3_A_foldxvsrosettaenergy.ipynb
|
shorthouse-mrc/Fumarate_Hydratase
|
3044b0a1cc6206b614a06ddda3eb70f19f8de444
|
[
"MIT"
] | null | null | null | 1,775.664286 | 245,188 | 0.96267 |
[
[
[
"import matplotlib.pyplot as plt\nimport matplotlib\nimport glob\nimport os\nimport pandas as pd\nimport seaborn as sns\nimport re\nfrom scipy import stats\nimport numpy as np",
"_____no_output_____"
],
[
"matplotlib.rcParams['figure.figsize'] = [14, 8]",
"_____no_output_____"
],
[
"# Load data frame containing all the mutation information\ncomplete_data = pd.read_csv(\"../Data/Structure_data/FH_complete_mutation_summary.csv\")",
"_____no_output_____"
],
[
"f, ax = plt.subplots(figsize=(10, 10))\n# Plot Rosetta vs Foldx\nsnsplot = sns.regplot(complete_data[\"Rosetta_energy\"], complete_data[\"Foldx_energy\"], line_kws={'color':'#FB4459'}, scatter_kws={'lw':'0', 'edgecolors':'white', 'alpha':'0.25', 'color':'#43A9DB'})\nsnsplot.set_xlim(auto=True)\nax.set_xlabel(\"Rosetta Predicted $\\Delta \\Delta$G (Kcal/mol)\", size = 18)\nax.set_ylabel(\"Foldx Predicted $\\Delta \\Delta$G (Kcal/mol)\", size = 18)\n\n#symetric log for easier visualization of high energy\nsnsplot.set(xscale=\"symlog\", yscale=\"symlog\")\nsns.despine()\nplt.savefig(\"Figure_3_A_foldxvsrosettaenergy.png\", dpi = 300)",
"_____no_output_____"
],
[
"## Calculate the spearman rank\nstats.spearmanr(complete_data[\"Rosetta_energy\"], complete_data[\"Foldx_energy\"])",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code"
]
] |
4ac1dd6480dbafed76cd6e2d48f4acfa90d70edf
| 117,979 |
ipynb
|
Jupyter Notebook
|
example_notebook.ipynb
|
nrudelic/tcr-pmhc
|
ec9f139c7f253cbb5f13ef1405446d131ff71b71
|
[
"MIT"
] | null | null | null |
example_notebook.ipynb
|
nrudelic/tcr-pmhc
|
ec9f139c7f253cbb5f13ef1405446d131ff71b71
|
[
"MIT"
] | null | null | null |
example_notebook.ipynb
|
nrudelic/tcr-pmhc
|
ec9f139c7f253cbb5f13ef1405446d131ff71b71
|
[
"MIT"
] | null | null | null | 190.28871 | 20,495 | 0.78058 |
[
[
[
"#!/usr/bin/env python\n# coding: utf-8\n# Imports\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import metrics\nfrom sklearn.metrics import f1_score, accuracy_score\nfrom sklearn.metrics import roc_curve, confusion_matrix\nimport torch\nimport torch.nn as nn # All neural network modules, nn.Linear, nn.Conv2d, BatchNorm, Loss functions\nimport torch.optim as optim # For all Optimization algorithms, SGD, Adam, etc.\nimport torch.nn.functional as F # All functions that don't have any parameters\n\n",
"_____no_output_____"
]
],
[
[
"## Model",
"_____no_output_____"
]
],
[
[
"###############################\n### Load data ###\n###############################\n\ndata_list = []\ntarget_list = []\n\nimport glob\nfor fp in glob.glob(\"data/train/*input.npz\"):\n data = np.load(fp)[\"arr_0\"]\n targets = np.load(fp.replace(\"input\", \"labels\"))[\"arr_0\"]\n \n data_list.append(data)\n target_list.append(targets)\n#print(data_list)\n\n# Note:\n# Choose your own training and val set based on data_list and target_list\n# Here using the last partition as val set\n\nX_train = np.concatenate(data_list[ :-1])\ny_train = np.concatenate(target_list[:-1])\nnsamples, nx, ny = X_train.shape\nprint(\"Training set shape:\", nsamples,nx,ny)\n\nX_val = np.concatenate(data_list[-1: ])\ny_val = np.concatenate(target_list[-1: ])\nnsamples, nx, ny = X_val.shape\nprint(\"val set shape:\", nsamples,nx,ny)\n\np_neg = len(y_train[y_train == 1])/len(y_train)*100\nprint(\"Percent positive samples in train:\", p_neg)\n\np_pos = len(y_val[y_val == 1])/len(y_val)*100\nprint(\"Percent positive samples in val:\", p_pos)\n\n# make the data set into one dataset that can go into dataloader\ntrain_ds = []\nfor i in range(len(X_train)):\n train_ds.append([np.transpose(X_train[i]), y_train[i]])\n\nval_ds = []\nfor i in range(len(X_val)):\n val_ds.append([np.transpose(X_val[i]), y_val[i]])\n\nbat_size = 64\nprint(\"\\nNOTE:\\nSetting batch-size to\", bat_size)\ntrain_ldr = torch.utils.data.DataLoader(train_ds,batch_size=bat_size, shuffle=True)\nval_ldr = torch.utils.data.DataLoader(val_ds,batch_size=bat_size, shuffle=True)\n\n\n# Set device\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nprint(\"Using device (CPU/GPU):\", device)\n#device = torch.device(\"cpu\")",
"Training set shape: 4174 420 54\nval set shape: 1532 420 54\nPercent positive samples in train: 24.96406324868232\nPercent positive samples in val: 25.0\n\nNOTE:\nSetting batch-size to 64\nUsing device (CPU/GPU): cpu\n"
],
[
"\n###############################\n### Define network ###\n###############################\n\nprint(\"Initializing network\")\n\n# Hyperparameters\ninput_size = 420\nnum_classes = 1\nlearning_rate = 0.01\n\nclass Net(nn.Module):\n def __init__(self, num_classes):\n super(Net, self).__init__() \n self.bn0 = nn.BatchNorm1d(54)\n self.conv1 = nn.Conv1d(in_channels=54, out_channels=100, kernel_size=3, stride=2, padding=1)\n torch.nn.init.trunc_normal_(self.conv1.weight)\n self.pool = nn.MaxPool1d(kernel_size=2, stride=2)\n self.conv1_bn = nn.BatchNorm1d(100)\n \n self.conv2 = nn.Conv1d(in_channels=100, out_channels=100, kernel_size=3, stride=2, padding=1)\n torch.nn.init.xavier_normal_(self.conv2.weight)\n self.conv2_bn = nn.BatchNorm1d(100)\n \n self.fc1 = nn.Linear(2600, num_classes)\n torch.nn.init.kaiming_normal_(self.fc1.weight)\n \n def forward(self, x): \n x = self.bn0(x)\n x = self.pool(F.leaky_relu(self.conv1(x)))\n x = self.conv1_bn(x)\n \n x = self.pool(F.leaky_relu(self.conv2(x)))\n x = self.conv2_bn(x)\n \n x = x.view(x.size(0), -1)\n x = torch.sigmoid(self.fc1(x))\n \n return x\n \n# Initialize network\nnet = Net(num_classes=num_classes).to(device)\n\n# Loss and optimizer\ncriterion = nn.BCELoss()\noptimizer = optim.Adam(net.parameters(), lr=learning_rate)\n",
"Initializing network\n"
],
[
"###############################\n### TRAIN ###\n###############################\n\nprint(\"Training\")\n\nnum_epochs = 5\n\ntrain_acc, train_loss = [], []\nvalid_acc, valid_loss = [], []\nlosses = []\nval_losses = []\n\nfor epoch in range(num_epochs):\n cur_loss = 0\n val_loss = 0\n \n net.train()\n train_preds, train_targs = [], [] \n for batch_idx, (data, target) in enumerate(train_ldr):\n X_batch = data.float().detach().requires_grad_(True)\n target_batch = torch.tensor(np.array(target), dtype = torch.float).unsqueeze(1)\n \n optimizer.zero_grad()\n output = net(X_batch)\n \n batch_loss = criterion(output, target_batch)\n batch_loss.backward()\n optimizer.step()\n \n preds = np.round(output.detach().cpu())\n train_targs += list(np.array(target_batch.cpu()))\n train_preds += list(preds.data.numpy().flatten())\n cur_loss += batch_loss.detach()\n\n losses.append(cur_loss / len(train_ldr.dataset)) \n \n net.eval()\n ### Evaluate validation\n val_preds, val_targs = [], []\n with torch.no_grad():\n for batch_idx, (data, target) in enumerate(val_ldr): ###\n x_batch_val = data.float().detach()\n y_batch_val = target.float().detach().unsqueeze(1)\n \n output = net(x_batch_val)\n \n val_batch_loss = criterion(output, y_batch_val)\n \n preds = np.round(output.detach())\n val_preds += list(preds.data.numpy().flatten()) \n val_targs += list(np.array(y_batch_val))\n val_loss += val_batch_loss.detach()\n \n val_losses.append(val_loss / len(val_ldr.dataset))\n print(\"\\nEpoch:\", epoch+1)\n \n train_acc_cur = accuracy_score(train_targs, train_preds) \n valid_acc_cur = accuracy_score(val_targs, val_preds) \n\n train_acc.append(train_acc_cur)\n valid_acc.append(valid_acc_cur)\n \n from sklearn.metrics import matthews_corrcoef\n print(\"Training loss:\", losses[-1].item(), \"Validation loss:\", val_losses[-1].item(), end = \"\\n\")\n print(\"MCC Train:\", matthews_corrcoef(train_targs, train_preds), \"MCC val:\", matthews_corrcoef(val_targs, val_preds))",
"Training\n\nEpoch: 1\nTraining loss: 0.013788885436952114 Validation loss: 0.00813869759440422\nMCC Train: 0.2709018274000176 MCC val: 0.39113545604352495\n\nEpoch: 2\nTraining loss: 0.007742497604340315 Validation loss: 0.0073868450708687305\nMCC Train: 0.46501172967071797 MCC val: 0.4927249935293596\n\nEpoch: 3\nTraining loss: 0.007487284950911999 Validation loss: 0.0073365080170333385\nMCC Train: 0.4611128671728777 MCC val: 0.5391912503130674\n\nEpoch: 4\nTraining loss: 0.005591066088527441 Validation loss: 0.007285711821168661\nMCC Train: 0.6053983463963063 MCC val: 0.5565829778764796\n\nEpoch: 5\nTraining loss: 0.004863017704337835 Validation loss: 0.00838278979063034\nMCC Train: 0.654373098701067 MCC val: 0.4659638506620496\n"
]
],
[
[
"## MH",
"_____no_output_____"
]
],
[
[
"###############################\n### PERFORMANCE ###\n###############################\n\nepoch = np.arange(1,len(train_acc)+1)\nplt.figure()\nplt.plot(epoch, losses, 'r', epoch, val_losses, 'b')\nplt.legend(['Train Loss','Validation Loss'])\nplt.xlabel('Epoch^'), plt.ylabel('Loss')\n\nepoch = np.arange(1,len(train_acc)+1)\nplt.figure()\nplt.plot(epoch, train_acc, 'r', epoch, valid_acc, 'b')\nplt.legend(['Train Accuracy','Validation Accuracy'])\nplt.xlabel('Epoch'), plt.ylabel('Acc')\n\n#print(\"Train accuracy:\", train_acc, sep = \"\\n\")\n#print(\"Validation accuracy:\", valid_acc, sep = \"\\n\")\n\nfrom sklearn.metrics import matthews_corrcoef\nprint(\"MCC Train:\", matthews_corrcoef(train_targs, train_preds))\nprint(\"MCC Test:\", matthews_corrcoef(val_targs, val_preds))\n\nprint(\"Confusion matrix train:\", confusion_matrix(train_targs, train_preds), sep = \"\\n\")\nprint(\"Confusion matrix test:\", confusion_matrix(val_targs, val_preds), sep = \"\\n\")",
"MCC Train: 0.654373098701067\nMCC Test: 0.4659638506620496\nConfusion matrix train:\n[[2961 171]\n [ 346 696]]\nConfusion matrix test:\n[[1112 37]\n [ 237 146]]\n"
],
[
"def plot_roc(targets, predictions):\n # ROC\n fpr, tpr, threshold = metrics.roc_curve(targets, predictions)\n roc_auc = metrics.auc(fpr, tpr)\n\n # plot ROC\n plt.figure()\n plt.title('Receiver Operating Characteristic')\n plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)\n plt.legend(loc = 'lower right')\n plt.plot([0, 1], [0, 1],'r--')\n plt.xlim([0, 1])\n plt.ylim([0, 1])\n plt.ylabel('True Positive Rate')\n plt.xlabel('False Positive Rate')\n #plt.show()",
"_____no_output_____"
],
[
"plot_roc(train_targs, train_preds)\nplt.title(\"Training AUC\")\nplot_roc(val_targs, val_preds)\nplt.title(\"Validation AUC\")",
"_____no_output_____"
]
],
[
[
"# Helpful scripts",
"_____no_output_____"
],
[
"# Show dataset as copied dataframes with named features\nThe dataset is a 3D numpy array, of dimensions n_complexes x features x positions. This makes viewing the features for individual complexes or samples challenging. Below is a function which copies the entire dataset, and converts it into a list of DataFrames with named indices and columns, in order to make understanding the data easier.\n\nNB: This list of dataframes are only copies, and will not be passable into the neural network architecture.",
"_____no_output_____"
]
],
[
[
"pd.read_csv(\"data/example.csv\")",
"_____no_output_____"
],
[
"def copy_as_dataframes(dataset_X):\n \"\"\"\n Returns list of DataFrames with named features from dataset_X,\n using example CSV file\n \"\"\"\n df_raw = pd.read_csv(\"data/example.csv\")\n return [pd.DataFrame(arr, columns = df_raw.columns) for arr in dataset_X]\n\nnamed_dataframes = copy_as_dataframes(X_train)\nprint(\"Showing first complex as dataframe. Columns are positions and indices are calculated features\")\nnamed_dataframes[0]",
"Showing first complex as dataframe. Columns are positions and indices are calculated features\n"
]
],
[
[
"# View complex MHC, peptide and TCR alpha/beta sequences\nYou may want to view the one-hot encoded sequences as sequences in single-letter amino-acid format. The below function will return the TCR, peptide and MHC sequences for the dataset as 3 lists.",
"_____no_output_____"
]
],
[
[
"def oneHot(residue):\n \"\"\"\n Converts string sequence to one-hot encoding\n Example usage:\n seq = \"GSHSMRY\"\n oneHot(seq)\n \"\"\"\n \n mapping = dict(zip(\"ACDEFGHIKLMNPQRSTVWY\", range(20)))\n if residue in \"ACDEFGHIKLMNPQRSTVWY\":\n return np.eye(20)[mapping[residue]]\n else:\n return np.zeros(20)\ndef reverseOneHot(encoding):\n \"\"\"\n Converts one-hot encoded array back to string sequence\n \"\"\"\n mapping = dict(zip(range(20),\"ACDEFGHIKLMNPQRSTVWY\"))\n seq=''\n for i in range(len(encoding)):\n if np.max(encoding[i])>0:\n seq+=mapping[np.argmax(encoding[i])]\n return seq\n\ndef extract_sequences(dataset_X):\n \"\"\"\n Return DataFrame with MHC, peptide and TCR a/b sequences from\n one-hot encoded complex sequences in dataset X\n \"\"\"\n mhc_sequences = [reverseOneHot(arr[0:179,0:20]) for arr in dataset_X]\n pep_sequences = [reverseOneHot(arr[179:190,0:20]) for arr in dataset_X]\n tcr_sequences = [reverseOneHot(arr[192:,0:20]) for arr in dataset_X]\n df_sequences = pd.DataFrame({\"MHC\":mhc_sequences, \"peptide\":pep_sequences,\n \"tcr\":tcr_sequences})\n return df_sequences",
"_____no_output_____"
],
[
"complex_sequences = extract_sequences(X_val)\nprint(\"Showing MHC, peptide and TCR alpha/beta sequences for each complex\")\ncomplex_sequences",
"Showing MHC, peptide and TCR alpha/beta sequences for each complex\n"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
4ac1e615f63329043e85b52c588b30f9f592ed14
| 30,758 |
ipynb
|
Jupyter Notebook
|
Lab5/Lab5_Linear_Regre_Scratch.ipynb
|
jay281/014_JayBhingradiya
|
6a5e9112a35f5b014e5ef624604a35dbbfa42a15
|
[
"MIT"
] | null | null | null |
Lab5/Lab5_Linear_Regre_Scratch.ipynb
|
jay281/014_JayBhingradiya
|
6a5e9112a35f5b014e5ef624604a35dbbfa42a15
|
[
"MIT"
] | null | null | null |
Lab5/Lab5_Linear_Regre_Scratch.ipynb
|
jay281/014_JayBhingradiya
|
6a5e9112a35f5b014e5ef624604a35dbbfa42a15
|
[
"MIT"
] | null | null | null | 57.277467 | 15,178 | 0.712595 |
[
[
[
"<a href=\"https://colab.research.google.com/github/r5racker/012_RahilBhensdadia/blob/main/Lab_05_1_linear_regression_scratch.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"# Import Numpy & PyTorch\nimport numpy as np",
"_____no_output_____"
]
],
[
[
"A tensor is a number, vector, matrix or any n-dimensional array.",
"_____no_output_____"
],
[
"## Problem Statement",
"_____no_output_____"
],
[
"We'll create a model that predicts crop yeilds for apples (*target variable*) by looking at the average temperature, rainfall and humidity (*input variables or features*) in different regions. \n\nHere's the training data:\n\n>Temp | Rain | Humidity | Prediction\n>--- | --- | --- | ---\n> 73 | 67 | 43 | 56\n> 91 | 88 | 64 | 81\n> 87 | 134 | 58 | 119\n> 102 | 43 | 37 | 22\n> 69 | 96 | 70 | 103\n\nIn a **linear regression** model, each target variable is estimated to be a weighted sum of the input variables, offset by some constant, known as a bias :\n\n```\nyeild_apple = w11 * temp + w12 * rainfall + w13 * humidity + b1\n```\n\nIt means that the yield of apples is a linear or planar function of the temperature, rainfall & humidity.\n\n\n\n**Our objective**: Find a suitable set of *weights* and *biases* using the training data, to make accurate predictions.",
"_____no_output_____"
],
[
"## Training Data\nThe training data can be represented using 2 matrices (inputs and targets), each with one row per observation and one column for variable.",
"_____no_output_____"
]
],
[
[
"# Input (temp, rainfall, humidity)\nX = np.array([[73, 67, 43], \n [91, 88, 64], \n [87, 134, 58], \n [102, 43, 37], \n [69, 96, 70]], dtype='float32')",
"_____no_output_____"
],
[
"# Target (apples)\nY = np.array([[56], \n [81], \n [119], \n [22], \n [103]], dtype='float32')",
"_____no_output_____"
]
],
[
[
"Before we build a model, we need to convert inputs and targets to PyTorch tensors.",
"_____no_output_____"
],
[
"## Linear Regression Model (from scratch)\n\nThe *weights* and *biases* can also be represented as matrices, initialized with random values. The first row of `w` and the first element of `b` are use to predict the first target variable i.e. yield for apples, and similarly the second for oranges.",
"_____no_output_____"
],
[
"The *model* is simply a function that performs a matrix multiplication of the input `x` and the weights `w` (transposed) and adds the bias `w0` (replicated for each observation).\n\n$$\n\\hspace{2.5cm} X \\hspace{1.1cm} \\times \\hspace{1.2cm} W^T \n$$\n\n$$\n\\left[ \\begin{array}{cc}\n1 & 73 & 67 & 43 \\\\\n1 &91 & 88 & 64 \\\\\n\\vdots & \\vdots & \\vdots & \\vdots \\\\\n1 &69 & 96 & 70\n\\end{array} \\right]\n%\n\\times\n%\n\\left[ \\begin{array}{cc}\nw_{0} \\\\\nw_{1} \\\\\nw_{2} \\\\\nw_{3} \n\\end{array} \\right]\n%\n$$",
"_____no_output_____"
]
],
[
[
"mu = np.mean(X, 0)\nsigma = np.std(X, 0)\n#normalizing the input\nX = (X-mu) / sigma\nX = np.hstack((np.ones((Y.size,1)),X))\nprint(X.shape)",
"(5, 4)\n"
],
[
"\n# Weights and biases\nrg = np.random.default_rng(14)\nw = rg.random((1, 4))\nprint(w)",
"[[0.83098332 0.36094667 0.70273931 0.86011879]]\n"
]
],
[
[
"Because we've started with random weights and biases, the model does not perform a good job of predicting the target varaibles.",
"_____no_output_____"
],
[
"## Loss Function\n\nWe can compare the predictions with the actual targets, using the following method: \n* Calculate the difference between the two matrices (`preds` and `targets`).\n* Square all elements of the difference matrix to remove negative values.\n* Calculate the average of the elements in the resulting matrix.\n\nThe result is a single number, known as the **mean squared error** (MSE).",
"_____no_output_____"
]
],
[
[
"# MSE loss function\ndef mse(t1, t2):\n diff = t1 - t2\n return np.sum(diff * diff) / diff.size",
"_____no_output_____"
],
[
"# Compute error\npreds = model(X,w)\ncost_initial = mse(preds, Y)\nprint(\"Cost before regression: \",cost_initial)",
"Cost before regression: 6781.910571962845\n"
]
],
[
[
"## Compute Gradients\n\n",
"_____no_output_____"
]
],
[
[
"# Define the model\ndef model(x,w):\n return x @ w.T",
"_____no_output_____"
],
[
"def gradient_descent(X, y, w, learning_rate, n_iters):\n J_history = np.zeros((n_iters,1))\n for i in range(n_iters):\n h = model(X,w)\n diff = h - y\n delta = (learning_rate/Y.size)*(X.T@diff)\n new_w = w - delta.T\n w=new_w\n J_history[i] = mse(h, y)\n return (J_history, w)",
"_____no_output_____"
]
],
[
[
"## Train for multiple iteration\n\nTo reduce the loss further, we repeat the process of adjusting the weights and biases using the gradients multiple times. Each iteration is called an epoch.",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\nn_iters = 500\nlearning_rate = 0.01\n\ninitial_cost = mse(model(X,w),Y)\n\nprint(\"Initial cost is: \", initial_cost, \"\\n\")\n\n(J_history, optimal_params) = gradient_descent(X, Y, w, learning_rate, n_iters)\n\nprint(\"Optimal parameters are: \\n\", optimal_params, \"\\n\")\n\nprint(\"Final cost is: \", J_history[-1])\n",
"Initial cost is: 6781.910571962845 \n\nOptimal parameters are: \n [[75.70478939 -4.33543217 23.96706044 10.65154695]] \n\nFinal cost is: [3.26274353]\n"
],
[
"plt.plot(range(len(J_history)), J_history, 'r')\n\nplt.title(\"Convergence Graph of Cost Function\")\nplt.xlabel(\"Number of Iterations\")\nplt.ylabel(\"Cost\")\nplt.show()",
"_____no_output_____"
],
[
"# Calculate error\npreds = model(X,optimal_params)\ncost_final = mse(preds, Y)\n# Print predictions\nprint(\"Prediction:\\n\",preds)\n# Comparing predicted with targets\nprint(\"Targets:\\n\",Y)",
"Prediction:\n [[ 55.41871642]\n [ 83.40565995]\n [116.01080501]\n [ 20.95177326]\n [102.73699157]]\nTargets:\n [[ 56.]\n [ 81.]\n [119.]\n [ 22.]\n [103.]]\n"
],
[
"print(\"Cost after linear regression: \",cost_final)\nprint(\"Cost reduction percentage : {} %\".format(((cost_initial- cost_final)/cost_initial)*100))",
"Cost after linear regression: 3.2456659695291727\nCost reduction percentage : 99.95214230658028 %\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
4ac1ec3bf7244c19d32ae354e9fa4f7955ae725b
| 802,050 |
ipynb
|
Jupyter Notebook
|
exploratory_data_analysis.ipynb
|
FranckPrts/nma-cn-project
|
c53946ce3a95d714812c152131e0bf69ebbe2911
|
[
"MIT"
] | 4 |
2021-08-11T04:30:03.000Z
|
2021-11-17T10:39:27.000Z
|
exploratory_data_analysis.ipynb
|
FranckPrts/nma-cn-project
|
c53946ce3a95d714812c152131e0bf69ebbe2911
|
[
"MIT"
] | null | null | null |
exploratory_data_analysis.ipynb
|
FranckPrts/nma-cn-project
|
c53946ce3a95d714812c152131e0bf69ebbe2911
|
[
"MIT"
] | 1 |
2021-07-26T16:55:13.000Z
|
2021-07-26T16:55:13.000Z
| 343.343322 | 346,376 | 0.921385 |
[
[
[
"# Exploratory Data Analysis of AllenSDK",
"_____no_output_____"
]
],
[
[
"# Only for Colab\n#!python -m pip install --upgrade pip\n#!pip install allensdk",
"_____no_output_____"
]
],
[
[
"## References",
"_____no_output_____"
],
[
"- [[AllenNB1]](https://allensdk.readthedocs.io/en/latest/_static/examples/nb/visual_behavior_ophys_data_access.html) Download data using the AllenSDK or directly from our Amazon S3 bucket\n- [[AllenNB2]](https://allensdk.readthedocs.io/en/latest/_static/examples/nb/visual_behavior_ophys_dataset_manifest.html) Identify experiments of interest using the dataset manifest\n- [[AllenNB3]](https://allensdk.readthedocs.io/en/latest/_static/examples/nb/visual_behavior_load_ophys_data.html) Load and visualize data from a 2-photon imaging experiment\n- [[AllenNB4]](https://allensdk.readthedocs.io/en/latest/_static/examples/nb/visual_behavior_mouse_history.html) Examine the full training history of one mouse\n- [[AllenNB5]](https://allensdk.readthedocs.io/en/latest/_static/examples/nb/visual_behavior_compare_across_trial_types.html) Compare behavior and neural activity across different trial types in the task",
"_____no_output_____"
],
[
"## Imports\n\nImport and setup Python packages. You should not need to touch this section.",
"_____no_output_____"
]
],
[
[
"from pathlib import Path\nfrom tqdm import tqdm\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\n\nfrom allensdk.brain_observatory.behavior.behavior_project_cache import VisualBehaviorOphysProjectCache\nfrom allensdk.core.brain_observatory_cache import BrainObservatoryCache\n\n# import mindscope_utilities\n# import mindscope_utilities.visual_behavior_ophys as ophys",
"/home/seungjaeryanlee/anaconda3/envs/nma-cn/lib/python3.8/site-packages/allensdk/brain_observatory/session_api_utils.py:15: FutureWarning: pandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead.\n from pandas.util.testing import assert_frame_equal\n"
],
[
"np.random.seed(42)",
"_____no_output_____"
]
],
[
[
"## Setup AllenSDK\n\nConfigure AllenSDK to get `cache`, `sessions_df` and `experiments_df`. Data will be stored in `./allensdk_storage` by default.",
"_____no_output_____"
]
],
[
[
"!mkdir -p allensdk_storage\nDATA_STORAGE_DIRECTORY = Path(\"./allensdk_storage\")",
"_____no_output_____"
],
[
"cache = VisualBehaviorOphysProjectCache.from_s3_cache(cache_dir=DATA_STORAGE_DIRECTORY)",
"_____no_output_____"
]
],
[
[
"The data manifest is comprised of three types of tables:\n\n1. `behavior_session_table`\n2. `ophys_session_table`\n3. `ophys_experiment_table`\n\nThe` behavior_session_table` contains metadata for every **behavior session** in the dataset. Some behavior sessions have 2-photon data associated with them, while others took place during training in the behavior facility. The different training stages that mice are progressed through are described by the session_type.\n\nThe `ophys_session_table` contains metadata for every 2-photon imaging (aka optical physiology, or ophys) session in the dataset, associated with a unique `ophys_session_id`. An **ophys session** is one continuous recording session under the microscope, and can contain different numbers of imaging planes (aka experiments) depending on which microscope was used. For Scientifica sessions, there will only be one experiment (aka imaging plane) per session. For Multiscope sessions, there can be up to eight imaging planes per session. Quality Control (QC) is performed on each individual imaging plane within a session, so each can fail QC independent of the others. This means that a Multiscope session may not have exactly eight experiments (imaging planes).\n\nThe `ophys_experiment_table` contains metadata for every **ophys experiment** in the dataset, which corresponds to a single imaging plane recorded in a single session, and associated with a unique `ophys_experiment_id`. A key part of our experimental design is targeting a given population of neurons, contained in one imaging plane, across multiple `session_types` (further described below) to examine the impact of varying sensory and behavioral conditions on single cell responses. The collection of all imaging sessions for a given imaging plane is referred to as an **ophys container**, associated with a unique `ophys_container_id`. 
Each ophys container may contain different numbers of sessions, depending on which experiments passed QC, and how many retakes occured (when a given session_type fails QC on the first try, an attempt is made to re-acquire the `session_type` on a different recording day - this is called a retake, also described further below).\n\n*Text copied from [[AllenNB2]](#References)*",
"_____no_output_____"
],
[
"---",
"_____no_output_____"
],
[
"We will just use the `ophys_experiment_table`.",
"_____no_output_____"
]
],
[
[
"experiments_df = cache.get_ophys_experiment_table()",
"_____no_output_____"
]
],
[
[
"## Specify Experiment\n\nThere are a lot of experiments in the table. Let's choose a particular experiment that meet the following criteria:\n- Excitatory cells with fast reporter\n- Single-plane imaging",
"_____no_output_____"
],
[
"### Cre Line and Reporter Line",
"_____no_output_____"
],
[
"<img style=\"width: 50%\" src=\"https://github.com/seungjaeryanlee/nma-cn-project/blob/main/images/cre_lines.png?raw=1\">",
"_____no_output_____"
],
[
" The `cre_line` determines which genetically identified neuron type will be labeled by the reporter_line.\n \n This dataset have 3 `cre_line`:\n - **Slc17a7-IRES2-Cre**, which labels excitatory neurons across all cortical layers\n - **Sst-IRES-Cre** which labels somatostatin expressing inhibitory interneurons\n - **Vip-IRES-Cre**, which labels vasoactive intestinal peptide expressing inhibitory interneurons\n \n*Text copied from [[AllenNB2]](#References)*",
"_____no_output_____"
]
],
[
[
"experiments_df[\"cre_line\"].unique()",
"_____no_output_____"
]
],
[
[
"There are also 3 `reporter_line`:\n - **Ai93(TITL-GCaMP6f)**, which expresses the genetically encoded calcium indicator GCaMP6f (f is for 'fast', this reporter has fast offset kinetics, but is only moderately sensitive to calcium relative to other sensors) in cre labeled neurons\n - **Ai94(TITL-GCaMP6s)**, which expresses the indicator GCaMP6s (s is for 'slow', this reporter is very sensitive to calcium but has slow offset kinetics), and\n - **Ai148(TIT2L-GC6f-ICL-tTA2)**, which expresses GCaMP6f using a self-enhancing system to achieve higher expression than other reporter lines (which proved necessary to label inhibitory neurons specifically). ",
"_____no_output_____"
]
],
[
[
"experiments_df[\"reporter_line\"].unique()",
"_____no_output_____"
]
],
[
[
"The specific `indicator` expressed by each `reporter_line` also has its own column in the table.",
"_____no_output_____"
]
],
[
[
"experiments_df[\"indicator\"].unique()",
"_____no_output_____"
]
],
[
[
"`full_genotype` contains information for both cre line and reporter line.",
"_____no_output_____"
]
],
[
[
"experiments_df[\"full_genotype\"].unique()",
"_____no_output_____"
]
],
[
[
"---",
"_____no_output_____"
],
[
"We are looking at excitatory cells, so we should use `cre_line` of `Slc17a7-IRES2-Cre`. We want the fast one, so we select `Slc17a7-IRES2-Cre/wt;Camk2a-tTA/wt;Ai93(TITL-GCaMP6f)/wt`.",
"_____no_output_____"
]
],
[
[
"FULL_GENOTYPE = \"Slc17a7-IRES2-Cre/wt;Camk2a-tTA/wt;Ai93(TITL-GCaMP6f)/wt\"",
"_____no_output_____"
]
],
[
[
"### Project Code",
"_____no_output_____"
],
[
"<img style=\"width: 50%\" src=\"https://github.com/seungjaeryanlee/nma-cn-project/blob/main/images/datasets.png?raw=1\">",
"_____no_output_____"
],
[
"\"The distinct groups of mice are referred to as dataset variants and can be identified using the `project_code` column.\" [[AllenNB2]](#References)",
"_____no_output_____"
]
],
[
[
"experiments_df[\"project_code\"].unique()",
"_____no_output_____"
]
],
[
[
"---",
"_____no_output_____"
],
[
"We are interested in single-plane imaging, so either `VisualBehavior` or `VisualBehaviorTask1B` works.",
"_____no_output_____"
]
],
[
[
"# We are looking at single-plane imaging\n# \"VisualBehavior\" or \"VisualBehaviorTask1B\"\nPROJECT_CODE = \"VisualBehavior\"",
"_____no_output_____"
]
],
[
[
"### Experiment",
"_____no_output_____"
],
[
"<img style=\"width: 50%\" src=\"https://github.com/seungjaeryanlee/nma-cn-project/blob/main/images/data_structure.png?raw=1\">",
"_____no_output_____"
],
[
"(Note that we are looking at single-plane imaging, so there is only one row (container) per mouse.)",
"_____no_output_____"
],
[
"#### `MOUSE_ID`",
"_____no_output_____"
],
[
"\"The mouse_id is a 6-digit unique identifier for each experimental animal in the dataset.\" [[AllenNB2]](#References)",
"_____no_output_____"
],
[
"---",
"_____no_output_____"
],
[
"We retrieve all mouse that can be used for our experiment and select one mouse.",
"_____no_output_____"
]
],
[
[
"experiments_df.query(\"project_code == @PROJECT_CODE\") \\\n .query(\"full_genotype == @FULL_GENOTYPE\") \\\n [\"mouse_id\"].unique()",
"_____no_output_____"
],
[
"MOUSE_ID = 450471",
"_____no_output_____"
]
],
[
[
"#### `ACTIVE_SESSION`, `PASSIVE_SESSION`",
"_____no_output_____"
],
[
"<img style=\"width: 50%\" src=\"https://github.com/seungjaeryanlee/nma-cn-project/blob/main/images/experiment_design.png?raw=1\">",
"_____no_output_____"
],
[
"The session_type for each behavior session indicates the behavioral training stage or 2-photon imaging conditions for that particular session. This determines what stimuli were shown and what task parameters were used.\n\nDuring the 2-photon imaging portion of the experiment, mice perform the task with the same set of images they saw during training (either image set A or B), as well as an additional novel set of images (whichever of A or B that they did not see during training). This allows evaluation of the impact of different sensory contexts on neural activity - familiarity versus novelty.\n - Sessions with **familiar images** include those starting with `OPHYS_0`, `OPHYS_1`, `OPHYS_2`, and `OPHYS_3`.\n - Sessions with **novel images** include those starting with `OPHYS_4`, `OPHYS_5`, and `OPHYS_6`.\n\nInterleaved between **active behavior sessions** are **passive viewing sessions** where mice are given their daily water ahead of the sesssion (and are thus satiated) and view the stimulus with the lick spout retracted so they are unable to earn water rewards. This allows comparison of neural activity in response to stimuli under different behavioral context - active task engagement and passive viewing without reward. There are two passive sessions:\n - `OPHYS_2_images_A_passive`: passive session with familiar images\n - `OPHYS_5_images_A_passive`: passive session with novel images\n\n\n\n*Text copied from [[AllenNB2]](#References)*",
"_____no_output_____"
],
[
"---",
"_____no_output_____"
],
[
"We check which sessions are available for this particular mouse and select one active and one passive session type. Not all sessions may be availble due to QC.",
"_____no_output_____"
]
],
[
[
"experiments_df.query(\"project_code == @PROJECT_CODE\") \\\n .query(\"full_genotype == @FULL_GENOTYPE\") \\\n .query(\"mouse_id == @MOUSE_ID\") \\\n [\"session_type\"].unique()",
"_____no_output_____"
]
],
[
[
"Looks like this mouse has all sessions! Let's select the first one then.",
"_____no_output_____"
]
],
[
[
"SESSION_TYPE = \"OPHYS_1_images_A\"",
"_____no_output_____"
]
],
[
[
"#### `EXPERIMENT_ID`",
"_____no_output_____"
],
[
"We retrieve the `ophys_experiment_id` of the session type we chose. We need this ID to get the experiment data.",
"_____no_output_____"
]
],
[
[
"experiments_df.query(\"project_code == @PROJECT_CODE\") \\\n .query(\"full_genotype == @FULL_GENOTYPE\") \\\n .query(\"mouse_id == @MOUSE_ID\") \\\n .query(\"session_type == @SESSION_TYPE\")",
"_____no_output_____"
]
],
[
[
"---",
"_____no_output_____"
],
[
"Looks like this mouse went through the same session multiple times! Let's just select the first experiment ID.",
"_____no_output_____"
]
],
[
[
"EXPERIMENT_ID = 871155338",
"_____no_output_____"
]
],
[
[
"#### `ACTIVE_EXPERIMENT_ID_CONTROL`, `PASSIVE_EXPERIMENT_ID_CONTROL`",
"_____no_output_____"
]
],
[
[
"PASSIVE_EXPERIMENT_ID_CONTROL =884218326",
"_____no_output_____"
]
],
[
[
"## Download Experiment\n\nDownload the experiment with the selected `experiment_id`.",
"_____no_output_____"
],
[
"We can now download the experiment. Each experiment will be approximately 600MB - 2GB in size.",
"_____no_output_____"
]
],
[
[
"experiment = cache.get_behavior_ophys_experiment(EXPERIMENT_ID)",
"_____no_output_____"
],
[
"experiment",
"_____no_output_____"
]
],
[
[
"This returns an instance of `BehaviorOphysExperiment`. It contains multiple attributes that we will need to explore.",
"_____no_output_____"
],
[
"## Attributes of the Experiment\n\nExplore what information we have about the experiment by checking its attributes.",
"_____no_output_____"
],
[
"### `dff_traces`",
"_____no_output_____"
],
[
"\"`dff_traces` dataframe contains traces for all neurons in this experiment, unaligned to any events in the task.\" [[AllenNB3]](#References)",
"_____no_output_____"
]
],
[
[
"experiment.dff_traces.head()",
"_____no_output_____"
]
],
[
[
"Since `dff` is stored as a list, we need to get timestamps for each of those numbers.",
"_____no_output_____"
],
[
"### `ophys_timestamps`",
"_____no_output_____"
],
[
"`ophys_timestamps` contains the timestamps of every record.",
"_____no_output_____"
]
],
[
[
"experiment.ophys_timestamps",
"_____no_output_____"
]
],
[
[
"Let's do a sanity check by checking the length of both lists.",
"_____no_output_____"
]
],
[
[
"print(f\"dff has length {len(experiment.dff_traces.iloc[0]['dff'])}\")\nprint(f\"timestamp has length {len(experiment.ophys_timestamps)}\")",
"dff has length 140164\ntimestamp has length 140164\n"
]
],
[
[
"### `stimulus_presentations`",
"_____no_output_____"
],
[
"We also need timestamps of when stimulus was presented. This information is contained in `stimulus_presentations`.",
"_____no_output_____"
]
],
[
[
"experiment.stimulus_presentations.head()",
"_____no_output_____"
]
],
[
[
"During imaging sessions, stimulus presentations (other than the change and pre-change images) are omitted with a 5% probability, resulting in some inter stimlus intervals appearing as an extended gray screen period. [[AllenNB2]](#References)",
"_____no_output_____"
],
[
"<img style=\"width: 50%\" src=\"https://github.com/seungjaeryanlee/nma-cn-project/blob/main/images/omissions.png?raw=1\">",
"_____no_output_____"
]
],
[
[
"experiment.stimulus_presentations.query(\"omitted\").head()",
"_____no_output_____"
]
],
[
[
"### `stimulus_templates`",
"_____no_output_____"
],
[
"If we want to know what the stimulus looks like, we can check `stimulus_templates`.",
"_____no_output_____"
]
],
[
[
"experiment.stimulus_templates",
"_____no_output_____"
]
],
[
[
"We see that we have a matrix for the `warped` column and a stub matrix for the unwarped column. Let's display the `warped` column.",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots(4, 2, figsize=(8, 12))\n\nfor i, image_name in enumerate(experiment.stimulus_templates.index):\n ax[i%4][i//4].imshow(experiment.stimulus_templates.loc[image_name][\"warped\"], cmap='gray', vmin=0, vmax=255)\n ax[i%4][i//4].set_title(image_name)\n ax[i%4][i//4].get_xaxis().set_visible(False)\n ax[i%4][i//4].get_yaxis().set_visible(False)\n\nfig.show()",
"_____no_output_____"
]
],
[
[
"So this is what the mouse is seeing! But can we see the original, unwarped image? For that, we need to use another AllenSDK cache that contains these images.",
"_____no_output_____"
]
],
[
[
"boc = BrainObservatoryCache()\nscenes_data_set = boc.get_ophys_experiment_data(501498760)",
"_____no_output_____"
]
],
[
[
"This data set contains a lot of images in a form of a 3D matrix (`# images` x `width` x `height` ).",
"_____no_output_____"
]
],
[
[
"scenes = scenes_data_set.get_stimulus_template('natural_scenes')",
"_____no_output_____"
],
[
"scenes.shape",
"_____no_output_____"
]
],
[
[
"We just want the images that were shown above. Notice that the indices are part of the name of the images.",
"_____no_output_____"
]
],
[
[
"experiment.stimulus_templates.index",
"_____no_output_____"
]
],
[
[
"Using this, we can plot the unwarped versions!",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots(4, 2, figsize=(6, 12))\nfor i, image_name in enumerate(experiment.stimulus_templates.index):\n scene_id = int(image_name[2:])\n\n ax[i%4][i//4].imshow(scenes[scene_id, :, :], cmap='gray', vmin=0, vmax=255)\n ax[i%4][i//4].set_title(image_name)\n ax[i%4][i//4].get_xaxis().set_visible(False)\n ax[i%4][i//4].get_yaxis().set_visible(False)",
"_____no_output_____"
]
],
[
[
"## Visualization\n\nWe do some basic plots from the information we gathered from various attributes.",
"_____no_output_____"
],
[
"### Plot dF/F Trace",
"_____no_output_____"
],
[
"Let's choose some random `cell_specimen_id` and plots its dff trace for time 400 to 450.",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots(figsize=(15, 4))\n\nax.plot(\n experiment.ophys_timestamps,\n experiment.dff_traces.loc[1086545833][\"dff\"],\n)\n\nax.set_xlim(400, 450)\n\nfig.show()",
"_____no_output_____"
]
],
[
[
"### Plot Stimulus",
"_____no_output_____"
],
[
"Let's also plot stimulus for a short interval.\n\n*Part of code from [[AllenNB3]](#References)*",
"_____no_output_____"
]
],
[
[
"# Create a color map for each image\nunique_stimuli = [stimulus for stimulus in experiment.stimulus_presentations['image_name'].unique()]\ncolormap = {image_name: sns.color_palette()[image_number] for image_number, image_name in enumerate(np.sort(unique_stimuli))}\n# Keep omitted image as white\ncolormap['omitted'] = (1,1,1)",
"_____no_output_____"
],
[
"stimulus_presentations_sample = experiment.stimulus_presentations.query('stop_time >= 400 and start_time <= 450')",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(figsize=(15, 4))\n\nfor idx, stimulus in stimulus_presentations_sample.iterrows():\n ax.axvspan(stimulus['start_time'], stimulus['stop_time'], color=colormap[stimulus['image_name']], alpha=0.25)\n\nax.set_xlim(400, 450)\n \nfig.show()",
"_____no_output_____"
]
],
[
[
"### Plot Both dF/F trace and Stimulus",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots(figsize=(15, 4))\n\nax.plot(\n experiment.ophys_timestamps,\n experiment.dff_traces.loc[1086545833][\"dff\"],\n)\n\nfor idx, stimulus in stimulus_presentations_sample.iterrows():\n ax.axvspan(stimulus['start_time'], stimulus['stop_time'], color=colormap[stimulus['image_name']], alpha=0.25)\n\nax.set_xlim(400, 450)\nax.set_ylim(-0.5, 0.5)\nax.legend([\"dff trace\"])\n\nfig.show()",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
4ac1f9e84580732a419e12a7e7ecbcc35ede1b6b
| 7,812 |
ipynb
|
Jupyter Notebook
|
notebooks/Funk_SVD.ipynb
|
jyuan1986/MyDSExperiments
|
8c3b6f5c9a525e180d08bfb52a98f8651be1e6d8
|
[
"MIT"
] | null | null | null |
notebooks/Funk_SVD.ipynb
|
jyuan1986/MyDSExperiments
|
8c3b6f5c9a525e180d08bfb52a98f8651be1e6d8
|
[
"MIT"
] | null | null | null |
notebooks/Funk_SVD.ipynb
|
jyuan1986/MyDSExperiments
|
8c3b6f5c9a525e180d08bfb52a98f8651be1e6d8
|
[
"MIT"
] | null | null | null | 28.100719 | 114 | 0.471198 |
[
[
[
"# Funk SVD\n## ref: \n## 1. https://github.com/gbolmier/funk-svd\n## 2. https://sifter.org/simon/journal/20061211.html",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd",
"_____no_output_____"
],
[
"from funk_svd.dataset import fetch_ml_ratings\nfrom funk_svd import SVD\nfrom sklearn.metrics import mean_absolute_error",
"_____no_output_____"
]
],
[
[
"# 1. On explicit feedback problem",
"_____no_output_____"
]
],
[
[
"df = fetch_ml_ratings(variant='100k', data_dir_path = '../data/movielens/ml-100k/u.data')",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
],
[
"train = df.sample(frac=0.8, random_state=7)\nval = df.drop(train.index.tolist()).sample(frac=0.5, random_state=8)\ntest = df.drop(train.index.tolist()).drop(val.index.tolist())",
"_____no_output_____"
],
[
"svd = SVD(learning_rate=0.001, regularization=0.005, n_epochs=200, n_factors=15, min_rating=1, max_rating=5)",
"_____no_output_____"
],
[
"svd.fit(X=train, X_val=val, early_stopping=True, shuffle=False)",
"Preprocessing data...\n\nEpoch 1/200 | val_loss: 1.17 - val_rmse: 1.08 - val_mae: 0.91 - took 0.0 sec\nEpoch 2/200 | val_loss: 1.11 - val_rmse: 1.05 - val_mae: 0.87 - took 0.0 sec\nEpoch 3/200 | val_loss: 1.07 - val_rmse: 1.03 - val_mae: 0.85 - took 0.0 sec\nEpoch 4/200 | val_loss: 1.04 - val_rmse: 1.02 - val_mae: 0.83 - took 0.0 sec\nEpoch 5/200 | val_loss: 1.02 - val_rmse: 1.01 - val_mae: 0.82 - took 0.0 sec\nEpoch 6/200 | val_loss: 1.00 - val_rmse: 1.00 - val_mae: 0.81 - took 0.0 sec\nEpoch 7/200 | val_loss: 0.99 - val_rmse: 1.00 - val_mae: 0.80 - took 0.0 sec\nEpoch 8/200 | val_loss: 0.98 - val_rmse: 0.99 - val_mae: 0.80 - took 0.0 sec\nEpoch 9/200 | val_loss: 0.97 - val_rmse: 0.99 - val_mae: 0.79 - took 0.0 sec\nEpoch 10/200 | val_loss: 0.96 - val_rmse: 0.98 - val_mae: 0.79 - took 0.0 sec\nEpoch 11/200 | val_loss: 0.96 - val_rmse: 0.98 - val_mae: 0.78 - took 0.0 sec\nEpoch 12/200 | val_loss: 0.95 - val_rmse: 0.98 - val_mae: 0.78 - took 0.0 sec\nEpoch 13/200 | val_loss: 0.95 - val_rmse: 0.97 - val_mae: 0.78 - took 0.0 sec\nEpoch 14/200 | val_loss: 0.94 - val_rmse: 0.97 - val_mae: 0.77 - took 0.0 sec\nEpoch 15/200 | val_loss: 0.94 - val_rmse: 0.97 - val_mae: 0.77 - took 0.0 sec\nEpoch 16/200 | val_loss: 0.94 - val_rmse: 0.97 - val_mae: 0.77 - took 0.0 sec\nEpoch 17/200 | val_loss: 0.93 - val_rmse: 0.97 - val_mae: 0.77 - took 0.0 sec\nEpoch 18/200 | val_loss: 0.93 - val_rmse: 0.97 - val_mae: 0.77 - took 0.0 sec\nEpoch 19/200 | val_loss: 0.93 - val_rmse: 0.96 - val_mae: 0.77 - took 0.0 sec\nEpoch 20/200 | val_loss: 0.93 - val_rmse: 0.96 - val_mae: 0.77 - took 0.0 sec\nEpoch 21/200 | val_loss: 0.92 - val_rmse: 0.96 - val_mae: 0.76 - took 0.0 sec\nEpoch 22/200 | val_loss: 0.92 - val_rmse: 0.96 - val_mae: 0.76 - took 0.0 sec\nEpoch 23/200 | val_loss: 0.92 - val_rmse: 0.96 - val_mae: 0.76 - took 0.0 sec\n\nTraining took 0 sec\n"
],
[
"pred = svd.predict(test)\nmae = mean_absolute_error(test['rating'], pred)\nprint(f'Test MAE: {mae:.2f}')",
"Test MAE: 0.75\n"
],
[
"#pred",
"_____no_output_____"
]
],
[
[
"# 2. On implicit feedback problem",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
4ac208f242fe9cd7c281728ad73a77cf0f00898c
| 1,764 |
ipynb
|
Jupyter Notebook
|
Basics/Exercise 02.ipynb
|
suraj-shakya/learning_python
|
e783e81262d8269b765ca17f12c413fa636990aa
|
[
"MIT"
] | null | null | null |
Basics/Exercise 02.ipynb
|
suraj-shakya/learning_python
|
e783e81262d8269b765ca17f12c413fa636990aa
|
[
"MIT"
] | null | null | null |
Basics/Exercise 02.ipynb
|
suraj-shakya/learning_python
|
e783e81262d8269b765ca17f12c413fa636990aa
|
[
"MIT"
] | null | null | null | 28.918033 | 216 | 0.561791 |
[
[
[
"# Exercise 02 \nCreate a python `RiskPredictor.py` file having followings: \n\n 1. function : prepareData\n\t - Parameters\n\t\t - Parameter 1 :\n\t\t\t - Type : Positional \n\t\t\t - Name : PersonName\n\t\t - Parameter 2 : \n\t\t\t - Type : Positional / Keyword\n\t\t\t - Name : Natinonality\n\t\t - Parameter 3 : \n\t\t\t - Type : Arbitary Keyword Arguments\n\t\t\t - Name : place_visited\n\t - Returns : Dictionary from the above information \n \n2. function : predictRisk\n\t- Parameters :\n\t\t- 1 : \n\t\t\t- positional / keyword \n\t\t\t- type dicitionary \n\t\t\t- Note : return value of function prepareData\n\t\t- 2 : \n\t\t\t- Risk Zones\n\t\t\t- Type : lists\n\t- returns riskiness of that person based on the places s/he has visited. If a person has visited a place which is identified to be in risk zones, then the person should be quarantined for at least 14 days.\n\n\nCreate another file ```TestResult.py``` and import`RiskPredictor.py` and call the functions in this file and predict result.",
"_____no_output_____"
]
]
] |
[
"markdown"
] |
[
[
"markdown"
]
] |
4ac20cccb08cf1f3b2f6d0341493c03999392959
| 700,966 |
ipynb
|
Jupyter Notebook
|
Chapter08/Activity8.03.ipynb
|
machinelearningmasteryindia/The-Pandas-Workshop
|
fb3e31c816db939a152ea329a4d7f9e947227e4e
|
[
"MIT"
] | null | null | null |
Chapter08/Activity8.03.ipynb
|
machinelearningmasteryindia/The-Pandas-Workshop
|
fb3e31c816db939a152ea329a4d7f9e947227e4e
|
[
"MIT"
] | null | null | null |
Chapter08/Activity8.03.ipynb
|
machinelearningmasteryindia/The-Pandas-Workshop
|
fb3e31c816db939a152ea329a4d7f9e947227e4e
|
[
"MIT"
] | 1 |
2022-01-22T22:18:08.000Z
|
2022-01-22T22:18:08.000Z
| 671.423372 | 311,604 | 0.94131 |
[
[
[
"#\n# libraries\n#\nimport pandas as pd\nfrom sklearn.linear_model import LinearRegression as OLS\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.preprocessing import StandardScaler\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n#",
"In C:\\Users\\bbate\\Miniconda3\\envs\\keras-gpu-3\\lib\\site-packages\\matplotlib\\mpl-data\\stylelib\\_classic_test.mplstyle: \nThe text.latex.unicode rcparam was deprecated in Matplotlib 3.0 and will be removed in 3.2.\nIn C:\\Users\\bbate\\Miniconda3\\envs\\keras-gpu-3\\lib\\site-packages\\matplotlib\\mpl-data\\stylelib\\_classic_test.mplstyle: \nThe savefig.frameon rcparam was deprecated in Matplotlib 3.1 and will be removed in 3.3.\nIn C:\\Users\\bbate\\Miniconda3\\envs\\keras-gpu-3\\lib\\site-packages\\matplotlib\\mpl-data\\stylelib\\_classic_test.mplstyle: \nThe pgf.debug rcparam was deprecated in Matplotlib 3.0 and will be removed in 3.2.\nIn C:\\Users\\bbate\\Miniconda3\\envs\\keras-gpu-3\\lib\\site-packages\\matplotlib\\mpl-data\\stylelib\\_classic_test.mplstyle: \nThe verbose.level rcparam was deprecated in Matplotlib 3.1 and will be removed in 3.3.\nIn C:\\Users\\bbate\\Miniconda3\\envs\\keras-gpu-3\\lib\\site-packages\\matplotlib\\mpl-data\\stylelib\\_classic_test.mplstyle: \nThe verbose.fileo rcparam was deprecated in Matplotlib 3.1 and will be removed in 3.3.\n"
],
[
"#\n# utility function for plotting histograms in a grid\n#\ndef plot_histogram_grid(df, variables, n_rows, n_cols, bins):\n fig = plt.figure(figsize = (11, 11))\n for i, var_name in enumerate(variables):\n ax = fig.add_subplot(n_rows, n_cols, i + 1)\n#\n# for some variables there are relatively few unique values, so we'll\n# adjust the histogram appearance accordingly to avoid \"gaps\" in the plots\n# \n if len(np.unique(df[var_name])) <= bins:\n use_bins = len(np.unique(df[var_name]))\n else:\n use_bins = bins\n# \n df[var_name].hist(bins = use_bins, ax = ax)\n ax.set_title(var_name)\n fig.tight_layout()\n plt.show()\n#\n# utility function for plotting scatterplots in a grid\n#\ndef plot_scatter_grid(df, y_cols, x_var, rows, cols):\n fig, ax = plt.subplots(rows, cols, figsize = (15, 15))\n fig.tight_layout(pad = 2)\n for row in range(rows):\n for col in range(cols):\n if (row * (cols - 1) + col) <= (len(y_cols) - 1):\n plot_col = y_cols[row * (cols - 1) + col]\n ax[row, col].scatter(my_data[x_var], my_data.loc[:, plot_col])\n ax[row, col].set_title(plot_col + ' vs. ' + x_var)\n else:\n fig.delaxes(ax[row, col])\n plt.show()\n# ",
"_____no_output_____"
],
[
"#\n# load data\n#\nmy_data = pd.read_csv('Datasets\\\\CO_sensors.csv')\nmy_data.head()\n#",
"_____no_output_____"
],
[
"#\nmy_data.describe().T\n#",
"_____no_output_____"
],
[
"#\nplot_histogram_grid(my_data, my_data.columns[1:], 7, 3, 25)\n#",
"_____no_output_____"
],
[
"#\nplt.figure(figsize = (13, 11))\nsns.pairplot(my_data.iloc[:, :5])\n#",
"_____no_output_____"
],
[
"#\nplot_cols = list(my_data.loc[:, 'R1 (MOhm)': ].columns)\nplot_scatter_grid(my_data, plot_cols, 'Time (s)', 5, 4)\n#",
"_____no_output_____"
],
[
"#\nfig, ax = plt.subplots(figsize = (11, 9))\nax.scatter(my_data.loc[(my_data['Time (s)'] > 40000) &\n (my_data['Time (s)'] < 45000), 'Time (s)'],\n my_data.loc[(my_data['Time (s)'] > 40000) &\n (my_data['Time (s)'] < 45000), 'R13 (MOhm)'])\nax.set_title('R13 vs. time')\nplt.show()\n#",
"_____no_output_____"
],
[
"#\nplt.figure(figsize = (13, 11))\nsns.heatmap(my_data.loc[:, 'R1 (MOhm)':].corr())\n#",
"_____no_output_____"
],
[
"#\n# we see two or three groups in the sensor correlations\n#\n# the data description \n# (https://archive.ics.uci.edu/ml/datasets/Gas+sensor+array+temperature+modulation)\n# says there are two kinds of sensors:\n# \"(7 units of TGS 3870-A04) and FIS (7 units of SB-500-12)\"\n# \n# let's investigate the behavior of these vs. CO and humidity\n#\nSensor_CO_corr = pd.concat([my_data.loc[:, ['CO (ppm)', 'Humidity (%r.h.)']],\n my_data.loc[:, 'R1 (MOhm)':]], axis = 1).corr().loc['CO (ppm)':'Humidity (%r.h.)', 'R1 (MOhm)':]\n#\n# plot the CO correlations\n#\nfig, ax = plt.subplots(figsize = (11, 11))\nax.bar(x = Sensor_CO_corr.columns, height = Sensor_CO_corr.loc['CO (ppm)'])\nax.xaxis.set_ticks_position('top')\nplt.xticks(rotation = 90)\nplt.show()\n#\n# plot the humidity correlations\n#\nfig, ax = plt.subplots(figsize = (11, 11))\nax.bar(x = Sensor_CO_corr.columns, height = Sensor_CO_corr.loc['Humidity (%r.h.)'])\nax.xaxis.set_ticks_position('top')\nplt.xticks(rotation = 90)\nplt.show()\n#",
"_____no_output_____"
],
[
"#\n# the sensors are impacted by humidity\n# here we know the humidity, but in the field we would not necessarily\n# the fact that the two sensors have very different CO / humidity sensitivity may be usefule\n# we see the sensor data are all skewed\n# let's add sqrt() transform (since there are 0s and near 0s in those data) to all sensors\n# then fit a linaer model\n#\nsensor_cols = list(my_data.loc[:, 'R1 (MOhm)': ].columns)\nfor i in range(len(sensor_cols)):\n my_data['sqrt_' + sensor_cols[i]] = np.sqrt(my_data[sensor_cols[i]])\n#",
"_____no_output_____"
],
[
"#\nmodel_X = my_data.drop(columns = ['Time (s)', 'Temperature (C)', 'Humidity (%r.h.)', 'CO (ppm)'])\nmodel_y = my_data.loc[:, 'CO (ppm)']\nmy_model = OLS()\nmy_model.fit(model_X, model_y)\npreds = my_model.predict(model_X)\nresiduals = preds - model_y\nfig, ax = plt.subplots(figsize = (9, 9))\nax.hist(residuals, bins = 50)\nplt.show()\n#\nfig, ax = plt.subplots(figsize = (9, 9))\nax.scatter(preds, model_y)\nax.plot([0, 20], [0, 20], color = 'black', lw = 1)\nax.set_xlim(0, 20)\nax.set_ylim(0, 20)\nplt.show()\n#",
"_____no_output_____"
],
[
"#\n# scale the data\n# \nscaler = StandardScaler()\nmodel_X = scaler.fit_transform(model_X)\n#\n# fit a non-linear model\n#\nRF_model = RandomForestRegressor(n_estimators = 250)\nRF_model.fit(model_X, model_y)\n#\n# look at the residuals\n#\npreds = RF_model.predict(model_X)\nresiduals = preds - model_y\n#\nfig, ax = plt.subplots(figsize = (9, 9))\nax.hist(residuals, bins = 50)\nplt.show()\n#\n# compare predicted to actual\n#\nfig, ax = plt.subplots(figsize = (9, 9))\nax.scatter(preds, model_y)\nax.plot([0, 20], [0, 20], color = 'black', lw = 1)\nax.set_xlim(0, 20)\nax.set_ylim(0, 20)\nplt.show()\n#",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4ac2356002d16c1d6a4696c79734e04320462ff2
| 24,066 |
ipynb
|
Jupyter Notebook
|
DynamicProgramming/extra/Consumption Equivalent Variation (CEV).ipynb
|
ChristofferJWeissert/ConsumptionSavingNotebooks
|
1c5cf6c1681d53fc2c0a67b4755a48ade40b31c7
|
[
"MIT"
] | 1 |
2021-11-07T23:37:25.000Z
|
2021-11-07T23:37:25.000Z
|
DynamicProgramming/extra/Consumption Equivalent Variation (CEV).ipynb
|
ChristofferJWeissert/ConsumptionSavingNotebooks
|
1c5cf6c1681d53fc2c0a67b4755a48ade40b31c7
|
[
"MIT"
] | null | null | null |
DynamicProgramming/extra/Consumption Equivalent Variation (CEV).ipynb
|
ChristofferJWeissert/ConsumptionSavingNotebooks
|
1c5cf6c1681d53fc2c0a67b4755a48ade40b31c7
|
[
"MIT"
] | null | null | null | 83.853659 | 15,508 | 0.827433 |
[
[
[
"# Consumption Equivalent Variation (CEV)",
"_____no_output_____"
],
[
"1. Use the model in the **ConsumptionSaving.pdf** slides and solve it using **egm**\n2. This notebooks estimates the *cost of income risk* through the Consumption Equivalent Variation (CEV) \n\nWe will here focus on the cost of income risk, but the CEV can be used to estimate the value of many different aspects of an economy. For eaxample, [Oswald (2019)](http://qeconomics.org/ojs/index.php/qe/article/view/701 \"The option value of homeownership\") estimated the option value of homeownership using a similar strategy as described below.\n\n**Goal:** To estimate the CEV by comparing the *value of life* under the baseline economy and an alternative economy with higher permanent income shock variance along with a consumption compensation.\n\n**Value of Life:** \n1. Let the *utility function* be a generalized version of the CRRA utility function with $\\delta$ included as a potential consumption compensation. \n\\begin{equation}\n{u}(c,\\delta) = \\frac{(c\\cdot(1+\\delta))^{1-\\rho}}{1-\\rho}\n\\end{equation}\n2. Let the *value of life* of a synthetic consumer $s$ for a given level of permanent income shock varaince, $\\sigma_{\\psi}$, and $\\delta$, be\n\\begin{equation}\n{V}_{s}({\\sigma}_{\\psi},\\delta)=\\sum_{t=1}^T \\beta ^{t-1}{u}({c}^{\\star}_{s,t}({\\sigma}_{\\psi},\\delta),\\delta)\n\\end{equation}\nwhere ${c}^{\\star}_{s,t}({\\sigma}_{\\psi},\\delta)$ is optimal consumption found using the **egm**. The value of life is calcualted in the function `value_of_life(.)` defined below.\n\n**Consumption Equivalent Variation:** \n1. Let $V=\\frac{1}{S}\\sum_{s=1}^SV(\\sigma_{\\psi},0)$ be the average value of life under the *baseline* economy with the baseline value of $\\sigma_{\\psi}$ and $\\delta=0$.\n2. 
Let $\\tilde{V}(\\delta)=\\frac{1}{S}\\sum_{s=1}^SV(\\tilde{\\sigma}_{\\psi},\\delta)$ be the average value of life under the *alternative* economy with $\\tilde{\\sigma}_{\\psi} > \\sigma_{\\psi}$.\n\nThe CEV is the value of $\\delta$ that sets $V=\\tilde{V}(\\delta)$ and can be estimated as \n\\begin{equation}\n\\hat{\\delta} = \\arg\\min_\\delta (V-\\tilde{V}(\\delta))^2\n\\end{equation}\nwhere the objective function is calculated in `obj_func_cev(.)` defined below.",
"_____no_output_____"
],
[
"# Setup",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\n%load_ext autoreload\n%autoreload 2\n\nimport time\nimport numpy as np\nimport scipy.optimize as optimize\n\nimport matplotlib.pyplot as plt\nplt.style.use('seaborn-whitegrid')\nprop_cycle = plt.rcParams['axes.prop_cycle']\ncolors = prop_cycle.by_key()['color']\n\nimport sys\nsys.path.append('../')\n\nimport ConsumptionSavingModel as csm\nfrom ConsumptionSavingModel import ConsumptionSavingModelClass",
"_____no_output_____"
]
],
[
[
"# Setup the baseline model and the alternative model",
"_____no_output_____"
]
],
[
[
"par = {'simT':40}\nmodel = ConsumptionSavingModelClass(name='baseline',solmethod='egm',**par)\n\n# increase the permanent income with 100 percent and allow for consumption compensation\npar_cev = {'sigma_psi':0.2,'do_cev':1,'simT':40}\nmodel_cev = ConsumptionSavingModelClass(name='cev',solmethod='egm',**par_cev)",
"_____no_output_____"
],
[
"model.solve()\nmodel.simulate()",
"model solved in 6.1 secs\nmodel simulated in 5.0 secs\n"
]
],
[
[
"# Average value of life ",
"_____no_output_____"
],
[
"**Define Functions:** value of life and objective function used to estimate \"cev\"",
"_____no_output_____"
]
],
[
[
"def value_of_life(model):\n \n # utility associated with consumption for all N and T\n util = csm.utility(model.sim.c,model.par)\n \n # discounted sum of utility\n disc = np.ones(model.par.simT)\n disc[1:] = np.cumprod(np.ones(model.par.simT-1)*model.par.beta)\n \n disc_util = np.sum(disc*util,axis=1)\n \n # return average of discounted sum of utility\n return np.mean(disc_util) \n\ndef obj_func_cev(theta,model_cev,value_of_life_baseline):\n \n # update cev-parameter\n setattr(model_cev.par,'cev',theta)\n \n # re-solve and simulate alternative model\n model_cev.solve(do_print=False)\n model_cev.simulate(do_print=False)\n \n # calculate value of life\n value_of_life_cev = value_of_life(model_cev)\n \n # return squared difference to baseline\n return (value_of_life_cev - value_of_life_baseline)*(value_of_life_cev - value_of_life_baseline)\n",
"_____no_output_____"
]
],
[
[
"**Baseline value of life and objective function at cev=0**",
"_____no_output_____"
]
],
[
[
"value_of_life_baseline = value_of_life(model)\nobj_func_cev(0.0,model_cev,value_of_life_baseline)",
"_____no_output_____"
],
[
"# plot the objective function\ngrid_cev = np.linspace(0.0,0.2,20)\ngrid_obj = np.empty(grid_cev.size)\n\nfor j,cev in enumerate(grid_cev):\n grid_obj[j] = obj_func_cev(cev,model_cev,value_of_life_baseline)\n \nplt.plot(grid_cev,grid_obj); ",
"_____no_output_____"
]
],
[
[
"# Estimate the Consumption Equivalent Variation (CEV)",
"_____no_output_____"
]
],
[
[
"res = optimize.minimize_scalar(obj_func_cev, bounds=[-0.01,0.5], \n args=(model_cev,value_of_life_baseline),method='golden')\nres",
"_____no_output_____"
]
],
[
[
"The estimated CEV suggests that consumers would be indifferent between the baseline economy and a 100% increase in the permanent income shock variance along with a 10% increase in consumption in all periods.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4ac240bf208a7abd3986b1172f1edc735752df13
| 4,207 |
ipynb
|
Jupyter Notebook
|
authorize.ipynb
|
seek4science/seekAPIexamples
|
72db52e055ff718706cc019ca9bee0cd95e2dabc
|
[
"BSD-3-Clause"
] | null | null | null |
authorize.ipynb
|
seek4science/seekAPIexamples
|
72db52e055ff718706cc019ca9bee0cd95e2dabc
|
[
"BSD-3-Clause"
] | null | null | null |
authorize.ipynb
|
seek4science/seekAPIexamples
|
72db52e055ff718706cc019ca9bee0cd95e2dabc
|
[
"BSD-3-Clause"
] | 4 |
2018-10-18T10:33:48.000Z
|
2020-02-26T09:56:31.000Z
| 26.796178 | 132 | 0.47112 |
[
[
[
"Import the libraries so that they can be used within the notebook\n\n* **requests** is used to make HTTP calls\n* **json** is used to encode and decode strings into JSON\n* **string** is used to perform text manipulation and checking\n* **getpass** is used to do non-echoing password input",
"_____no_output_____"
]
],
[
[
"import requests\nimport json\nimport string\nimport getpass",
"_____no_output_____"
]
],
[
[
"The **base_url** holds the URL to the SEEK instance that will be used in the notebook\n\n**headers** holds the HTTP headers that will be sent with every HTTP call\n\n* **Content-type: application/vnd.api+json** - indicates that any data sent will be in JSON API format\n* **Accept: application/vnd.api+json** - indicates that the notebook expects any data returned to be in JSON API format\n* **Accept-Charset: ISO-8859-1** - indicates that the notebook expects any text returned to be in ISO-8859-1 character set",
"_____no_output_____"
]
],
[
[
"base_url = 'http://www.fairdomhub.org/'\n\nheaders = {\"Content-type\": \"application/vnd.api+json\",\n \"Accept\": \"application/vnd.api+json\",\n \"Accept-Charset\": \"ISO-8859-1\"}",
"_____no_output_____"
]
],
[
[
"Create a **requests** HTTP **Session**. A **Session** has re-usable settings such as **headers**\n\nThe **authorization** is username and password. The user is prompted for this information.",
"_____no_output_____"
]
],
[
[
"session = requests.Session()\nsession.headers.update(headers)\nsession.auth = (input('Username:'), getpass.getpass('Password'))",
"_____no_output_____"
]
],
[
[
"Perform a test **GET** to ensure the username and password worked",
"_____no_output_____"
]
],
[
[
"r = session.get(base_url + 'models/311')\n\nr.raise_for_status()",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4ac24ed58f87de97823f862c1506be8c52a08ca8
| 47,141 |
ipynb
|
Jupyter Notebook
|
Modelvgg16 .ipynb
|
BishalLakha/-Emotion-Recognizer
|
c5348bb0e6f289bb67dcbf7da4138defd7f1363f
|
[
"MIT"
] | 1 |
2018-06-06T04:46:34.000Z
|
2018-06-06T04:46:34.000Z
|
Modelvgg16 .ipynb
|
BishalLakha/-Emotion-Recognizer
|
c5348bb0e6f289bb67dcbf7da4138defd7f1363f
|
[
"MIT"
] | null | null | null |
Modelvgg16 .ipynb
|
BishalLakha/-Emotion-Recognizer
|
c5348bb0e6f289bb67dcbf7da4138defd7f1363f
|
[
"MIT"
] | null | null | null | 77.919008 | 15,926 | 0.768015 |
[
[
[
"# Facial Expression Recognizer",
"_____no_output_____"
]
],
[
[
"#The OS module in Python provides a way of using operating system dependent functionality. \n#import os\n# For array manipulation\nimport numpy as np \n#For importing data from csv and other manipulation\nimport pandas as pd\n\n#For displaying images\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\n%matplotlib inline\n\n#For displaying graph\n#import seaborn as sns\n\n#For constructing and handling neural network\nimport tensorflow as tf\n\n#Constants\nLEARNING_RATE = 1e-4\nTRAINING_ITERATIONS = 10000 #increase iteration to improve accuracy \nDROPOUT = 0.5\nBATCH_SIZE = 50\nIMAGE_TO_DISPLAY = 3\nVALIDATION_SIZE = 2000",
"_____no_output_____"
],
[
"#Reading data from csv file\ndata = pd.read_csv('Train_updated_six_emotion.csv')\n\n#Seperating images data from labels ie emotion\nimages = data.iloc[:,1:].values\nimages = images.astype(np.float)\n\n#Normalizaton : convert from [0:255] => [0.0:1.0]\nimages = np.multiply(images, 1.0 / 255.0)\n\nimage_size = images.shape[1]\nimage_width = image_height = 48\n\n",
"_____no_output_____"
],
[
"#Displaying an image from 20K images\ndef display(img):\n #Reshaping,(1*2304) pixels into (48*48)\n one_image = img.reshape(image_width,image_height)\n plt.axis('off')\n #Show image\n plt.imshow(one_image, cmap=cm.binary) \ndisplay(images[IMAGE_TO_DISPLAY])",
"_____no_output_____"
],
[
"#Creating an array of emotion labels using dataframe 'data'\nlabels_flat = data[['label']].values.ravel()\n\nlabels_count = np.unique(labels_flat).shape[0]\n\n# convert class labels from scalars to one-hot vectors\n# 0 => [1 0 0]\n# 1 => [0 1 0]\n# 2 => [0 0 1]\ndef dense_to_one_hot(labels_dense, num_classes = 7):\n num_labels = labels_dense.shape[0]\n index_offset = np.arange(num_labels) * num_classes\n labels_one_hot = np.zeros((num_labels, num_classes))\n labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\n return labels_one_hot\n\nlabels = dense_to_one_hot(labels_flat, labels_count)\nlabels = labels.astype(np.uint8)\n\n#Printing example hot-dense label\nprint ('labels[{0}] => {1}'.format(IMAGE_TO_DISPLAY,labels[IMAGE_TO_DISPLAY]))",
"labels[3] => [0 0 0 0 1 0]\n"
],
[
"#Using data for training & cross validation\nvalidation_images = images[:2000]\nvalidation_labels = labels[:2000]\n\ntrain_images = images[2000:]\ntrain_labels = labels[2000:]\n\n\n",
"_____no_output_____"
]
],
[
[
"#Next is the neural network structure.\n#Weights and biases are created.\n#The weights should be initialised with a small a amount of noise\n#for symmetry breaking, and to prevent 0 gradients. Since we are using\n#rectified neurones (ones that contain rectifier function *f(x)=max(0,x)*),\n#we initialise them with a slightly positive initial bias to avoid \"dead neurones.",
"_____no_output_____"
]
],
[
[
"# initialization of weight\ndef weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\ndef bias_variable(shape):\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n\n\n\n# We use zero padded convolution neural network with a stride of 1 and the size of the output is same as that of input.\n# The convolution layer finds the features in the data the number of filter denoting the number of features to be detected.\ndef conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\n# Pooling downsamples the data. 2x2 max-pooling splits the image into square 2-pixel blocks and only keeps the maximum value \n# for each of the blocks. \ndef max_pool_2x2(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')",
"_____no_output_____"
],
[
"# images\nx = tf.placeholder('float', shape=[None, image_size])\n\n# labels (0, 1 or 2)\ny_ = tf.placeholder('float', shape=[None, labels_count])\n",
"_____no_output_____"
],
[
"BATCH_SIZE",
"_____no_output_____"
]
],
[
[
"### VGG-16 architecture",
"_____no_output_____"
]
],
[
[
"W_conv1 = weight_variable([3, 3, 1, 8])\nb_conv1 = bias_variable([8])\n\n\n# we reshape the input data to a 4d tensor, with the first dimension corresponding to the number of images,\n# second and third - to image width and height, and the final dimension - to the number of colour channels.\n# (20000,2304) => (20000,48,48,1)\nimage = tf.reshape(x, [-1,image_width , image_height,1])\nprint (image.get_shape()) \n\nh_conv1 = tf.nn.relu(conv2d(image, W_conv1) + b_conv1)\nprint (h_conv1)\n\n\n\nW_conv2 = weight_variable([3, 3, 8, 8])\nb_conv2 = bias_variable([8])\nh_conv2 = tf.nn.relu(conv2d(h_conv1, W_conv2) + b_conv2)\n\nprint (h_conv2)\n# pooling reduces the size of the output from 48x48 to 24x24.\nh_pool1 = max_pool_2x2(h_conv2)\n#print (h_pool1.get_shape()) => (20000, 24, 24, 8)\n\n# Prepare for visualization\n# display 8 features in 4 by 2 grid\nlayer1 = tf.reshape(h_conv1, (-1, image_height, image_width, 4 ,2)) \n\n# reorder so the channels are in the first dimension, x and y follow.\nlayer1 = tf.transpose(layer1, (0, 3, 1, 4,2))\nlayer1 = tf.reshape(layer1, (-1, image_height*4, image_width*2))\n\n\n\n# The second layer has 16 features for each 5x5 patch. Its weight tensor has a shape of [5, 5, 8, 16].\n# The first two dimensions are the patch size. the next is the number of input channels (8 channels correspond to 8\n# features that we got from previous convolutional layer).\n\nW_conv3 = weight_variable([3, 3, 8, 16])\nb_conv3 = bias_variable([16])\nh_conv3 = tf.nn.relu(conv2d(h_pool1, W_conv3) + b_conv3)\nprint(h_conv3)\n\nW_conv4 = weight_variable([3, 3, 16, 16])\nb_conv4 = bias_variable([16])\nh_conv4 = tf.nn.relu(conv2d(h_conv3, W_conv4) + b_conv4)\nprint(h_conv4)\n\nh_pool2 = max_pool_2x2(h_conv4)\n#print (h_pool2.get_shape()) => (20000, 12, 12, 16)\n\n# The third layer has 16 features for each 5x5 patch. Its weight tensor has a shape of [5, 5, 16, 32].\n# The first two dimensions are the patch size. 
the next is the number of input channels (16 channels correspond to 16\n# features that we got from previous convolutional layer)\nW_conv5 = weight_variable([3, 3, 16, 32])\nb_conv5 = bias_variable([32])\nh_conv5 = tf.nn.relu(conv2d(h_pool2, W_conv5) + b_conv5)\nprint(h_conv5)\n\n\nW_conv6 = weight_variable([3, 3, 32, 32])\nb_conv6 = bias_variable([32])\nh_conv6 = tf.nn.relu(conv2d(h_conv5, W_conv6) + b_conv6)\nprint(h_conv6)\n\nW_conv7 = weight_variable([3, 3, 32, 32])\nb_conv7 = bias_variable([32])\nh_conv7 = tf.nn.relu(conv2d(h_conv6, W_conv7) + b_conv7)\nprint(h_conv7)\n\nh_pool3 = max_pool_2x2(h_conv7)\n#print (h_pool2.get_shape()) => (20000, 6, 6, 32)\n\n\nW_conv8 = weight_variable([3, 3, 32, 32])\nb_conv8 = bias_variable([32])\nh_conv8 = tf.nn.relu(conv2d(h_pool3, W_conv8) + b_conv8)\nprint(h_conv8)\n\n\nW_conv9 = weight_variable([3, 3, 32, 32])\nb_conv9 = bias_variable([32])\nh_conv9 = tf.nn.relu(conv2d(h_conv8, W_conv9) + b_conv9)\nprint(h_conv9)\n\nW_conv10 = weight_variable([3, 3, 32, 32])\nb_conv10 = bias_variable([32])\nh_conv10 = tf.nn.relu(conv2d(h_conv9, W_conv10) + b_conv10)\nprint(h_conv10)\n\n\nh_pool4 = max_pool_2x2(h_conv10)\nprint (h_pool4.get_shape())\n# Now that the image size is reduced to 3x3, we add a Fully_Connected_layer) with 1024 neurones\n# to allow processing on the entire image (each of the neurons of the fully connected layer is \n# connected to all the activations/outpus of the previous layer)\n\nW_conv11 = weight_variable([3, 3, 32, 32])\nb_conv11 = bias_variable([32])\nh_conv11 = tf.nn.relu(conv2d(h_pool4, W_conv11) + b_conv11)\nprint(h_conv11)\n\nW_conv12 = weight_variable([3, 3, 32, 32])\nb_conv12 = bias_variable([32])\nh_conv12 = tf.nn.relu(conv2d(h_conv11, W_conv12) + b_conv12)\nprint(h_conv12)\n\nW_conv13 = weight_variable([3, 3, 32, 32])\nb_conv13 = bias_variable([32])\nh_conv13 = tf.nn.relu(conv2d(h_conv12, W_conv13) + b_conv13)\nprint(h_conv13)\n\n\n\n# densely connected layer\nW_fc1 = weight_variable([3 * 3 * 32, 
512])\nb_fc1 = bias_variable([512])\n\n# (20000, 6, 6, 32) => (20000, 1152 )\nh_pool2_flat = tf.reshape(h_conv13, [-1, 3*3*32])\nh_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\nprint (h_fc1.get_shape()) # => (20000, 1024)\n\nW_fc2 = weight_variable([512, 512])\nb_fc2 = bias_variable([512])\n\n\nh_fc2 = tf.nn.relu(tf.matmul(h_fc1, W_fc2) + b_fc2)\nprint (h_fc2.get_shape()) # => (20000, 1024)\n\nW_fc3 = weight_variable([512, 512])\nb_fc3 = bias_variable([512])\n\n\nh_fc3 = tf.nn.relu(tf.matmul(h_fc2, W_fc3) + b_fc3)\nprint (h_fc3.get_shape()) # => (20000, 1024)\n\n# To prevent overfitting, we apply dropout before the readout layer.\n# Dropout removes some nodes from the network at each training stage. Each of the nodes is either kept in the\n# network with probability (keep_prob) or dropped with probability (1 - keep_prob).After the training stage \n# is over the nodes are returned to the NN with their original weights.\nkeep_prob = tf.placeholder('float')\nh_fc1_drop = tf.nn.dropout(h_fc2, keep_prob)\n\n# readout layer 1024*3\nW_fc4 = weight_variable([512, labels_count])\nb_fc4 = bias_variable([labels_count])\n\n# Finally, we add a softmax layer\ny = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc4) + b_fc4)\n#print (y.get_shape()) # => (20000, 3)\n\n\ncross_entropy = -tf.reduce_sum(y_*tf.log(y))\ntrain_step = tf.train.AdamOptimizer(LEARNING_RATE).minimize(cross_entropy)\ncorrect_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))\npredict = tf.argmax(y,1)\n\n",
"(?, 48, 48, 1)\nTensor(\"Relu:0\", shape=(?, 48, 48, 8), dtype=float32)\nTensor(\"Relu_1:0\", shape=(?, 48, 48, 8), dtype=float32)\nTensor(\"Relu_2:0\", shape=(?, 24, 24, 16), dtype=float32)\nTensor(\"Relu_3:0\", shape=(?, 24, 24, 16), dtype=float32)\nTensor(\"Relu_4:0\", shape=(?, 12, 12, 32), dtype=float32)\nTensor(\"Relu_5:0\", shape=(?, 12, 12, 32), dtype=float32)\nTensor(\"Relu_6:0\", shape=(?, 12, 12, 32), dtype=float32)\nTensor(\"Relu_7:0\", shape=(?, 6, 6, 32), dtype=float32)\nTensor(\"Relu_8:0\", shape=(?, 6, 6, 32), dtype=float32)\nTensor(\"Relu_9:0\", shape=(?, 6, 6, 32), dtype=float32)\n(?, 3, 3, 32)\nTensor(\"Relu_10:0\", shape=(?, 3, 3, 32), dtype=float32)\nTensor(\"Relu_11:0\", shape=(?, 3, 3, 32), dtype=float32)\nTensor(\"Relu_12:0\", shape=(?, 3, 3, 32), dtype=float32)\n(?, 512)\n(?, 512)\n(?, 512)\n"
],
[
"epochs_completed = 0\nindex_in_epoch = 0\nnum_examples = train_images.shape[0]\n\n# serve data by batches\ndef next_batch(batch_size):\n \n global train_images\n global train_labels\n global index_in_epoch\n global epochs_completed\n \n start = index_in_epoch\n index_in_epoch += batch_size\n \n # when all trainig data have been already used, it is reorder randomly \n if index_in_epoch > num_examples:\n # finished epoch\n epochs_completed += 1\n # shuffle the data\n perm = np.arange(num_examples)\n np.random.shuffle(perm)\n train_images = train_images[perm]\n train_labels = train_labels[perm]\n # start next epoch\n start = 0\n index_in_epoch = batch_size\n assert batch_size <= num_examples\n end = index_in_epoch\n return train_images[start:end], train_labels[start:end]",
"_____no_output_____"
],
[
"with tf.Session() as sess:\n init = tf.global_variables_initializer()\n sess.run(init)\n\n # visualisation variables\n train_accuracies = []\n validation_accuracies = []\n x_range = []\n\n display_step=1\n\n for i in range(TRAINING_ITERATIONS):\n\n #get new batch\n batch_xs, batch_ys = next_batch(BATCH_SIZE) \n\n # check progress on every 1st,2nd,...,10th,20th,...,100th... step\n if i%display_step == 0 or (i+1) == TRAINING_ITERATIONS:\n\n train_accuracy = accuracy.eval(feed_dict={x:batch_xs, \n y_: batch_ys, \n keep_prob: 1.0}) \n if(VALIDATION_SIZE):\n validation_accuracy = accuracy.eval(feed_dict={ x: validation_images[0:BATCH_SIZE], \n y_: validation_labels[0:BATCH_SIZE], \n keep_prob: 1.0}) \n print('training_accuracy / validation_accuracy => %.2f / %.2f for step %d'%(train_accuracy, validation_accuracy, i))\n\n validation_accuracies.append(validation_accuracy)\n\n else:\n print('training_accuracy => %.4f for step %d'%(train_accuracy, i))\n train_accuracies.append(train_accuracy)\n x_range.append(i)\n\n # increase display_step\n if i%(display_step*10) == 0 and i:\n display_step *= 10\n # train on batch\n sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys, keep_prob: DROPOUT})\n \n \n \n if(VALIDATION_SIZE):\n validation_accuracy = accuracy.eval(feed_dict={x: validation_images, \n y_: validation_labels, \n keep_prob: 1.0})\n print('validation_accuracy => %.4f'%validation_accuracy)\n plt.plot(x_range, train_accuracies,'-b', label='Training')\n plt.plot(x_range, validation_accuracies,'-g', label='Validation')\n plt.legend(loc='lower right', frameon=False)\n plt.ylim(ymax = 1.1, ymin = 0.0)\n plt.ylabel('accuracy')\n plt.xlabel('step')\n plt.show()\n\n \n \n\n \n\n \n",
"training_accuracy / validation_accuracy => 0.18 / 0.20 for step 0\ntraining_accuracy / validation_accuracy => 0.18 / 0.20 for step 1\ntraining_accuracy / validation_accuracy => 0.22 / 0.20 for step 2\ntraining_accuracy / validation_accuracy => 0.12 / 0.20 for step 3\ntraining_accuracy / validation_accuracy => 0.20 / 0.20 for step 4\ntraining_accuracy / validation_accuracy => 0.16 / 0.20 for step 5\ntraining_accuracy / validation_accuracy => 0.26 / 0.20 for step 6\ntraining_accuracy / validation_accuracy => 0.12 / 0.20 for step 7\ntraining_accuracy / validation_accuracy => 0.48 / 0.34 for step 8\ntraining_accuracy / validation_accuracy => 0.30 / 0.36 for step 9\ntraining_accuracy / validation_accuracy => 0.30 / 0.36 for step 10\ntraining_accuracy / validation_accuracy => 0.22 / 0.20 for step 20\ntraining_accuracy / validation_accuracy => 0.24 / 0.20 for step 30\ntraining_accuracy / validation_accuracy => 0.22 / 0.20 for step 40\ntraining_accuracy / validation_accuracy => 0.18 / 0.20 for step 50\ntraining_accuracy / validation_accuracy => 0.20 / 0.20 for step 60\ntraining_accuracy / validation_accuracy => 0.14 / 0.20 for step 70\ntraining_accuracy / validation_accuracy => 0.24 / 0.20 for step 80\ntraining_accuracy / validation_accuracy => 0.16 / 0.20 for step 90\ntraining_accuracy / validation_accuracy => 0.28 / 0.20 for step 100\ntraining_accuracy / validation_accuracy => 0.12 / 0.20 for step 200\ntraining_accuracy / validation_accuracy => 0.32 / 0.20 for step 300\ntraining_accuracy / validation_accuracy => 0.12 / 0.18 for step 400\ntraining_accuracy / validation_accuracy => 0.32 / 0.18 for step 500\ntraining_accuracy / validation_accuracy => 0.32 / 0.26 for step 600\ntraining_accuracy / validation_accuracy => 0.12 / 0.30 for step 700\ntraining_accuracy / validation_accuracy => 0.16 / 0.22 for step 800\ntraining_accuracy / validation_accuracy => 0.32 / 0.28 for step 900\ntraining_accuracy / validation_accuracy => 0.44 / 0.26 for step 1000\ntraining_accuracy / 
validation_accuracy => 0.38 / 0.34 for step 2000\ntraining_accuracy / validation_accuracy => 0.34 / 0.34 for step 3000\ntraining_accuracy / validation_accuracy => 0.40 / 0.34 for step 4000\ntraining_accuracy / validation_accuracy => 0.44 / 0.34 for step 4999\nvalidation_accuracy => 0.3475\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
4ac25195e78b66d5d29c78646ead0f8839cd2707
| 2,734 |
ipynb
|
Jupyter Notebook
|
Cap03/range.ipynb
|
carlos-freitas-gitHub/python-analytics
|
4b55cb2acb3383ded700596c5a856b7e2124f2da
|
[
"Apache-2.0"
] | 1 |
2020-07-31T20:31:19.000Z
|
2020-07-31T20:31:19.000Z
|
Cap03/range.ipynb
|
carlos-freitas-gitHub/python-analytics
|
4b55cb2acb3383ded700596c5a856b7e2124f2da
|
[
"Apache-2.0"
] | null | null | null |
Cap03/range.ipynb
|
carlos-freitas-gitHub/python-analytics
|
4b55cb2acb3383ded700596c5a856b7e2124f2da
|
[
"Apache-2.0"
] | null | null | null | 18.348993 | 113 | 0.443307 |
[
[
[
"## Range",
"_____no_output_____"
]
],
[
[
"#range inicio, fim, intervalo 2\nfor i in range(50, 101, 2):\n print(i, ' ', end='')",
"50 52 54 56 58 60 62 64 66 68 70 72 74 76 78 80 82 84 86 88 90 92 94 96 98 100 "
],
[
"# range com 2 parametros\nfor i in range(3, 6):\n print(i, ' ', end='')",
"3 4 5 "
],
[
"# range regressivo, inicio, fim, passo\nfor i in range(0, -20, -2):\n print(i, '', end='')",
"0 -2 -4 -6 -8 -10 -12 -14 -16 -18 "
],
[
"# range em cumprimento de objeto\nlista = ['Morango', 'Banana', 'Maça', 'Uva']\ntamanho = len(lista)\nfor i in range(0, tamanho):\n print('Item(', str(i), ')=>', lista[i])",
"Item( 0 )=> Morango\nItem( 1 )=> Banana\nItem( 2 )=> Maça\nItem( 3 )=> Uva\n"
],
[
"# tudo em python é um objeto\ntype(range(0 ,3))",
"_____no_output_____"
]
],
[
[
"## Fim",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
4ac259285c1cdbb9d8d2f147f27f5bb012d9b7b8
| 98,457 |
ipynb
|
Jupyter Notebook
|
notebooks/.ipynb_checkpoints/Coursework_4_part7_rnn_batch_norm-checkpoint.ipynb
|
pligor/msd-music-genre-classification
|
8988ec6e8b15927a52d772fc04540a7c334a5cd4
|
[
"MIT"
] | 5 |
2018-02-16T09:24:19.000Z
|
2021-04-16T01:08:10.000Z
|
notebooks/.ipynb_checkpoints/Coursework_4_part7_rnn_batch_norm-checkpoint.ipynb
|
pligor/msd-music-genre-classification
|
8988ec6e8b15927a52d772fc04540a7c334a5cd4
|
[
"MIT"
] | null | null | null |
notebooks/.ipynb_checkpoints/Coursework_4_part7_rnn_batch_norm-checkpoint.ipynb
|
pligor/msd-music-genre-classification
|
8988ec6e8b15927a52d772fc04540a7c334a5cd4
|
[
"MIT"
] | 2 |
2019-03-15T07:49:16.000Z
|
2019-03-30T09:33:10.000Z
| 185.418079 | 39,450 | 0.872472 |
[
[
[
"2017\n\nMachine Learning Practical\n\nUniversity of Edinburgh\n\nGeorgios Pligoropoulos - s1687568\n\nCoursework 4 (part 7)",
"_____no_output_____"
],
[
"### Imports, Inits, and helper functions",
"_____no_output_____"
]
],
[
[
"jupyterNotebookEnabled = True\nplotting = True\ncoursework, part = 4, 7\nsaving = True\n\nif jupyterNotebookEnabled:\n #%load_ext autoreload\n %reload_ext autoreload\n %autoreload 2",
"_____no_output_____"
],
[
"import sys, os\nmlpdir = os.path.expanduser(\n '~/[email protected]/msc_Artificial_Intelligence/mlp_Machine_Learning_Practical/mlpractical'\n)\nsys.path.append(mlpdir)",
"_____no_output_____"
],
[
"from collections import OrderedDict\nfrom __future__ import division\nimport skopt\nfrom mylibs.jupyter_notebook_helper import show_graph\nimport datetime\nimport os\nimport time\nimport tensorflow as tf\nimport numpy as np\nfrom mlp.data_providers import MSD10GenreDataProvider, MSD25GenreDataProvider,\\\n MSD10Genre_Autoencoder_DataProvider, MSD10Genre_StackedAutoEncoderDataProvider\nimport matplotlib.pyplot as plt\n%matplotlib inline\nfrom mylibs.batch_norm import fully_connected_layer_with_batch_norm_and_l2\nfrom mylibs.stacked_autoencoder_pretrainer import \\\n constructModelFromPretrainedByAutoEncoderStack,\\\n buildGraphOfStackedAutoencoder, executeNonLinearAutoencoder\n \nfrom mylibs.jupyter_notebook_helper import getRunTime, getTrainWriter, getValidWriter,\\\n plotStats, initStats, gatherStats\n \nfrom mylibs.tf_helper import tfRMSE, tfMSE, fully_connected_layer\n #trainEpoch, validateEpoch\n\nfrom mylibs.py_helper import merge_dicts\n\nfrom mylibs.dropout_helper import constructProbs\n\nfrom mylibs.batch_norm import batchNormWrapper_byExponentialMovingAvg,\\\n fully_connected_layer_with_batch_norm\n \nimport pickle\nfrom skopt.plots import plot_convergence\nfrom mylibs.jupyter_notebook_helper import DynStats\nimport operator\nfrom skopt.space.space import Integer, Categorical\nfrom skopt import gp_minimize\nfrom rnn.rnn_batch_norm import RNNBatchNorm",
"_____no_output_____"
],
[
"seed = 16011984\nrng = np.random.RandomState(seed=seed)",
"_____no_output_____"
],
[
"config = tf.ConfigProto(log_device_placement=True, allow_soft_placement=True)\nconfig.gpu_options.allow_growth = True",
"_____no_output_____"
],
[
"figcount = 0",
"_____no_output_____"
],
[
"tensorboardLogdir = 'tf_cw%d_%d' % (coursework, part)",
"_____no_output_____"
],
[
"curDtype = tf.float32",
"_____no_output_____"
],
[
"reluBias = 0.1",
"_____no_output_____"
],
[
"batch_size = 50\n\nnum_steps = 6 # number of truncated backprop steps ('n' in the discussion above)\n#num_classes = 2\nstate_size = 10 #each state is represented with a certain width, a vector\nlearningRate = 1e-4 #default of Adam is 1e-3\n#momentum = 0.5\n#lamda2 = 1e-2",
"_____no_output_____"
],
[
"best_params_filename = 'best_params_rnn.npy'",
"_____no_output_____"
]
],
[
[
"here the state size is equal to the number of classes because we have given to the last output all the responsibility.\n\nWe are going to follow a repetitive process. For example if num_steps=6 then we break the 120 segments into 20 parts\n\nThe output of each part will be the genre. We are comparing against the genre every little part ",
"_____no_output_____"
],
[
"### MSD 10 genre task",
"_____no_output_____"
]
],
[
[
"segmentCount = 120\nsegmentLen = 25",
"_____no_output_____"
],
[
"from rnn.msd10_data_providers import MSD10Genre_120_rnn_DataProvider",
"_____no_output_____"
]
],
[
[
"### Experiment with Best Parameters",
"_____no_output_____"
]
],
[
[
"best_params = np.load(best_params_filename)\nbest_params",
"_____no_output_____"
],
[
"(state_size, num_steps) = best_params\n(state_size, num_steps)",
"_____no_output_____"
],
[
"rnnModel = RNNBatchNorm(batch_size=batch_size, rng=rng, dtype = curDtype, config=config,\n segment_count=segmentCount, segment_len= segmentLen)",
"_____no_output_____"
],
[
"%%time\n\nepochs = 100\n\nstats, keys = rnnModel.run_rnn(state_size = state_size, num_steps=num_steps,\n epochs = epochs)\n\nif plotting:\n fig_1, ax_1, fig_2, ax_2 = plotStats(stats, keys)\n plt.show()\n if saving:\n figcount += 1\n fig_1.savefig('cw%d_part%d_%02d_fig_error.svg' % (coursework, part, figcount))\n fig_2.savefig('cw%d_part%d_%02d_fig_valid.svg' % (coursework, part, figcount))\n\nprint max(stats[:, -1]) #maximum validation accuracy",
"epochs: 100\nrnn steps: 4\nstate size: 341\nEnd epoch 01 (139.139 secs): err(train)=1.52, acc(train)=0.47, err(valid)=1.86, acc(valid)=0.33, \nEnd epoch 02 (136.111 secs): err(train)=1.47, acc(train)=0.49, err(valid)=1.74, acc(valid)=0.39, \nEnd epoch 03 (136.024 secs): err(train)=1.44, acc(train)=0.50, err(valid)=1.76, acc(valid)=0.39, \nEnd epoch 04 (135.718 secs): err(train)=1.42, acc(train)=0.51, err(valid)=1.71, acc(valid)=0.40, \nEnd epoch 05 (136.560 secs): err(train)=1.39, acc(train)=0.51, err(valid)=1.59, acc(valid)=0.44, \nEnd epoch 06 (134.916 secs): err(train)=1.36, acc(train)=0.53, err(valid)=1.61, acc(valid)=0.43, \nEnd epoch 07 (134.824 secs): err(train)=1.33, acc(train)=0.54, err(valid)=1.62, acc(valid)=0.44, \nEnd epoch 08 (134.745 secs): err(train)=1.31, acc(train)=0.54, err(valid)=1.53, acc(valid)=0.46, \nEnd epoch 09 (134.994 secs): err(train)=1.30, acc(train)=0.55, err(valid)=1.51, acc(valid)=0.46, \nEnd epoch 10 (135.173 secs): err(train)=1.28, acc(train)=0.55, err(valid)=1.47, acc(valid)=0.49, \nEnd epoch 11 (135.006 secs): err(train)=1.26, acc(train)=0.56, err(valid)=1.50, acc(valid)=0.48, \nEnd epoch 12 (134.699 secs): err(train)=1.24, acc(train)=0.57, err(valid)=1.50, acc(valid)=0.49, \nEnd epoch 13 (135.139 secs): err(train)=1.22, acc(train)=0.57, err(valid)=1.48, acc(valid)=0.48, \nEnd epoch 14 (134.665 secs): err(train)=1.21, acc(train)=0.58, err(valid)=1.43, acc(valid)=0.50, \nEnd epoch 15 (135.003 secs): err(train)=1.19, acc(train)=0.59, err(valid)=1.46, acc(valid)=0.49, \nEnd epoch 16 (134.754 secs): err(train)=1.18, acc(train)=0.59, err(valid)=1.42, acc(valid)=0.50, \nEnd epoch 17 (135.116 secs): err(train)=1.17, acc(train)=0.59, err(valid)=1.38, acc(valid)=0.52, \nEnd epoch 18 (135.073 secs): err(train)=1.16, acc(train)=0.60, err(valid)=1.42, acc(valid)=0.51, \nEnd epoch 19 (135.044 secs): err(train)=1.15, acc(train)=0.60, err(valid)=1.41, acc(valid)=0.51, \nEnd epoch 20 (134.449 secs): err(train)=1.14, acc(train)=0.60, 
err(valid)=1.36, acc(valid)=0.53, \nEnd epoch 21 (133.914 secs): err(train)=1.14, acc(train)=0.61, err(valid)=1.32, acc(valid)=0.55, \nEnd epoch 22 (132.886 secs): err(train)=1.12, acc(train)=0.61, err(valid)=1.39, acc(valid)=0.52, \nEnd epoch 23 (132.495 secs): err(train)=1.12, acc(train)=0.61, err(valid)=1.37, acc(valid)=0.52, \nEnd epoch 24 (132.171 secs): err(train)=1.11, acc(train)=0.61, err(valid)=1.36, acc(valid)=0.53, \nEnd epoch 25 (132.283 secs): err(train)=1.10, acc(train)=0.62, err(valid)=1.33, acc(valid)=0.54, \nEnd epoch 26 (132.222 secs): err(train)=1.10, acc(train)=0.62, err(valid)=1.31, acc(valid)=0.55, \nEnd epoch 27 (132.315 secs): err(train)=1.10, acc(train)=0.62, err(valid)=1.34, acc(valid)=0.54, \nEnd epoch 28 (132.404 secs): err(train)=1.09, acc(train)=0.62, err(valid)=1.36, acc(valid)=0.53, \nEnd epoch 29 (132.017 secs): err(train)=1.08, acc(train)=0.63, err(valid)=1.27, acc(valid)=0.56, \nEnd epoch 30 (132.469 secs): err(train)=1.08, acc(train)=0.63, err(valid)=1.29, acc(valid)=0.56, \nEnd epoch 31 (132.075 secs): err(train)=1.08, acc(train)=0.63, err(valid)=1.31, acc(valid)=0.55, \nEnd epoch 32 (132.539 secs): err(train)=1.07, acc(train)=0.63, err(valid)=1.28, acc(valid)=0.55, \nEnd epoch 33 (132.164 secs): err(train)=1.06, acc(train)=0.63, err(valid)=1.30, acc(valid)=0.55, \nEnd epoch 34 (132.666 secs): err(train)=1.06, acc(train)=0.63, err(valid)=1.27, acc(valid)=0.56, \nEnd epoch 35 (132.103 secs): err(train)=1.06, acc(train)=0.63, err(valid)=1.33, acc(valid)=0.54, \nEnd epoch 36 (132.601 secs): err(train)=1.05, acc(train)=0.63, err(valid)=1.29, acc(valid)=0.55, \nEnd epoch 37 (132.110 secs): err(train)=1.05, acc(train)=0.63, err(valid)=1.26, acc(valid)=0.56, \nEnd epoch 38 (132.868 secs): err(train)=1.05, acc(train)=0.64, err(valid)=1.30, acc(valid)=0.55, \nEnd epoch 39 (132.071 secs): err(train)=1.04, acc(train)=0.64, err(valid)=1.28, acc(valid)=0.56, \nEnd epoch 40 (132.669 secs): err(train)=1.04, acc(train)=0.64, err(valid)=1.25, 
acc(valid)=0.57, \nEnd epoch 41 (132.112 secs): err(train)=1.03, acc(train)=0.64, err(valid)=1.25, acc(valid)=0.57, \nEnd epoch 42 (132.737 secs): err(train)=1.03, acc(train)=0.65, err(valid)=1.26, acc(valid)=0.56, \nEnd epoch 43 (132.173 secs): err(train)=1.03, acc(train)=0.64, err(valid)=1.30, acc(valid)=0.55, \nEnd epoch 44 (132.676 secs): err(train)=1.03, acc(train)=0.64, err(valid)=1.26, acc(valid)=0.56, \nEnd epoch 45 (132.121 secs): err(train)=1.02, acc(train)=0.65, err(valid)=1.22, acc(valid)=0.58, \nEnd epoch 46 (133.281 secs): err(train)=1.01, acc(train)=0.65, err(valid)=1.24, acc(valid)=0.57, \nEnd epoch 47 (132.024 secs): err(train)=1.01, acc(train)=0.65, err(valid)=1.22, acc(valid)=0.58, \nEnd epoch 48 (132.662 secs): err(train)=1.01, acc(train)=0.65, err(valid)=1.28, acc(valid)=0.56, \nEnd epoch 49 (132.112 secs): err(train)=1.01, acc(train)=0.65, err(valid)=1.23, acc(valid)=0.58, \nEnd epoch 50 (132.779 secs): err(train)=1.00, acc(train)=0.66, err(valid)=1.29, acc(valid)=0.56, \nEnd epoch 51 (132.191 secs): err(train)=1.00, acc(train)=0.66, err(valid)=1.26, acc(valid)=0.56, \nEnd epoch 52 (132.774 secs): err(train)=1.00, acc(train)=0.65, err(valid)=1.27, acc(valid)=0.57, \nEnd epoch 53 (132.140 secs): err(train)=1.00, acc(train)=0.65, err(valid)=1.29, acc(valid)=0.55, \nEnd epoch 54 (132.859 secs): err(train)=1.00, acc(train)=0.66, err(valid)=1.25, acc(valid)=0.57, \nEnd epoch 55 (132.248 secs): err(train)=0.99, acc(train)=0.66, err(valid)=1.22, acc(valid)=0.58, \nEnd epoch 56 (132.784 secs): err(train)=0.99, acc(train)=0.66, err(valid)=1.28, acc(valid)=0.56, \nEnd epoch 57 (132.140 secs): err(train)=0.99, acc(train)=0.66, err(valid)=1.24, acc(valid)=0.58, \nEnd epoch 58 (132.740 secs): err(train)=0.98, acc(train)=0.66, err(valid)=1.19, acc(valid)=0.59, \nEnd epoch 59 (132.080 secs): err(train)=0.98, acc(train)=0.66, err(valid)=1.25, acc(valid)=0.58, \nEnd epoch 60 (132.877 secs): err(train)=0.98, acc(train)=0.66, err(valid)=1.21, acc(valid)=0.58, 
\nEnd epoch 61 (132.083 secs): err(train)=0.98, acc(train)=0.66, err(valid)=1.25, acc(valid)=0.57, \nEnd epoch 62 (132.842 secs): err(train)=0.97, acc(train)=0.66, err(valid)=1.20, acc(valid)=0.59, \nEnd epoch 63 (132.143 secs): err(train)=0.97, acc(train)=0.66, err(valid)=1.22, acc(valid)=0.59, \nEnd epoch 64 (132.388 secs): err(train)=0.97, acc(train)=0.66, err(valid)=1.24, acc(valid)=0.58, \nEnd epoch 65 (132.199 secs): err(train)=0.97, acc(train)=0.67, err(valid)=1.26, acc(valid)=0.57, \nEnd epoch 66 (132.392 secs): err(train)=0.96, acc(train)=0.67, err(valid)=1.22, acc(valid)=0.59, \nEnd epoch 67 (132.141 secs): err(train)=0.96, acc(train)=0.67, err(valid)=1.24, acc(valid)=0.58, \nEnd epoch 68 (132.522 secs): err(train)=0.96, acc(train)=0.67, err(valid)=1.22, acc(valid)=0.59, \nEnd epoch 69 (132.546 secs): err(train)=0.96, acc(train)=0.67, err(valid)=1.22, acc(valid)=0.58, \nEnd epoch 70 (132.253 secs): err(train)=0.96, acc(train)=0.67, err(valid)=1.20, acc(valid)=0.59, \nEnd epoch 71 (132.534 secs): err(train)=0.96, acc(train)=0.67, err(valid)=1.18, acc(valid)=0.60, \nEnd epoch 72 (132.317 secs): err(train)=0.96, acc(train)=0.67, err(valid)=1.19, acc(valid)=0.60, \nEnd epoch 73 (132.451 secs): err(train)=0.95, acc(train)=0.67, err(valid)=1.19, acc(valid)=0.60, \nEnd epoch 74 (132.243 secs): err(train)=0.95, acc(train)=0.67, err(valid)=1.19, acc(valid)=0.59, \nEnd epoch 75 (132.654 secs): err(train)=0.95, acc(train)=0.67, err(valid)=1.23, acc(valid)=0.59, \nEnd epoch 76 (132.255 secs): err(train)=0.95, acc(train)=0.67, err(valid)=1.17, acc(valid)=0.60, \nEnd epoch 77 (132.654 secs): err(train)=0.95, acc(train)=0.68, err(valid)=1.25, acc(valid)=0.59, \nEnd epoch 78 (132.187 secs): err(train)=0.94, acc(train)=0.67, err(valid)=1.16, acc(valid)=0.60, \nEnd epoch 79 (132.895 secs): err(train)=0.94, acc(train)=0.67, err(valid)=1.23, acc(valid)=0.58, \nEnd epoch 80 (132.320 secs): err(train)=0.94, acc(train)=0.68, err(valid)=1.24, acc(valid)=0.58, \nEnd epoch 81 
(132.546 secs): err(train)=0.94, acc(train)=0.67, err(valid)=1.20, acc(valid)=0.59, \nEnd epoch 82 (132.374 secs): err(train)=0.94, acc(train)=0.68, err(valid)=1.19, acc(valid)=0.60, \nEnd epoch 83 (132.774 secs): err(train)=0.93, acc(train)=0.68, err(valid)=1.23, acc(valid)=0.58, \nEnd epoch 84 (132.262 secs): err(train)=0.94, acc(train)=0.67, err(valid)=1.20, acc(valid)=0.60, \nEnd epoch 85 (132.672 secs): err(train)=0.93, acc(train)=0.68, err(valid)=1.19, acc(valid)=0.60, \nEnd epoch 86 (132.240 secs): err(train)=0.93, acc(train)=0.68, err(valid)=1.18, acc(valid)=0.60, \nEnd epoch 87 (132.643 secs): err(train)=0.93, acc(train)=0.68, err(valid)=1.20, acc(valid)=0.59, \nEnd epoch 88 (132.271 secs): err(train)=0.93, acc(train)=0.68, err(valid)=1.18, acc(valid)=0.61, \nEnd epoch 89 (132.771 secs): err(train)=0.93, acc(train)=0.68, err(valid)=1.21, acc(valid)=0.59, \nEnd epoch 90 (132.536 secs): err(train)=0.92, acc(train)=0.68, err(valid)=1.22, acc(valid)=0.59, \nEnd epoch 91 (136.182 secs): err(train)=0.92, acc(train)=0.68, err(valid)=1.19, acc(valid)=0.61, \nEnd epoch 92 (134.153 secs): err(train)=0.92, acc(train)=0.68, err(valid)=1.20, acc(valid)=0.60, \nEnd epoch 93 (134.401 secs): err(train)=0.92, acc(train)=0.68, err(valid)=1.19, acc(valid)=0.60, \nEnd epoch 94 (132.846 secs): err(train)=0.92, acc(train)=0.68, err(valid)=1.21, acc(valid)=0.59, \nEnd epoch 95 (133.465 secs): err(train)=0.92, acc(train)=0.68, err(valid)=1.14, acc(valid)=0.61, \nEnd epoch 96 (133.097 secs): err(train)=0.92, acc(train)=0.68, err(valid)=1.18, acc(valid)=0.60, \nEnd epoch 97 (141.926 secs): err(train)=0.91, acc(train)=0.68, err(valid)=1.19, acc(valid)=0.60, \nEnd epoch 98 (139.937 secs): err(train)=0.91, acc(train)=0.68, err(valid)=1.18, acc(valid)=0.61, \nEnd epoch 99 (135.033 secs): err(train)=0.91, acc(train)=0.68, err(valid)=1.21, acc(valid)=0.60, \nEnd epoch 100 (132.227 secs): err(train)=0.91, acc(train)=0.69, err(valid)=1.16, acc(valid)=0.61, \n\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
4ac267190a74a64caa4bd28cd462f6bcb27ef6ee
| 2,155 |
ipynb
|
Jupyter Notebook
|
local_examples/Custom Local Server.ipynb
|
NickolausDS/jupyterlab
|
5b734e6c1a1fe3aaeb68efb2a75c4e721063ffc4
|
[
"BSD-3-Clause"
] | null | null | null |
local_examples/Custom Local Server.ipynb
|
NickolausDS/jupyterlab
|
5b734e6c1a1fe3aaeb68efb2a75c4e721063ffc4
|
[
"BSD-3-Clause"
] | null | null | null |
local_examples/Custom Local Server.ipynb
|
NickolausDS/jupyterlab
|
5b734e6c1a1fe3aaeb68efb2a75c4e721063ffc4
|
[
"BSD-3-Clause"
] | null | null | null | 24.770115 | 125 | 0.510441 |
[
[
[
"template = \"\"\"\n<h1>$app_name</h1>\n<p>\n $login_result <br> You may close this tab.\n</p>\n<p>\n $error\n</p>\n<p>\n $post_login_message\n</p>\n\"\"\"\n\ntemplate_vars = {\n 'defaults': {\n 'app_name': '', # Auto-populated if blank, but can be changed\n 'post_login_message': '',\n 'error': '', # Present if there is an error in Globus Auth\n },\n 'success': {\n 'login_result': '<p>Login Successful</p><img src=\"https://i.imgur.com/XRzThJH.jpg\" height=600 width=800>',\n },\n 'error': {\n 'login_result': '<p>Login Failed</p><img src=\"https://i.imgur.com/YjMR0E6.jpg\" height=700 width=700>',\n }\n}",
"_____no_output_____"
],
[
"from fair_research_login import NativeClient, LocalServerCodeHandler\n\napp = NativeClient(\n client_id='7414f0b4-7d05-4bb6-bb00-076fa3f17cf5',\n # Use our custom local server\n local_server_code_handler=LocalServerCodeHandler(template, template_vars),\n # Automatically populates 'app_name' in template if defined\n app_name='Native Login Examples',\n)\n\napp.login()\napp.logout()",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code"
]
] |
4ac279fd760a7b8f6fc836547dc25d24d4c4f56a
| 9,484 |
ipynb
|
Jupyter Notebook
|
Day_044_HW.ipynb
|
semishen/ML100Days
|
423ee8fc4beeae43694a33143b9a94bf5e15fd92
|
[
"MIT"
] | null | null | null |
Day_044_HW.ipynb
|
semishen/ML100Days
|
423ee8fc4beeae43694a33143b9a94bf5e15fd92
|
[
"MIT"
] | null | null | null |
Day_044_HW.ipynb
|
semishen/ML100Days
|
423ee8fc4beeae43694a33143b9a94bf5e15fd92
|
[
"MIT"
] | null | null | null | 32.258503 | 522 | 0.510122 |
[
[
[
"<a href=\"https://colab.research.google.com/github/semishen/ML100Days/blob/master/Day_044_HW.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"## [作業重點]\n確保你了解隨機森林模型中每個超參數的意義,並觀察調整超參數對結果的影響",
"_____no_output_____"
],
[
"## 作業\n\n1. 試著調整 RandomForestClassifier(...) 中的參數,並觀察是否會改變結果?\n2. 改用其他資料集 (boston, wine),並與回歸模型與決策樹的結果進行比較",
"_____no_output_____"
]
],
[
[
"from sklearn import datasets, metrics\nfrom sklearn.linear_model import LogisticRegression, LinearRegression\nfrom sklearn.ensemble import RandomForestClassifier, RandomForestRegressor\nfrom sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor\nfrom sklearn.model_selection import train_test_split",
"_____no_output_____"
],
[
"wine = datasets.load_wine()\nx = wine.data\ny = wine.target\nprint('x sahpe: ', x.shape)\nprint('y sample: ', y[: 6]) # classification",
"x sahpe: (178, 13)\ny sample: [0 0 0 0 0 0]\n"
],
[
"from sklearn.model_selection import train_test_split\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=4)\n",
"_____no_output_____"
],
[
"# baseline logistic regression\nlogreg = LogisticRegression(solver='newton-cg')\nlogreg.fit(x_train, y_train)\n\nprint('params: ', logreg.coef_)\nprint('acc: ', logreg.score(x_test, y_test))\n",
"params: [[ 5.19149194e-01 4.46818377e-01 7.01360494e-01 -2.41354158e-01\n -3.37662593e-02 2.96003693e-01 8.17975088e-01 6.69511321e-02\n 1.23766351e-01 1.42628825e-01 1.22797606e-02 6.79007848e-01\n 9.42339666e-03]\n [-7.28557645e-01 -8.38669577e-01 -7.33993168e-01 1.03649061e-01\n -2.22170531e-02 2.20399215e-01 2.00112252e-01 7.07250984e-02\n 4.61132350e-01 -9.43397660e-01 2.85903004e-01 4.37550463e-02\n -8.47572631e-03]\n [ 2.09408451e-01 3.91851200e-01 3.26326740e-02 1.37705096e-01\n 5.59833124e-02 -5.16402908e-01 -1.01808734e+00 -1.37676230e-01\n -5.84898701e-01 8.00768835e-01 -2.98182765e-01 -7.22762895e-01\n -9.47670350e-04]]\nacc: 0.9722222222222222\n"
],
[
"clf = RandomForestClassifier(n_estimators=10, max_depth=4)\nclf.fit(x_train, y_train)\n\nprint('acc: ', clf.score(x_test, y_test))\nprint('feature importances: ', {name:value for (name, value) in zip(wine.feature_names, clf.feature_importances_)})",
"acc: 1.0\nfeature importances: {'alcohol': 0.11961433728832875, 'malic_acid': 0.007154698728635592, 'ash': 0.014328870297433936, 'alcalinity_of_ash': 0.04554983355560036, 'magnesium': 0.024122619222171755, 'total_phenols': 0.0655113302701085, 'flavanoids': 0.1881473900345513, 'nonflavanoid_phenols': 0.0014371742320732756, 'proanthocyanins': 0.04097631065380022, 'color_intensity': 0.14935910758650114, 'hue': 0.0402354351907608, 'od280/od315_of_diluted_wines': 0.07551538995129863, 'proline': 0.22804750298873572}\n"
],
[
"# boston\nboston = datasets.load_boston()\nx = boston.data\ny = boston.target\nprint('x sahpe: ', x.shape)\nprint('y sample: ', y[: 6]) # linear regression\n\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=5)",
"x sahpe: (506, 13)\ny sample: [24. 21.6 34.7 33.4 36.2 28.7]\n"
],
[
"# baseline linear regression\nlinear = LinearRegression()\nlinear.fit(x_train, y_train)\n\nprint('params: ', linear.coef_)\nprint('R2: ', linear.score(x_test, y_test))",
"params: [-1.30799852e-01 4.94030235e-02 1.09535045e-03 2.70536624e+00\n -1.59570504e+01 3.41397332e+00 1.11887670e-03 -1.49308124e+00\n 3.64422378e-01 -1.31718155e-02 -9.52369666e-01 1.17492092e-02\n -5.94076089e-01]\nR2: 0.7334492147453064\n"
],
[
"clf2 = RandomForestRegressor(n_estimators=10, max_depth=4)\nclf2.fit(x_train, y_train)\n\nprint('R2: ', clf2.score(x_test, y_test))\nprint('feature importances: ', {name:value for (name, value) in zip(boston.feature_names, clf2.feature_importances_)})",
"R2: 0.837963895356755\nfeature importances: {'CRIM': 0.03946751128402972, 'ZN': 0.0, 'INDUS': 0.002276176307010874, 'CHAS': 0.0, 'NOX': 0.009136104068026727, 'RM': 0.3577398862093797, 'AGE': 0.0029516843209303235, 'DIS': 0.08636260460188355, 'RAD': 0.002480758878488432, 'TAX': 0.00436853956189145, 'PTRATIO': 0.0034113614528555186, 'B': 0.0033432546939453226, 'LSTAT': 0.4884621186215584}\n"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4ac29643511eed35d783996a5c9a3d330573f20a
| 6,465 |
ipynb
|
Jupyter Notebook
|
examples/reinforcement-learning/pg/reinforce.ipynb
|
ianshmean/Knet.jl
|
5c73ee444801e77a9927b2d6ad3c25dfb6ba6257
|
[
"MIT"
] | 1 |
2019-07-13T09:10:01.000Z
|
2019-07-13T09:10:01.000Z
|
examples/reinforcement-learning/pg/reinforce.ipynb
|
ianshmean/Knet.jl
|
5c73ee444801e77a9927b2d6ad3c25dfb6ba6257
|
[
"MIT"
] | null | null | null |
examples/reinforcement-learning/pg/reinforce.ipynb
|
ianshmean/Knet.jl
|
5c73ee444801e77a9927b2d6ad3c25dfb6ba6257
|
[
"MIT"
] | null | null | null | 29.520548 | 319 | 0.529621 |
[
[
[
"empty"
]
]
] |
[
"empty"
] |
[
[
"empty"
]
] |
4ac29e63695567e709fedb786e0bd94548724b48
| 8,745 |
ipynb
|
Jupyter Notebook
|
python-for-data/Ex06 - Strings and Dictionaries.ipynb
|
interphuoc/atom-assignments
|
ac5ae4d7d1b8666f560a123b4ab1ab88b0acd6b5
|
[
"MIT"
] | null | null | null |
python-for-data/Ex06 - Strings and Dictionaries.ipynb
|
interphuoc/atom-assignments
|
ac5ae4d7d1b8666f560a123b4ab1ab88b0acd6b5
|
[
"MIT"
] | null | null | null |
python-for-data/Ex06 - Strings and Dictionaries.ipynb
|
interphuoc/atom-assignments
|
ac5ae4d7d1b8666f560a123b4ab1ab88b0acd6b5
|
[
"MIT"
] | null | null | null | 27.850318 | 391 | 0.566838 |
[
[
[
"# Exercises 06 - Strings and Dictionaries",
"_____no_output_____"
],
[
"## 0. Length of Strings\n\nLet's start with a string lightning round to warm up. What are the lengths of the strings below?\n\nFor each of the five strings below, predict what `len()` would return when passed that string. Use the variable `length` to record your answer.",
"_____no_output_____"
]
],
[
[
"a = \"\"\nlength = 0\nprint(length==len(a))",
"True\n"
],
[
"b = \"it's ok\"\nlength = 7\nprint(length==len(b))",
"True\n"
],
[
"c = 'it\\'s ok'\nlength = 7\nprint(length==len(c))",
"True\n"
],
[
"d = \"\"\"hey\"\"\"\nlength = 3\nprint(length==len(d))",
"True\n"
],
[
"e = '\\n'\nlength = 1\nprint(length==len(e))",
"True\n"
]
],
[
[
"## 1. Check the Zip Code\n\nThere is a saying that *\\\"Data scientists spend 80% of their time cleaning data, and 20% of their time complaining about cleaning data.\\\"* Let's see if you can write a function to help clean US zip code data. Given a string, it should return whether or not that string represents a valid zip code. For our purposes, a valid zip code is any string consisting of exactly 5 digits.\n\nHINT: `str` has a method that will be useful here. Use `help(str)` to review a list of string methods.",
"_____no_output_____"
]
],
[
[
"def is_valid_zip(zip_code):\n \"\"\"Returns whether the input string is a valid (5 digit) zip code\n \"\"\"\n return len(zip_code) == 5 and zip_code.isdigit() # make sure that zip_code is in digit format\nprint(is_valid_zip(\"123456\"))\nprint(is_valid_zip(\"abcde\"))\nprint(is_valid_zip(\"12345\"))",
"False\nFalse\nTrue\n"
]
],
[
[
"## 2. Searching a Word\n\nA researcher has gathered thousands of news articles. But she wants to focus her attention on articles including a specific word. Complete the function below to help her filter her list of articles.\n\nYour function should meet the following criteria\n\n- Do not include documents where the keyword string shows up only as a part of a larger word. For example, if she were looking for the keyword “closed”, you would not include the string “enclosed.” \n- She does not want you to distinguish upper case from lower case letters. So the phrase “Closed the case.” would be included when the keyword is “closed”\n- Do not let periods or commas affect what is matched. “It is closed.” would be included when the keyword is “closed”. But you can assume there are no other types of punctuation.\n\n*HINT*: Some methods that may be useful here: `str.split()`, `str.strip()`, `str.lower()`",
"_____no_output_____"
]
],
[
[
"def word_search(doc_list, keyword):\n \"\"\"\n Takes a list of documents (each document is a string) and a keyword. \n Returns list of the index values into the original list for all documents \n containing the keyword.\n\n Example:\n doc_list = [\"The Learn Python Challenge Casino.\", \"They bought a car\", \"Casinoville\"]\n >>> word_search(doc_list, 'casino')\n >>> [0]\n \"\"\"\n index_list = []\n for i in range(len(doc_list)-1):\n if keyword.lower() in doc_list[i].lower().strip('.').split():\n index_list.append(i)\n return index_list\n\ndoc_list = [\"The Learn Python Challenge Casino.\", \"They bought a car\", \"Casinoville\"]\nword_search(doc_list, 'casino')",
"_____no_output_____"
]
],
[
[
"## 3. Searching Multiple Words\n\nNow the researcher wants to supply multiple keywords to search for. Complete the function below to help her.\n\n(You're encouraged to use the `word_search` function you just wrote when implementing this function. Reusing code in this way makes your programs more robust and readable - and it saves typing!)",
"_____no_output_____"
]
],
[
[
"def multi_word_search(doc_list, keywords):\n \"\"\"\n Takes list of documents (each document is a string) and a list of keywords. \n Returns a dictionary where each key is a keyword, and the value is a list of indices\n (from doc_list) of the documents containing that keyword\n\n >>> doc_list = [\"The Learn Python Challenge Casino.\", \"They bought a car and a casino\", \"Casinoville\"]\n >>> keywords = ['casino', 'they']\n >>> multi_word_search(doc_list, keywords)\n {'casino': [0, 1], 'they': [1]}\n \"\"\"\n dictionary = {}\n for keyword in keywords:\n dictionary[keyword] = word_search(doc_list, keyword)\n return dictionary\n \ndoc_list = [\"The Learn Python Challenge Casino.\", \"They bought a car and a casino\", \"Casinoville\"]\nkeywords = ['casino', 'they']\nmulti_word_search(doc_list, keywords)",
"_____no_output_____"
]
],
[
[
"# Keep Going 💪",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.