hexsha
stringlengths 40
40
| size
int64 6
14.9M
| ext
stringclasses 1
value | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 6
260
| max_stars_repo_name
stringlengths 6
119
| max_stars_repo_head_hexsha
stringlengths 40
41
| max_stars_repo_licenses
list | max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 6
260
| max_issues_repo_name
stringlengths 6
119
| max_issues_repo_head_hexsha
stringlengths 40
41
| max_issues_repo_licenses
list | max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 6
260
| max_forks_repo_name
stringlengths 6
119
| max_forks_repo_head_hexsha
stringlengths 40
41
| max_forks_repo_licenses
list | max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | avg_line_length
float64 2
1.04M
| max_line_length
int64 2
11.2M
| alphanum_fraction
float64 0
1
| cells
list | cell_types
list | cell_type_groups
list |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a1f014e3bba70105112750a452eac9c45e25771
| 7,862 |
ipynb
|
Jupyter Notebook
|
notebooks/book1/11/linreg_2d_bayes_demo.ipynb
|
karm-patel/pyprobml
|
af8230a0bc0d01bb0f779582d87e5856d25e6211
|
[
"MIT"
] | null | null | null |
notebooks/book1/11/linreg_2d_bayes_demo.ipynb
|
karm-patel/pyprobml
|
af8230a0bc0d01bb0f779582d87e5856d25e6211
|
[
"MIT"
] | null | null | null |
notebooks/book1/11/linreg_2d_bayes_demo.ipynb
|
karm-patel/pyprobml
|
af8230a0bc0d01bb0f779582d87e5856d25e6211
|
[
"MIT"
] | null | null | null | 38.920792 | 120 | 0.547443 |
[
[
[
"empty"
]
]
] |
[
"empty"
] |
[
[
"empty"
]
] |
4a1f047380df3c7cb145ad7b7e76c8241f7e6695
| 30,809 |
ipynb
|
Jupyter Notebook
|
Basic-Course/01-02-Python-Refresher/refresher.ipynb
|
suzynakayama/python-flask-udemy
|
95d2c5fa328e2f50d0893d73fd386fb713d1f12b
|
[
"MIT"
] | 1 |
2021-11-30T14:13:10.000Z
|
2021-11-30T14:13:10.000Z
|
Basic-Course/01-02-Python-Refresher/refresher.ipynb
|
suzynakayama/python-flask-udemy
|
95d2c5fa328e2f50d0893d73fd386fb713d1f12b
|
[
"MIT"
] | null | null | null |
Basic-Course/01-02-Python-Refresher/refresher.ipynb
|
suzynakayama/python-flask-udemy
|
95d2c5fa328e2f50d0893d73fd386fb713d1f12b
|
[
"MIT"
] | null | null | null | 34.423464 | 1,602 | 0.545003 |
[
[
[
"a = 'ok'\nb = 'test'\nprint(a+b)\nprint(a*2)",
"oktest\nokok\n"
],
[
"name = 'Bob'\nprint(f'Hello, {name}')",
"Hello, Bob\n"
],
[
"greeting = 'Hello, {}'\nwith_name = greeting.format(name)\nprint(with_name)",
"Hello, Bob\n"
],
[
"size = input('Enter the size of your house: ')\ninteger = int(size)\nfloating = float(size)\nprint(integer, floating)\nsquare_meters = integer / 10.8\nprint(f'{integer} square feet is {square_meters} square meters.')\nprint(f'{integer} square feet is {square_meters:.2f} square meters.')",
"_____no_output_____"
],
[
"user_age = input('Enter your age: ')\nyears = int(user_age)\nmonths = years * 12\ndays = months * 30\nhours = days * 24\nminutes = hours * 60\nseconds = minutes * 60\nprint(f'Your age, {years}, is equal to {months} months or {seconds} seconds.')",
"Your age, 35, is equal to 420 months or 1088640000 seconds.\n"
],
[
"friends = {'Bob', 'Anne', 'Rolf'}\nabroad = {'Bob', 'Rolf'}\nlocal_friends = friends.difference(abroad)\nprint(local_friends)\n\nlocal_friends_opposite = abroad.difference(friends)\nprint(local_friends_opposite)\n\nother_friends = {'Maria', 'Jose'}\nall_friends = friends.union(other_friends)\nprint(all_friends)\n\nabroad.add('Lara')\nprint(abroad)\n\nfriends_study_science = {'Ellen', 'Renato', 'Bob', 'Rolf'}\nabroad_study_science = abroad.intersection(friends_study_science)\nprint(abroad_study_science)",
"{'Anne'}\nset()\n{'Rolf', 'Maria', 'Bob', 'Anne', 'Jose'}\n{'Rolf', 'Bob', 'Lara'}\n{'Rolf', 'Bob'}\n"
],
[
"colors = {'blue', 'red', 'white', 'black'}\nuser_color = input('Enter a color that you think is in the game: ').lower()\nif user_color in colors:\n print('You are right!')\nelse:\n print(\"Sorry, you're wrong\")",
"You are right!\n"
],
[
"friends = ['Suzy', 'Ellie', 'Sarah', 'Anna', 'Sayuri']\nfriends_starts_s = []\nfriends_starts_s_list_comprehension = []\n\nfor friend in friends:\n if friend.startswith('S'):\n friends_starts_s.append(friend)\nprint(friends_starts_s)\n\n# using list comprehension\nfriends_starts_s_list_comprehension = [friend for friend in friends if friend.startswith('S')]\nprint(friends_starts_s_list_comprehension)",
"['Suzy', 'Sarah', 'Sayuri']\n['Suzy', 'Sarah', 'Sayuri']\n"
],
[
"student_attendance = {'Rolf': 96, 'Bob': 80, 'Anne': 100}\n\nfor student, attendance in student_attendance.items():\n print(f'{student} has {attendance}% of attendance')\n\nattendance_values = student_attendance.values()\nprint(sum(attendance_values) / len(attendance_values))",
"Rolf has 96% of attendance\nBob has 80% of attendance\nAnne has 100% of attendance\n92.0\n"
],
[
"person = ('Jose', 30, 'artist')\nname, _, profession = person\n\nprint(name, profession)",
"Jose artist\n"
],
[
"friends = ['Ella', 'Ellie']\n\ndef add_friend():\n friend_name = input('Enter your friend name: ')\n f = friends + [friend_name]\n print(f) \n\nadd_friend()",
"['Ella', 'Ellie', 'Ellen']\n"
],
[
"def say_hello(name, surname='Doe'):\n print(f'Hello, {name} {surname}.')\n\nsay_hello(surname='Filly', name='Phil') \nsay_hello('Filly', 'Phil') \nsay_hello('Filly', surname='Phil') \nsay_hello('Phil') ",
"Hello, Phil Filly.\nHello, Filly Phil.\nHello, Filly Phil.\nHello, Phil Doe.\n"
],
[
"def add(x, y):\n return x + y\n\n# transform into Lambda\nadd = lambda x, y: x + y\n\nprint(add(5, 7))\n\n# you can also call it right away, like an IIFE\nprint((lambda x, y: x + y)(5, 7))\n\n\n# Another Example\ndef double(x):\n return x * 2\n\nsequence = [1, 3, 5, 7]\ndoubled = [double(x) for x in sequence]\ndoubled_inline = [(lambda x: x * 2)(y) for y in sequence]\nprint(doubled)\nprint(doubled_inline)\n\n# same thing - you can use map, it will go through each number in the sequence and apply double on it, it will then return a list with it\n# NOTE: it is a little bit slower than list comprehension\ndoubled_same = list(map(double, sequence))\n\nprint(doubled_same)",
"12\n12\n[2, 6, 10, 14]\n[2, 6, 10, 14]\n[2, 6, 10, 14]\n"
],
[
"def multiply(*args):\n print(args)\n total = 1\n for arg in args:\n total = total * arg\n return total\n\nmultiply(1,3,5)\n\ndef add(x,y):\n return x + y\n\nnums = [3, 5]\nprint(add(*nums)) # it will destructure the nums when calling add, so 3 will be x and 5 will be y\n\n# Another way\nnums = {'x': 15, 'y': 25}\nprint(add(x=nums['x'], y=nums['y']))\n# instead of doing like that, we can use `**`\nprint(add(**nums))\n\n# Going back to the mulpiply example and using with another function\ndef apply(*args, operator):\n if operator == '*':\n return multiply(*args) # we need to add the `*` to destructure, otherwise we will send a tuple and the multiply function will create a tuple with the tuple\n elif operator == '+':\n return sum(args)\n else:\n return 'No valid operator provided to apply()'\n\nprint(apply(1, 3, 6, 9, operator='*')) # we need to use the keyword argument for operator, otherwise the `*args` from the function will get everything as the args and the operator will be missing.",
"(1, 3, 5)\n8\n40\n40\n(1, 3, 6, 9)\n162\n"
],
[
"def named(**kwargs):\n print(kwargs)\n\nnamed(name='Bob', age=25)\n\n# Another option\ndef named1(name, age):\n print(name, age)\n\ndetails = {'name': 'Bob', 'age': 25}\n\nnamed1(**details)\nnamed(**details)\n\ndef print_nicely(**kwargs):\n named(**kwargs)\n for arg, value in kwargs.items():\n print(f'{arg}: {value}')\n\nprint_nicely(name='Bob', age=25)\n\ndef both(*args, **kwargs):\n print(args)\n print(kwargs)\n\nboth(1, 3, 5, name='Bob', age=25)",
"{'name': 'Bob', 'age': 25}\nBob 25\n{'name': 'Bob', 'age': 25}\n{'name': 'Bob', 'age': 25}\nname: Bob\nage: 25\n(1, 3, 5)\n{'name': 'Bob', 'age': 25}\n"
],
[
"# create the Student class\nclass Student:\n # all objects has the self ('this'), but they can have other properties, like name or grades\n def __init__(self, name, grades):\n self.name = name\n self.grades = grades\n \n def average(self):\n return sum(self.grades) / len(self.grades)\n\n# create a new student\nstudent1 = Student('Matt', (90, 90, 80, 75, 80))\nstudent2 = Student('Rob', (40, 50, 60, 75, 60))\nprint(student1.name)\nprint(student2.grades)\nprint(Student.average(student1)) #same as below\nprint(student1.average())",
"Matt\n(40, 50, 60, 75, 60)\n83.0\n83.0\n"
],
[
"class Person:\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\nbob = Person('Bob', 35)\nprint(bob)\n\nclass Person_modified:\n def __init__(self, name, age):\n self.name = name\n self.age = age\n \n # what to print when we print the string representation of the instance\n def __str__(self):\n return f'I am {self.name}, and I have {self.age} years.'\n \n # this method goal is to be unambiguous and it should return a string that allows us to recreate the object very easily\n def __repr__(self):\n return f\"<Person('{self.name}', {self.age})>\"\n\nbob_modified = Person_modified('Bob', 35)\nprint(bob_modified) # I am Bob, and I have 35.\n# in order to print the __repr__ method, you can call it or comment the __str__ and just print the instance:\nprint(bob_modified.__repr__())",
"<__main__.Person object at 0x10a68e990>\nI am Bob, and I have 35 years.\n<Person('Bob', 35)>\n"
],
[
"class ClassTest:\n def instance_method(self):\n print(f'Called instance_method of {self}')\n \n @classmethod\n def class_method(cls):\n print(f'Called class_method of {cls}')\n \n @staticmethod\n def static_method():\n print('Called static_method')\n\ntest = ClassTest()\n# instance method because it is called on the instance - it will receive 'self', which is the instance and you can use it in the return\ntest.instance_method()\n# class method because it is called on the class - it will receive 'cls', which is the class and you can use it in the return => Very used as factory\nClassTest.class_method()\n# static method is called without 'passing' the object/instance to it, it is really just a function that you pasted inside the class, it doesn't have any info of the class or the instance\nClassTest.static_method()\n\n# Another Example\nclass Book:\n TYPES = ('hardcover', 'paperback')\n\n def __init__(self, name, book_type, weight):\n self.name = name\n self.book_type = book_type\n self.weight = weight\n\n def __repr__(self):\n return f'<Book {self.name}, {self.book_type}, weighing {self.weight}g>'\n \n # factory => create a new instance within the class using the class ==> since cls is the class, you can use Book or cls, but it is best practices to use cls, also because of inheritance\n @classmethod\n def hardcover(cls, name, page_weight):\n return cls(name, Book.TYPES[0], page_weight + 100)\n\n @classmethod\n def paperback(cls, name, page_weight):\n return cls(name, Book.TYPES[1], page_weight + 100)\n\nbook = Book.hardcover('Harry Potter', 1500)\nlight = Book.hardcover('Python', 600)\n\nprint(book)\nprint(light)",
"Called instance_method of <__main__.ClassTest object at 0x107e0f3d0>\nCalled class_method of <class '__main__.ClassTest'>\nCalled static_method\n<Book Harry Potter, hardcover, weighing 1600g>\n<Book Python, hardcover, weighing 700g>\n"
],
[
"class Device:\n def __init__(self, name, connected_by):\n self.name = name\n self.connected_by = connected_by\n self.connected = True\n \n def __str__(self):\n # the '!r' calls the repr method on self.name, so it adds the quotes automatically\n return f'Device {self.name!r} ({self.connected_by})'\n \n def disconnect(self):\n self.connected = False\n print('Disconnected.')\n\n# create a Printer class who inherits from Device, so you have access to all the methods from the Device class and can also add new methods specific to the Printer class\nclass Printer(Device):\n def __init__(self, name, connected_by, capacity):\n # get the parent class with super() and then call the __init__ method of it passing the variables => this way you don't have to copy everything again\n super().__init__(name, connected_by)\n self.capacity = capacity\n self.remaining_pages = capacity\n\n def __str__(self):\n return f'{super().__str__()} ({self.remaining_pages} pages remaining.)'\n \n def print(self, pages):\n if not self.connected:\n print('Your printer is not connected!')\n return\n print(f'Printing {pages} pages')\n self.remaining_pages -= pages\n\nheadphones = Device('Headphones', 'Bluetooth')\nprint(headphones)\n\nprinter = Printer('Printer', 'USB', 500)\nprinter.print(20)\nprint(printer)\n\nprinter.disconnect()\nprinter.print(30)",
"Device 'Headphones' (Bluetooth)\nPrinting 20 pages\nDevice 'Printer' (USB) (480 pages remaining.)\nDisconnected.\nYour printer is not connected!\n"
],
[
"class Bookshelf:\n def __init__(self, quantity):\n self.quantity = quantity\n \n def __str__(self):\n # python ternary operator: 'true' if 'condition' else 'false'\n end = 's.' if self.quantity > 1 else '.'\n return f'Bookshelf with {self.quantity} book{end}'\n\nshelf = Bookshelf(300)\n\n# with inheritance ==> not the best way, you are saying that books are also bookshelves, which is not technically true. Also, you are completely overriding the __str__ method from Bookshelf and you are not using the Bookshelf anywhere.\nclass Book_inheritance(Bookshelf):\n def __init__(self, name, quantity):\n super().__init__(quantity)\n self.name = name\n \n def __str__(self):\n return f'Book {self.name}'\n\nbook = Book_inheritance('Harry Potter', 120)\nprint(book)\n\n# with composition ==> better to use in this case, since with this you mean: a bookshelf has many books. But a book is not a bookshelf.\nclass Bookshelf_composition:\n def __init__(self, *books):\n self.books = books\n \n def __str__(self):\n # python ternary operator: 'true' if 'condition' else 'false'\n end = 's.' if len(self.books) > 1 else '.'\n return f'Bookshelf with {len(self.books)} book{end}'\n\nclass Book_composition:\n def __init__(self, name):\n self.name = name\n \n def __str__(self):\n return f'Book {self.name}'\n\nbook = Book_composition('Harry Potter')\nbook1 = Book_composition('Harry Potter II')\nshelf1 = Bookshelf_composition(book, book1)\nprint(shelf1)",
"Book Harry Potter\nBookshelf with 2 books.\n"
],
[
"from typing import List\n\ndef list_avg(sequence: List) -> float:\n return sum(sequence) / len(sequence)\n\n# list_avg(123)\nlist_avg([1,2,3])",
"_____no_output_____"
],
[
"class TooManyPagesReadError(ValueError):\n pass\n\nclass Book:\n def __init__(self, name: str, page_count: int):\n self.name = name\n self.page_count = page_count\n self.pages_read = 0\n \n def __repr__(self):\n return (\n f'<Book {self.name}, read{self.pages_read} pages out of {self.page_count}>'\n )\n \n def read(self, pages: int):\n if self.pages_read + pages > self.page_count:\n raise TooManyPagesReadError(f'You tried to read {self.pages_read + pages} pages, but this book only has {self.page_count} pages.')\n self.pages_read += pages\n print(f'You have now read {self.pages_read} pages out of {self.page_count}.')\n\npython101 = Book('Python 101', 50)\npython101.read(35)\npython101.read(10)\npython101.read(30)",
"You have now read 35 pages out of 50.\nYou have now read 45 pages out of 50.\n"
],
[
"user = {'username': 'jose', 'access_level': 'guest'}\n\n# unprotected route\ndef get_admin_password():\n return '1234'\n\n# create decorator to protect the route\ndef make_secure(func):\n def secure_function():\n if user['access_level'] == 'admin':\n return func()\n else:\n return f'No admin permissions for {user[\"username\"]}'\n return secure_function\n\nget_admin_password = make_secure(get_admin_password)\nprint(get_admin_password())\n\n# With The '@' syntax\ndef make_secure1(func):\n def secure_function():\n if user['access_level'] == 'admin':\n return func()\n else:\n return f'No admin permissions for {user[\"username\"]}'\n return secure_function\n\n# just add the '@' and the decorator function name to secure this route and then call it\n@make_secure1\ndef get_admin_password1():\n return '1234'\n\nprint(get_admin_password1())\n\n# it will return the name as 'secure_function' and any documentation from get_admin_password1 would be lost and replaced with the secure_function\nprint(get_admin_password1.__name__)\n\n# in order to fix this, we need to import functools and add the decorator before the secure_function\nimport functools\n\ndef make_secure2(func): # decorator\n @functools.wraps(func) #it will protect the name and documentation of the 'func', in this case, the get_admin_password\n def secure_function(): # function that will replace the other one\n if user['access_level'] == 'admin':\n return func()\n else:\n return f'No admin permissions for {user[\"username\"]}'\n return secure_function\n\n@make_secure2\ndef get_admin_password2():\n return '1234'\n\n# returns get_admin_password2\nprint(get_admin_password2.__name__)",
"No admin permissions for jose\nNo admin permissions for jose\nsecure_function\nget_admin_password2\n"
],
[
"from typing import List, Optional\n\nclass Student:\n # this is BAD\n def __init__(self, name: str, grades: List[int] = []):\n self.name = name\n self.grades = grades\n \n def take_exam(self, result: int):\n self.grades.append(result)\n\nbob = Student('Bob')\nmatt = Student('Matt')\nbob.take_exam(90)\nprint(bob.grades) # [90]\nprint(matt.grades) # [90]\n\nclass Student1:\n # this is BAD\n def __init__(self, name: str, grades: Optional[List[int]] = None):\n self.name = name\n self.grades = grades or []\n \n def take_exam(self, result: int):\n self.grades.append(result)\n\nbob1 = Student1('Bob')\nmatt1 = Student1('Matt')\nbob1.take_exam(90)\nprint(bob1.grades) # [90]\nprint(matt1.grades) # []",
"[90]\n[90]\n[90]\n[]\n"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a1f0de9e9b8be683d78913bee8c375c10c62eb2
| 11,007 |
ipynb
|
Jupyter Notebook
|
3_Naive Bayes dan K-Nearest Neighbor/iris_NaiveBayes.ipynb
|
Mirfani340/Machine-Learning
|
3c1ac7ab785ea64f45d1cd55bc94c536a83f9593
|
[
"MIT"
] | null | null | null |
3_Naive Bayes dan K-Nearest Neighbor/iris_NaiveBayes.ipynb
|
Mirfani340/Machine-Learning
|
3c1ac7ab785ea64f45d1cd55bc94c536a83f9593
|
[
"MIT"
] | null | null | null |
3_Naive Bayes dan K-Nearest Neighbor/iris_NaiveBayes.ipynb
|
Mirfani340/Machine-Learning
|
3c1ac7ab785ea64f45d1cd55bc94c536a83f9593
|
[
"MIT"
] | null | null | null | 23.773218 | 102 | 0.412737 |
[
[
[
"#loading the dataset\nfrom sklearn.datasets import load_iris\nimport numpy as np",
"_____no_output_____"
],
[
"iris_data=load_iris()\n# print(iris_data)",
"_____no_output_____"
],
[
"print(iris_data.keys())\n#keys of data set",
"dict_keys(['data', 'target', 'frame', 'target_names', 'DESCR', 'feature_names', 'filename'])\n"
],
[
"print(iris_data['target_names'])\n#the value of the key target_names is an array of strings,\n#containing the species of flowers which we want to predict",
"['setosa' 'versicolor' 'virginica']\n"
],
[
"print(iris_data['feature_names'])\n#The value of feature_names is a list of strings, giving the description of each feature:\n",
"['sepal length (cm)', 'sepal width (cm)', 'petal length (cm)', 'petal width (cm)']\n"
],
[
"X=iris_data['data']\nprint(X)",
"[[5.1 3.5 1.4 0.2]\n [4.9 3. 1.4 0.2]\n [4.7 3.2 1.3 0.2]\n [4.6 3.1 1.5 0.2]\n [5. 3.6 1.4 0.2]\n [5.4 3.9 1.7 0.4]\n [4.6 3.4 1.4 0.3]\n [5. 3.4 1.5 0.2]\n [4.4 2.9 1.4 0.2]\n [4.9 3.1 1.5 0.1]\n [5.4 3.7 1.5 0.2]\n [4.8 3.4 1.6 0.2]\n [4.8 3. 1.4 0.1]\n [4.3 3. 1.1 0.1]\n [5.8 4. 1.2 0.2]\n [5.7 4.4 1.5 0.4]\n [5.4 3.9 1.3 0.4]\n [5.1 3.5 1.4 0.3]\n [5.7 3.8 1.7 0.3]\n [5.1 3.8 1.5 0.3]\n [5.4 3.4 1.7 0.2]\n [5.1 3.7 1.5 0.4]\n [4.6 3.6 1. 0.2]\n [5.1 3.3 1.7 0.5]\n [4.8 3.4 1.9 0.2]\n [5. 3. 1.6 0.2]\n [5. 3.4 1.6 0.4]\n [5.2 3.5 1.5 0.2]\n [5.2 3.4 1.4 0.2]\n [4.7 3.2 1.6 0.2]\n [4.8 3.1 1.6 0.2]\n [5.4 3.4 1.5 0.4]\n [5.2 4.1 1.5 0.1]\n [5.5 4.2 1.4 0.2]\n [4.9 3.1 1.5 0.2]\n [5. 3.2 1.2 0.2]\n [5.5 3.5 1.3 0.2]\n [4.9 3.6 1.4 0.1]\n [4.4 3. 1.3 0.2]\n [5.1 3.4 1.5 0.2]\n [5. 3.5 1.3 0.3]\n [4.5 2.3 1.3 0.3]\n [4.4 3.2 1.3 0.2]\n [5. 3.5 1.6 0.6]\n [5.1 3.8 1.9 0.4]\n [4.8 3. 1.4 0.3]\n [5.1 3.8 1.6 0.2]\n [4.6 3.2 1.4 0.2]\n [5.3 3.7 1.5 0.2]\n [5. 3.3 1.4 0.2]\n [7. 3.2 4.7 1.4]\n [6.4 3.2 4.5 1.5]\n [6.9 3.1 4.9 1.5]\n [5.5 2.3 4. 1.3]\n [6.5 2.8 4.6 1.5]\n [5.7 2.8 4.5 1.3]\n [6.3 3.3 4.7 1.6]\n [4.9 2.4 3.3 1. ]\n [6.6 2.9 4.6 1.3]\n [5.2 2.7 3.9 1.4]\n [5. 2. 3.5 1. ]\n [5.9 3. 4.2 1.5]\n [6. 2.2 4. 1. ]\n [6.1 2.9 4.7 1.4]\n [5.6 2.9 3.6 1.3]\n [6.7 3.1 4.4 1.4]\n [5.6 3. 4.5 1.5]\n [5.8 2.7 4.1 1. ]\n [6.2 2.2 4.5 1.5]\n [5.6 2.5 3.9 1.1]\n [5.9 3.2 4.8 1.8]\n [6.1 2.8 4. 1.3]\n [6.3 2.5 4.9 1.5]\n [6.1 2.8 4.7 1.2]\n [6.4 2.9 4.3 1.3]\n [6.6 3. 4.4 1.4]\n [6.8 2.8 4.8 1.4]\n [6.7 3. 5. 1.7]\n [6. 2.9 4.5 1.5]\n [5.7 2.6 3.5 1. ]\n [5.5 2.4 3.8 1.1]\n [5.5 2.4 3.7 1. ]\n [5.8 2.7 3.9 1.2]\n [6. 2.7 5.1 1.6]\n [5.4 3. 4.5 1.5]\n [6. 3.4 4.5 1.6]\n [6.7 3.1 4.7 1.5]\n [6.3 2.3 4.4 1.3]\n [5.6 3. 4.1 1.3]\n [5.5 2.5 4. 1.3]\n [5.5 2.6 4.4 1.2]\n [6.1 3. 4.6 1.4]\n [5.8 2.6 4. 1.2]\n [5. 2.3 3.3 1. ]\n [5.6 2.7 4.2 1.3]\n [5.7 3. 4.2 1.2]\n [5.7 2.9 4.2 1.3]\n [6.2 2.9 4.3 1.3]\n [5.1 2.5 3. 1.1]\n [5.7 2.8 4.1 1.3]\n [6.3 3.3 6. 
2.5]\n [5.8 2.7 5.1 1.9]\n [7.1 3. 5.9 2.1]\n [6.3 2.9 5.6 1.8]\n [6.5 3. 5.8 2.2]\n [7.6 3. 6.6 2.1]\n [4.9 2.5 4.5 1.7]\n [7.3 2.9 6.3 1.8]\n [6.7 2.5 5.8 1.8]\n [7.2 3.6 6.1 2.5]\n [6.5 3.2 5.1 2. ]\n [6.4 2.7 5.3 1.9]\n [6.8 3. 5.5 2.1]\n [5.7 2.5 5. 2. ]\n [5.8 2.8 5.1 2.4]\n [6.4 3.2 5.3 2.3]\n [6.5 3. 5.5 1.8]\n [7.7 3.8 6.7 2.2]\n [7.7 2.6 6.9 2.3]\n [6. 2.2 5. 1.5]\n [6.9 3.2 5.7 2.3]\n [5.6 2.8 4.9 2. ]\n [7.7 2.8 6.7 2. ]\n [6.3 2.7 4.9 1.8]\n [6.7 3.3 5.7 2.1]\n [7.2 3.2 6. 1.8]\n [6.2 2.8 4.8 1.8]\n [6.1 3. 4.9 1.8]\n [6.4 2.8 5.6 2.1]\n [7.2 3. 5.8 1.6]\n [7.4 2.8 6.1 1.9]\n [7.9 3.8 6.4 2. ]\n [6.4 2.8 5.6 2.2]\n [6.3 2.8 5.1 1.5]\n [6.1 2.6 5.6 1.4]\n [7.7 3. 6.1 2.3]\n [6.3 3.4 5.6 2.4]\n [6.4 3.1 5.5 1.8]\n [6. 3. 4.8 1.8]\n [6.9 3.1 5.4 2.1]\n [6.7 3.1 5.6 2.4]\n [6.9 3.1 5.1 2.3]\n [5.8 2.7 5.1 1.9]\n [6.8 3.2 5.9 2.3]\n [6.7 3.3 5.7 2.5]\n [6.7 3. 5.2 2.3]\n [6.3 2.5 5. 1.9]\n [6.5 3. 5.2 2. ]\n [6.2 3.4 5.4 2.3]\n [5.9 3. 5.1 1.8]]\n"
],
[
"y=iris_data['target']\nprint(y)",
"[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 2\n 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2\n 2 2]\n"
],
[
"iris_data['data'].shape",
"_____no_output_____"
],
[
"from sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)\n#Random state digunakan untuk acak data 0 =false\n#we have assigned random_state as 0 so that each time when we run this ,we get the same output",
"_____no_output_____"
],
[
"X_train.shape",
"_____no_output_____"
],
[
"X_test.shape",
"_____no_output_____"
],
[
"from sklearn.naive_bayes import GaussianNB\ngnb = GaussianNB()",
"_____no_output_____"
],
[
"gnb.fit(X_train,y_train)",
"_____no_output_____"
],
[
"sepel_len=eval(input(\"Enter the sepel length- \"))\nsepel_wid=eval(input(\"Enter the sepel width- \"))\npetal_len=eval(input(\"Enter the petal length- \"))\npetal_wid=eval(input(\"Enter the petal width- \"))\nX_new = np.array([[sepel_len,sepel_wid,petal_len,petal_wid]])",
"_____no_output_____"
],
[
"#calling predict method from knn for making prediction\nprediction = gnb.predict(X_new)\nprint(\"Predicted target name\",iris_data['target_names'][prediction])\n",
"Predicted target name ['virginica']\n"
],
[
"y_pred = gnb.predict(X_test)\nprint(\"Test set prediction:\\n {}\".format(y_pred))",
"Test set prediction:\n [2 1 0 2 0 2 0 1 1 1 2 1 1 1 1 0 1 1 0 0 2 1 0 0 2 0 0 1 1 0 2 1 0 2 2 1 0\n 1]\n"
],
[
"#accuracy\nprint('Test score is: {:.2f}'.format(gnb.score(X_test,y_test)))",
"Test score is: 1.00\n"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a1f1d80ee52322119f03a78c1f0997ed3b22a59
| 13,024 |
ipynb
|
Jupyter Notebook
|
notebooks/unsorted/mean_variance_probability_function.ipynb
|
alexlib/engineering_experiments_measurements_course
|
0b80d90519a2a72547ffd9ef4da2158530016196
|
[
"CC0-1.0"
] | 2 |
2018-05-03T09:41:03.000Z
|
2022-03-26T12:39:27.000Z
|
notebooks/unsorted/mean_variance_probability_function.ipynb
|
alexlib/engineering_experiments_measurements_course
|
0b80d90519a2a72547ffd9ef4da2158530016196
|
[
"CC0-1.0"
] | 1 |
2018-04-22T09:04:13.000Z
|
2018-04-22T09:04:13.000Z
|
notebooks/unsorted/mean_variance_probability_function.ipynb
|
alexlib/engineering_experiments_measurements_course
|
0b80d90519a2a72547ffd9ef4da2158530016196
|
[
"CC0-1.0"
] | 4 |
2015-07-02T11:39:57.000Z
|
2021-05-03T15:49:42.000Z
| 46.348754 | 7,810 | 0.755528 |
[
[
[
"# Statistical parameters using probability density function\n### Given probability density function, $p(x)$\n\n$ p = 2x/b^2$, $0 < x < b$\n\n### The mean value of $x$ is estimated analytically:\n$\\overline{x} = \\int\\limits_0^b x\\, p(x)\\, dx = \\int\\limits_0^b 2x^2/b^2 = \\left. 2x^3/3b^2\\right|_0^b =2b^3/3b^2 = 2b/3$\n\n\n### the median\nmedian: $ \\int\\limits_0^m p(x)\\,dx = 1/2 = \\int\\limits_0^m 2x/b^2\\,dx = \\left. x^2/b^2 \\right|_0^m = m^2/b^2 = 1/2$, $m = b/\\sqrt(2)$\n\n### the second moment\nsecond moment: $x^{(2)} = \\int\\limits_0^b x^2\\, p(x)\\, dx = \\int\\limits_0^b 2x^3/b^2 = \\left. x^4/2b^2\\right|_0^b =b^4/2b^2 = b^2/2$\n\n### the variance is the second moment less the squared mean value\n$var(x) = x^{(2)} - \\overline{x}^2 = b^2/2 - 4b^2/9 = b^2/18$\n\n",
"_____no_output_____"
]
],
[
[
"def p(x,b):\n return 2*x/(b**2)\n",
"_____no_output_____"
],
[
"b = 2\nx = linspace(0,b,200)\ny = p(x,b) ",
"_____no_output_____"
],
[
"plot(x,y)\nxlabel('$x$')\nylabel('$p(x)$')",
"_____no_output_____"
],
[
"# approximate using the numerical integration\nprint trapz(y*x,x)\nprint 2.*b/3",
"1.33335016793\n1.33333333333\n"
],
[
"print trapz(y*x**2,x)\nprint b^2/18",
"2.00005050378\n2\n"
],
[
"import sympy",
"_____no_output_____"
],
[
"sympy.var('x,b,p,m')\np = 2*x/b**2\nprint p",
"2*x/b**2\n"
],
[
"sympy.integrate(p*x,(x,0,b))",
"_____no_output_____"
],
[
"sympy.integrate(p*x**2,(x,0,b))",
"_____no_output_____"
],
[
"sympy.integrate(p,(x,0,m))",
"_____no_output_____"
],
[
"sympy.solve(m**2/b**2 - 0.5,m)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a1f2ac0c1290d43c2fee83ae44db72c203ea84b
| 199,129 |
ipynb
|
Jupyter Notebook
|
TensorFlow/unused/archived_notebooks/Kestrel+Model+50Dropout0.5+V2.ipynb
|
WenzelArifiandi/kestrel
|
6a1b044c1a1e1c591e86294f68e0b79c58dc98a1
|
[
"MIT"
] | 2 |
2021-05-11T06:21:28.000Z
|
2021-12-03T12:32:43.000Z
|
TensorFlow/unused/archived_notebooks/Kestrel+Model+50Dropout0.5+V2.ipynb
|
WenzelArifiandi/kestrel
|
6a1b044c1a1e1c591e86294f68e0b79c58dc98a1
|
[
"MIT"
] | 1 |
2021-11-10T20:14:56.000Z
|
2021-11-10T20:14:56.000Z
|
TensorFlow/unused/archived_notebooks/Kestrel+Model+50Dropout0.5+V2.ipynb
|
WenzelArifiandi/kestrel
|
6a1b044c1a1e1c591e86294f68e0b79c58dc98a1
|
[
"MIT"
] | 1 |
2021-05-16T12:27:20.000Z
|
2021-05-16T12:27:20.000Z
| 199,129 | 199,129 | 0.823215 |
[
[
[
"# Kestrel+Model\n### A [Bangkit 2021](https://grow.google/intl/id_id/bangkit/) Capstone Project\n\nKestrel is a TensorFlow powered American Sign Language translator Android app that will make it easier for anyone to seamlessly communicate with people who have vision or hearing impairments. The Kestrel model builds on the state of the art MobileNetV2 model that is optimized for speed and latency on smartphones to accurately recognize and interpret sign language from the phone’s camera and display the translation through a beautiful, convenient and easily accessible Android app.\n\n# American Sign Language \nFingerspelling alphabets\nfrom the [National Institute on Deafness and Other Communication Disorders (NIDCD)](https://www.nidcd.nih.gov/health/american-sign-language-fingerspelling-alphabets-image) \n\n<table>\n <tr><td>\n <img src=\"https://www.nidcd.nih.gov/sites/default/files/Content%20Images/NIDCD-ASL-hands-2019_large.jpg\"\n alt=\"Fashion MNIST sprite\" width=\"600\">\n </td></tr>\n <tr><td align=\"center\">\n <b>Figure 1.</b> <a href=\"https://www.nidcd.nih.gov/health/american-sign-language-fingerspelling-alphabets-image\">ASL Fingerspelling Alphabets</a> <br/> \n </td></tr>\n</table>",
"_____no_output_____"
]
],
[
[
"from google.colab import drive\ndrive.mount('/content/drive')",
"Mounted at /content/drive\n"
]
],
[
[
"# Initial setup",
"_____no_output_____"
]
],
[
[
"try:\n %tensorflow_version 2.x\nexcept:\n pass\n\nimport numpy as np\nimport matplotlib.pylab as plt\n\nimport tensorflow as tf\nimport tensorflow_hub as hub\n\nimport PIL\nimport PIL.Image\nfrom os import listdir\n\nimport pathlib\n\nfrom tqdm import tqdm\nfrom tensorflow.keras.preprocessing import image_dataset_from_directory\n\nprint(\"\\u2022 Using TensorFlow Version:\", tf.__version__)\nprint(\"\\u2022 Using TensorFlow Hub Version: \", hub.__version__)\nprint('\\u2022 GPU Device Found.' if tf.config.list_physical_devices('GPU') else '\\u2022 GPU Device Not Found. Running on CPU')\n",
"• Using TensorFlow Version: 2.4.1\n• Using TensorFlow Hub Version: 0.9.0\n• GPU Device Found.\n"
]
],
[
[
"# Data preprocessing",
"_____no_output_____"
],
[
"### (Optional) Unzip file on Google Drive",
"_____no_output_____"
]
],
[
[
"import zipfile\nimport pathlib\nzip_dir = pathlib.Path('/content/drive/Shareddrives/Kestrel/A - Copy.zip')\nunzip_dir = pathlib.Path('/content/drive/Shareddrives/Kestrel/A_Unzipped')\nwith zipfile.ZipFile(zip_dir, 'r') as zip_ref:\n zip_ref.extractall(unzip_dir)",
"_____no_output_____"
]
],
[
[
"### Loading images from directory",
"_____no_output_____"
]
],
[
[
"data_dir = pathlib.Path('/Dev/A')",
"_____no_output_____"
]
],
[
[
"### (Optional) Counting the number of images in the dataset",
"_____no_output_____"
]
],
[
[
"image_count = len(list(data_dir.glob('*/color*.png')))\nprint(image_count)",
"12547\n"
]
],
[
[
"### (Optional) Displaying one of the \"a\" letter sign language image:",
"_____no_output_____"
]
],
[
[
"two = list(data_dir.glob('*/color*.png'))\nPIL.Image.open(str(two[0]))",
"_____no_output_____"
]
],
[
[
"# Create the dataset",
"_____no_output_____"
],
[
"Loading the images off disk using [image_dataset_from_directory](https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/image_dataset_from_directory). Define some parameters for the loader:",
"_____no_output_____"
]
],
[
[
"BATCH_SIZE = 30\nIMG_SIZE = (160, 160)",
"_____no_output_____"
]
],
[
[
"### Coursera method using ImageDataGenerator",
"_____no_output_____"
]
],
[
[
"from tensorflow.keras.preprocessing.image import ImageDataGenerator\ntrain_generator = ImageDataGenerator(\n rescale = 1./255,\n\t rotation_range=40,\n width_shift_range=0.2,\n height_shift_range=0.2,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True,\n fill_mode='nearest',\n validation_split=0.2)\nvalidation_generator = ImageDataGenerator(\n rescale = 1./255,\n validation_split=0.2)\ntrain_dataset = train_generator.flow_from_directory(data_dir,\n batch_size = BATCH_SIZE,\n class_mode = 'categorical',\n subset='training', \n target_size = IMG_SIZE,\n shuffle=True,\n )\nvalidation_dataset = validation_generator.flow_from_directory(data_dir,\n batch_size = BATCH_SIZE,\n class_mode = 'categorical',\n subset='validation', \n target_size = IMG_SIZE,\n shuffle=True,\n )",
"Found 10046 images belonging to 24 classes.\nFound 2501 images belonging to 24 classes.\n"
]
],
[
[
"Splitting images for training and validation",
"_____no_output_____"
],
[
"### (Optional) Visualize the data",
"_____no_output_____"
],
[
"Show the first 9 images and labels from the training set:",
"_____no_output_____"
]
],
[
[
"#@title Showing 9 images\nplt.figure(figsize=(10, 10))\nfor images, labels in train_dataset.take(1):\n for i in range(9):\n ax = plt.subplot(3, 3, i + 1)\n plt.imshow(images[i].numpy().astype(\"uint8\"))\n plt.title(class_names[labels[i]])\n plt.axis(\"off\")",
"_____no_output_____"
],
[
"for image_batch, labels_batch in train_dataset:\n print(image_batch.shape)\n print(labels_batch.shape)\n break",
"(30, 160, 160, 3)\n(30, 24)\n"
]
],
[
[
"### (Deprecated) Create a test set",
"_____no_output_____"
],
[
"To create a Test Set, determine how many batches of data are available in the validation set using ```tf.data.experimental.cardinality```, then move 20% of them to a test set.",
"_____no_output_____"
]
],
[
[
"validation_batches = tf.data.experimental.cardinality(validation_dataset)\ntest_dataset = validation_dataset.take(validation_batches // 5)\nvalidation_dataset = validation_dataset.skip(validation_batches // 5)\nprint('Number of validation batches: %d' % tf.data.experimental.cardinality(validation_dataset))\nprint('Number of test batches: %d' % tf.data.experimental.cardinality(test_dataset))",
"Number of validation batches: 64\nNumber of test batches: 15\n"
]
],
[
[
"### Configure the dataset for performance",
"_____no_output_____"
],
[
"Use buffered prefetching to load images from disk without having I/O become blocking. To learn more about this method see the [data performance](https://www.tensorflow.org/guide/data_performance) guide.",
"_____no_output_____"
]
],
[
[
"AUTOTUNE = tf.data.AUTOTUNE\n\ntrain_dataset = train_dataset.cache().prefetch(buffer_size=AUTOTUNE)\nvalidation_dataset = validation_dataset.cache().prefetch(buffer_size=AUTOTUNE)\n# test_dataset = test_dataset.prefetch(buffer_size=AUTOTUNE)",
"_____no_output_____"
]
],
[
[
"# Create the model",
"_____no_output_____"
],
[
"### Create the base model from the pre-trained convnets \nYou will create the base model from the **MobileNet V2** model developed at Google. This is pre-trained on the ImageNet dataset, a large dataset consisting of 1.4M images and 1000 classes. ImageNet is a research training dataset with a wide variety of categories like `jackfruit` and `syringe`. This base of knowledge will help us classify the sign language letters in our specific dataset.\n\nFirst, you need to pick which layer of MobileNet V2 you will use for feature extraction. The very last classification layer (on \"top\", as most diagrams of machine learning models go from bottom to top) is not very useful. Instead, you will follow the common practice to depend on the very last layer before the flatten operation. This layer is called the \"bottleneck layer\". The bottleneck layer features retain more generality as compared to the final/top layer.\n\nFirst, instantiate a MobileNet V2 model pre-loaded with weights trained on ImageNet. By specifying the **include_top=False** argument, you load a network that doesn't include the classification layers at the top, which is ideal for feature extraction.",
"_____no_output_____"
]
],
[
[
"# Create the base model from the pre-trained model MobileNet V2\nIMG_SHAPE = IMG_SIZE + (3,)\nbase_model = tf.keras.applications.MobileNetV2(input_shape=(160, 160, 3),\n include_top=False,\n weights='imagenet')",
"_____no_output_____"
]
],
[
[
"This feature extractor converts each `160 x 160` image into a `5x5x1280` block of features. Let's see what it does to an example batch of images:",
"_____no_output_____"
]
],
[
[
"image_batch, label_batch = next(iter(train_dataset))\nfeature_batch = base_model(image_batch)\nprint(feature_batch.shape)",
"(30, 5, 5, 1280)\n"
]
],
[
[
"### Freeze the convolutional base\nIn this step, you will freeze the convolutional base created from the previous step and use it as a feature extractor. Additionally, you add a classifier on top of it and train the top-level classifier.\n\nIt is important to freeze the convolutional base before you compile and train the model. Freezing (by setting layer.trainable = False) prevents the weights in a given layer from being updated during training. MobileNet V2 has many layers, so setting the entire model's `trainable` flag to False will freeze all of them.",
"_____no_output_____"
]
],
[
[
"base_model.trainable = False",
"_____no_output_____"
],
[
"# Let's take a look at the base model architecture\nbase_model.summary()",
"Model: \"mobilenetv2_1.00_160\"\n__________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n==================================================================================================\ninput_1 (InputLayer) [(None, 160, 160, 3) 0 \n__________________________________________________________________________________________________\nConv1 (Conv2D) (None, 80, 80, 32) 864 input_1[0][0] \n__________________________________________________________________________________________________\nbn_Conv1 (BatchNormalization) (None, 80, 80, 32) 128 Conv1[0][0] \n__________________________________________________________________________________________________\nConv1_relu (ReLU) (None, 80, 80, 32) 0 bn_Conv1[0][0] \n__________________________________________________________________________________________________\nexpanded_conv_depthwise (Depthw (None, 80, 80, 32) 288 Conv1_relu[0][0] \n__________________________________________________________________________________________________\nexpanded_conv_depthwise_BN (Bat (None, 80, 80, 32) 128 expanded_conv_depthwise[0][0] \n__________________________________________________________________________________________________\nexpanded_conv_depthwise_relu (R (None, 80, 80, 32) 0 expanded_conv_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nexpanded_conv_project (Conv2D) (None, 80, 80, 16) 512 expanded_conv_depthwise_relu[0][0\n__________________________________________________________________________________________________\nexpanded_conv_project_BN (Batch (None, 80, 80, 16) 64 expanded_conv_project[0][0] \n__________________________________________________________________________________________________\nblock_1_expand (Conv2D) (None, 80, 80, 96) 1536 expanded_conv_project_BN[0][0] 
\n__________________________________________________________________________________________________\nblock_1_expand_BN (BatchNormali (None, 80, 80, 96) 384 block_1_expand[0][0] \n__________________________________________________________________________________________________\nblock_1_expand_relu (ReLU) (None, 80, 80, 96) 0 block_1_expand_BN[0][0] \n__________________________________________________________________________________________________\nblock_1_pad (ZeroPadding2D) (None, 81, 81, 96) 0 block_1_expand_relu[0][0] \n__________________________________________________________________________________________________\nblock_1_depthwise (DepthwiseCon (None, 40, 40, 96) 864 block_1_pad[0][0] \n__________________________________________________________________________________________________\nblock_1_depthwise_BN (BatchNorm (None, 40, 40, 96) 384 block_1_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_1_depthwise_relu (ReLU) (None, 40, 40, 96) 0 block_1_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nblock_1_project (Conv2D) (None, 40, 40, 24) 2304 block_1_depthwise_relu[0][0] \n__________________________________________________________________________________________________\nblock_1_project_BN (BatchNormal (None, 40, 40, 24) 96 block_1_project[0][0] \n__________________________________________________________________________________________________\nblock_2_expand (Conv2D) (None, 40, 40, 144) 3456 block_1_project_BN[0][0] \n__________________________________________________________________________________________________\nblock_2_expand_BN (BatchNormali (None, 40, 40, 144) 576 block_2_expand[0][0] \n__________________________________________________________________________________________________\nblock_2_expand_relu (ReLU) (None, 40, 40, 144) 0 block_2_expand_BN[0][0] 
\n__________________________________________________________________________________________________\nblock_2_depthwise (DepthwiseCon (None, 40, 40, 144) 1296 block_2_expand_relu[0][0] \n__________________________________________________________________________________________________\nblock_2_depthwise_BN (BatchNorm (None, 40, 40, 144) 576 block_2_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_2_depthwise_relu (ReLU) (None, 40, 40, 144) 0 block_2_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nblock_2_project (Conv2D) (None, 40, 40, 24) 3456 block_2_depthwise_relu[0][0] \n__________________________________________________________________________________________________\nblock_2_project_BN (BatchNormal (None, 40, 40, 24) 96 block_2_project[0][0] \n__________________________________________________________________________________________________\nblock_2_add (Add) (None, 40, 40, 24) 0 block_1_project_BN[0][0] \n block_2_project_BN[0][0] \n__________________________________________________________________________________________________\nblock_3_expand (Conv2D) (None, 40, 40, 144) 3456 block_2_add[0][0] \n__________________________________________________________________________________________________\nblock_3_expand_BN (BatchNormali (None, 40, 40, 144) 576 block_3_expand[0][0] \n__________________________________________________________________________________________________\nblock_3_expand_relu (ReLU) (None, 40, 40, 144) 0 block_3_expand_BN[0][0] \n__________________________________________________________________________________________________\nblock_3_pad (ZeroPadding2D) (None, 41, 41, 144) 0 block_3_expand_relu[0][0] \n__________________________________________________________________________________________________\nblock_3_depthwise (DepthwiseCon (None, 20, 20, 144) 1296 block_3_pad[0][0] 
\n__________________________________________________________________________________________________\nblock_3_depthwise_BN (BatchNorm (None, 20, 20, 144) 576 block_3_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_3_depthwise_relu (ReLU) (None, 20, 20, 144) 0 block_3_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nblock_3_project (Conv2D) (None, 20, 20, 32) 4608 block_3_depthwise_relu[0][0] \n__________________________________________________________________________________________________\nblock_3_project_BN (BatchNormal (None, 20, 20, 32) 128 block_3_project[0][0] \n__________________________________________________________________________________________________\nblock_4_expand (Conv2D) (None, 20, 20, 192) 6144 block_3_project_BN[0][0] \n__________________________________________________________________________________________________\nblock_4_expand_BN (BatchNormali (None, 20, 20, 192) 768 block_4_expand[0][0] \n__________________________________________________________________________________________________\nblock_4_expand_relu (ReLU) (None, 20, 20, 192) 0 block_4_expand_BN[0][0] \n__________________________________________________________________________________________________\nblock_4_depthwise (DepthwiseCon (None, 20, 20, 192) 1728 block_4_expand_relu[0][0] \n__________________________________________________________________________________________________\nblock_4_depthwise_BN (BatchNorm (None, 20, 20, 192) 768 block_4_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_4_depthwise_relu (ReLU) (None, 20, 20, 192) 0 block_4_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nblock_4_project (Conv2D) (None, 20, 20, 32) 6144 block_4_depthwise_relu[0][0] 
\n__________________________________________________________________________________________________\nblock_4_project_BN (BatchNormal (None, 20, 20, 32) 128 block_4_project[0][0] \n__________________________________________________________________________________________________\nblock_4_add (Add) (None, 20, 20, 32) 0 block_3_project_BN[0][0] \n block_4_project_BN[0][0] \n__________________________________________________________________________________________________\nblock_5_expand (Conv2D) (None, 20, 20, 192) 6144 block_4_add[0][0] \n__________________________________________________________________________________________________\nblock_5_expand_BN (BatchNormali (None, 20, 20, 192) 768 block_5_expand[0][0] \n__________________________________________________________________________________________________\nblock_5_expand_relu (ReLU) (None, 20, 20, 192) 0 block_5_expand_BN[0][0] \n__________________________________________________________________________________________________\nblock_5_depthwise (DepthwiseCon (None, 20, 20, 192) 1728 block_5_expand_relu[0][0] \n__________________________________________________________________________________________________\nblock_5_depthwise_BN (BatchNorm (None, 20, 20, 192) 768 block_5_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_5_depthwise_relu (ReLU) (None, 20, 20, 192) 0 block_5_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nblock_5_project (Conv2D) (None, 20, 20, 32) 6144 block_5_depthwise_relu[0][0] \n__________________________________________________________________________________________________\nblock_5_project_BN (BatchNormal (None, 20, 20, 32) 128 block_5_project[0][0] \n__________________________________________________________________________________________________\nblock_5_add (Add) (None, 20, 20, 32) 0 block_4_add[0][0] \n block_5_project_BN[0][0] 
\n__________________________________________________________________________________________________\nblock_6_expand (Conv2D) (None, 20, 20, 192) 6144 block_5_add[0][0] \n__________________________________________________________________________________________________\nblock_6_expand_BN (BatchNormali (None, 20, 20, 192) 768 block_6_expand[0][0] \n__________________________________________________________________________________________________\nblock_6_expand_relu (ReLU) (None, 20, 20, 192) 0 block_6_expand_BN[0][0] \n__________________________________________________________________________________________________\nblock_6_pad (ZeroPadding2D) (None, 21, 21, 192) 0 block_6_expand_relu[0][0] \n__________________________________________________________________________________________________\nblock_6_depthwise (DepthwiseCon (None, 10, 10, 192) 1728 block_6_pad[0][0] \n__________________________________________________________________________________________________\nblock_6_depthwise_BN (BatchNorm (None, 10, 10, 192) 768 block_6_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_6_depthwise_relu (ReLU) (None, 10, 10, 192) 0 block_6_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nblock_6_project (Conv2D) (None, 10, 10, 64) 12288 block_6_depthwise_relu[0][0] \n__________________________________________________________________________________________________\nblock_6_project_BN (BatchNormal (None, 10, 10, 64) 256 block_6_project[0][0] \n__________________________________________________________________________________________________\nblock_7_expand (Conv2D) (None, 10, 10, 384) 24576 block_6_project_BN[0][0] \n__________________________________________________________________________________________________\nblock_7_expand_BN (BatchNormali (None, 10, 10, 384) 1536 block_7_expand[0][0] 
\n__________________________________________________________________________________________________\nblock_7_expand_relu (ReLU) (None, 10, 10, 384) 0 block_7_expand_BN[0][0] \n__________________________________________________________________________________________________\nblock_7_depthwise (DepthwiseCon (None, 10, 10, 384) 3456 block_7_expand_relu[0][0] \n__________________________________________________________________________________________________\nblock_7_depthwise_BN (BatchNorm (None, 10, 10, 384) 1536 block_7_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_7_depthwise_relu (ReLU) (None, 10, 10, 384) 0 block_7_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nblock_7_project (Conv2D) (None, 10, 10, 64) 24576 block_7_depthwise_relu[0][0] \n__________________________________________________________________________________________________\nblock_7_project_BN (BatchNormal (None, 10, 10, 64) 256 block_7_project[0][0] \n__________________________________________________________________________________________________\nblock_7_add (Add) (None, 10, 10, 64) 0 block_6_project_BN[0][0] \n block_7_project_BN[0][0] \n__________________________________________________________________________________________________\nblock_8_expand (Conv2D) (None, 10, 10, 384) 24576 block_7_add[0][0] \n__________________________________________________________________________________________________\nblock_8_expand_BN (BatchNormali (None, 10, 10, 384) 1536 block_8_expand[0][0] \n__________________________________________________________________________________________________\nblock_8_expand_relu (ReLU) (None, 10, 10, 384) 0 block_8_expand_BN[0][0] \n__________________________________________________________________________________________________\nblock_8_depthwise (DepthwiseCon (None, 10, 10, 384) 3456 block_8_expand_relu[0][0] 
\n__________________________________________________________________________________________________\nblock_8_depthwise_BN (BatchNorm (None, 10, 10, 384) 1536 block_8_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_8_depthwise_relu (ReLU) (None, 10, 10, 384) 0 block_8_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nblock_8_project (Conv2D) (None, 10, 10, 64) 24576 block_8_depthwise_relu[0][0] \n__________________________________________________________________________________________________\nblock_8_project_BN (BatchNormal (None, 10, 10, 64) 256 block_8_project[0][0] \n__________________________________________________________________________________________________\nblock_8_add (Add) (None, 10, 10, 64) 0 block_7_add[0][0] \n block_8_project_BN[0][0] \n__________________________________________________________________________________________________\nblock_9_expand (Conv2D) (None, 10, 10, 384) 24576 block_8_add[0][0] \n__________________________________________________________________________________________________\nblock_9_expand_BN (BatchNormali (None, 10, 10, 384) 1536 block_9_expand[0][0] \n__________________________________________________________________________________________________\nblock_9_expand_relu (ReLU) (None, 10, 10, 384) 0 block_9_expand_BN[0][0] \n__________________________________________________________________________________________________\nblock_9_depthwise (DepthwiseCon (None, 10, 10, 384) 3456 block_9_expand_relu[0][0] \n__________________________________________________________________________________________________\nblock_9_depthwise_BN (BatchNorm (None, 10, 10, 384) 1536 block_9_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_9_depthwise_relu (ReLU) (None, 10, 10, 384) 0 block_9_depthwise_BN[0][0] 
\n__________________________________________________________________________________________________\nblock_9_project (Conv2D) (None, 10, 10, 64) 24576 block_9_depthwise_relu[0][0] \n__________________________________________________________________________________________________\nblock_9_project_BN (BatchNormal (None, 10, 10, 64) 256 block_9_project[0][0] \n__________________________________________________________________________________________________\nblock_9_add (Add) (None, 10, 10, 64) 0 block_8_add[0][0] \n block_9_project_BN[0][0] \n__________________________________________________________________________________________________\nblock_10_expand (Conv2D) (None, 10, 10, 384) 24576 block_9_add[0][0] \n__________________________________________________________________________________________________\nblock_10_expand_BN (BatchNormal (None, 10, 10, 384) 1536 block_10_expand[0][0] \n__________________________________________________________________________________________________\nblock_10_expand_relu (ReLU) (None, 10, 10, 384) 0 block_10_expand_BN[0][0] \n__________________________________________________________________________________________________\nblock_10_depthwise (DepthwiseCo (None, 10, 10, 384) 3456 block_10_expand_relu[0][0] \n__________________________________________________________________________________________________\nblock_10_depthwise_BN (BatchNor (None, 10, 10, 384) 1536 block_10_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_10_depthwise_relu (ReLU) (None, 10, 10, 384) 0 block_10_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nblock_10_project (Conv2D) (None, 10, 10, 96) 36864 block_10_depthwise_relu[0][0] \n__________________________________________________________________________________________________\nblock_10_project_BN (BatchNorma (None, 10, 10, 96) 384 block_10_project[0][0] 
\n__________________________________________________________________________________________________\nblock_11_expand (Conv2D) (None, 10, 10, 576) 55296 block_10_project_BN[0][0] \n__________________________________________________________________________________________________\nblock_11_expand_BN (BatchNormal (None, 10, 10, 576) 2304 block_11_expand[0][0] \n__________________________________________________________________________________________________\nblock_11_expand_relu (ReLU) (None, 10, 10, 576) 0 block_11_expand_BN[0][0] \n__________________________________________________________________________________________________\nblock_11_depthwise (DepthwiseCo (None, 10, 10, 576) 5184 block_11_expand_relu[0][0] \n__________________________________________________________________________________________________\nblock_11_depthwise_BN (BatchNor (None, 10, 10, 576) 2304 block_11_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_11_depthwise_relu (ReLU) (None, 10, 10, 576) 0 block_11_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nblock_11_project (Conv2D) (None, 10, 10, 96) 55296 block_11_depthwise_relu[0][0] \n__________________________________________________________________________________________________\nblock_11_project_BN (BatchNorma (None, 10, 10, 96) 384 block_11_project[0][0] \n__________________________________________________________________________________________________\nblock_11_add (Add) (None, 10, 10, 96) 0 block_10_project_BN[0][0] \n block_11_project_BN[0][0] \n__________________________________________________________________________________________________\nblock_12_expand (Conv2D) (None, 10, 10, 576) 55296 block_11_add[0][0] \n__________________________________________________________________________________________________\nblock_12_expand_BN (BatchNormal (None, 10, 10, 576) 2304 
block_12_expand[0][0] \n__________________________________________________________________________________________________\nblock_12_expand_relu (ReLU) (None, 10, 10, 576) 0 block_12_expand_BN[0][0] \n__________________________________________________________________________________________________\nblock_12_depthwise (DepthwiseCo (None, 10, 10, 576) 5184 block_12_expand_relu[0][0] \n__________________________________________________________________________________________________\nblock_12_depthwise_BN (BatchNor (None, 10, 10, 576) 2304 block_12_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_12_depthwise_relu (ReLU) (None, 10, 10, 576) 0 block_12_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nblock_12_project (Conv2D) (None, 10, 10, 96) 55296 block_12_depthwise_relu[0][0] \n__________________________________________________________________________________________________\nblock_12_project_BN (BatchNorma (None, 10, 10, 96) 384 block_12_project[0][0] \n__________________________________________________________________________________________________\nblock_12_add (Add) (None, 10, 10, 96) 0 block_11_add[0][0] \n block_12_project_BN[0][0] \n__________________________________________________________________________________________________\nblock_13_expand (Conv2D) (None, 10, 10, 576) 55296 block_12_add[0][0] \n__________________________________________________________________________________________________\nblock_13_expand_BN (BatchNormal (None, 10, 10, 576) 2304 block_13_expand[0][0] \n__________________________________________________________________________________________________\nblock_13_expand_relu (ReLU) (None, 10, 10, 576) 0 block_13_expand_BN[0][0] \n__________________________________________________________________________________________________\nblock_13_pad (ZeroPadding2D) (None, 11, 11, 576) 0 
block_13_expand_relu[0][0] \n__________________________________________________________________________________________________\nblock_13_depthwise (DepthwiseCo (None, 5, 5, 576) 5184 block_13_pad[0][0] \n__________________________________________________________________________________________________\nblock_13_depthwise_BN (BatchNor (None, 5, 5, 576) 2304 block_13_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_13_depthwise_relu (ReLU) (None, 5, 5, 576) 0 block_13_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nblock_13_project (Conv2D) (None, 5, 5, 160) 92160 block_13_depthwise_relu[0][0] \n__________________________________________________________________________________________________\nblock_13_project_BN (BatchNorma (None, 5, 5, 160) 640 block_13_project[0][0] \n__________________________________________________________________________________________________\nblock_14_expand (Conv2D) (None, 5, 5, 960) 153600 block_13_project_BN[0][0] \n__________________________________________________________________________________________________\nblock_14_expand_BN (BatchNormal (None, 5, 5, 960) 3840 block_14_expand[0][0] \n__________________________________________________________________________________________________\nblock_14_expand_relu (ReLU) (None, 5, 5, 960) 0 block_14_expand_BN[0][0] \n__________________________________________________________________________________________________\nblock_14_depthwise (DepthwiseCo (None, 5, 5, 960) 8640 block_14_expand_relu[0][0] \n__________________________________________________________________________________________________\nblock_14_depthwise_BN (BatchNor (None, 5, 5, 960) 3840 block_14_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_14_depthwise_relu (ReLU) (None, 5, 5, 960) 0 
block_14_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nblock_14_project (Conv2D) (None, 5, 5, 160) 153600 block_14_depthwise_relu[0][0] \n__________________________________________________________________________________________________\nblock_14_project_BN (BatchNorma (None, 5, 5, 160) 640 block_14_project[0][0] \n__________________________________________________________________________________________________\nblock_14_add (Add) (None, 5, 5, 160) 0 block_13_project_BN[0][0] \n block_14_project_BN[0][0] \n__________________________________________________________________________________________________\nblock_15_expand (Conv2D) (None, 5, 5, 960) 153600 block_14_add[0][0] \n__________________________________________________________________________________________________\nblock_15_expand_BN (BatchNormal (None, 5, 5, 960) 3840 block_15_expand[0][0] \n__________________________________________________________________________________________________\nblock_15_expand_relu (ReLU) (None, 5, 5, 960) 0 block_15_expand_BN[0][0] \n__________________________________________________________________________________________________\nblock_15_depthwise (DepthwiseCo (None, 5, 5, 960) 8640 block_15_expand_relu[0][0] \n__________________________________________________________________________________________________\nblock_15_depthwise_BN (BatchNor (None, 5, 5, 960) 3840 block_15_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_15_depthwise_relu (ReLU) (None, 5, 5, 960) 0 block_15_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nblock_15_project (Conv2D) (None, 5, 5, 160) 153600 block_15_depthwise_relu[0][0] \n__________________________________________________________________________________________________\nblock_15_project_BN (BatchNorma (None, 5, 5, 160) 640 
block_15_project[0][0] \n__________________________________________________________________________________________________\nblock_15_add (Add) (None, 5, 5, 160) 0 block_14_add[0][0] \n block_15_project_BN[0][0] \n__________________________________________________________________________________________________\nblock_16_expand (Conv2D) (None, 5, 5, 960) 153600 block_15_add[0][0] \n__________________________________________________________________________________________________\nblock_16_expand_BN (BatchNormal (None, 5, 5, 960) 3840 block_16_expand[0][0] \n__________________________________________________________________________________________________\nblock_16_expand_relu (ReLU) (None, 5, 5, 960) 0 block_16_expand_BN[0][0] \n__________________________________________________________________________________________________\nblock_16_depthwise (DepthwiseCo (None, 5, 5, 960) 8640 block_16_expand_relu[0][0] \n__________________________________________________________________________________________________\nblock_16_depthwise_BN (BatchNor (None, 5, 5, 960) 3840 block_16_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_16_depthwise_relu (ReLU) (None, 5, 5, 960) 0 block_16_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nblock_16_project (Conv2D) (None, 5, 5, 320) 307200 block_16_depthwise_relu[0][0] \n__________________________________________________________________________________________________\nblock_16_project_BN (BatchNorma (None, 5, 5, 320) 1280 block_16_project[0][0] \n__________________________________________________________________________________________________\nConv_1 (Conv2D) (None, 5, 5, 1280) 409600 block_16_project_BN[0][0] \n__________________________________________________________________________________________________\nConv_1_bn (BatchNormalization) (None, 5, 5, 1280) 5120 Conv_1[0][0] 
\n__________________________________________________________________________________________________\nout_relu (ReLU) (None, 5, 5, 1280) 0 Conv_1_bn[0][0] \n==================================================================================================\nTotal params: 2,257,984\nTrainable params: 0\nNon-trainable params: 2,257,984\n__________________________________________________________________________________________________\n"
]
],
[
[
"### Adding new layer to the model",
"_____no_output_____"
]
],
[
[
"last_layer = base_model.get_layer('out_relu')\nprint('last layer output shape: ', last_layer.output_shape)\nlast_output = last_layer.output",
"last layer output shape: (None, 5, 5, 1280)\n"
],
[
"from tensorflow.keras.optimizers import RMSprop\nfrom tensorflow.keras import layers\nfrom tensorflow.keras import Model\n# Flatten the output layer to 1 dimension\nx = layers.Flatten()(last_output)\n# Add a dropout rate of 0.5\n# x = layers.Dropout(0.5)(x) \n# Add a fully connected layer with 1,024 hidden units and ReLU activation\n# x = layers.Dense(1024, activation='relu', kernel_regularizer='l2')(x)\nx = layers.Dense(1024, activation='relu')(x)\n# Add a dropout rate of 0.5\nx = layers.Dropout(0.5)(x) \n# Add a final layer for classification\nx = layers.Dense (24, activation='softmax')(x) \n\nmodel = Model( base_model.input, x) \nmodel.summary()\n",
"Model: \"model\"\n__________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n==================================================================================================\ninput_1 (InputLayer) [(None, 160, 160, 3) 0 \n__________________________________________________________________________________________________\nConv1 (Conv2D) (None, 80, 80, 32) 864 input_1[0][0] \n__________________________________________________________________________________________________\nbn_Conv1 (BatchNormalization) (None, 80, 80, 32) 128 Conv1[0][0] \n__________________________________________________________________________________________________\nConv1_relu (ReLU) (None, 80, 80, 32) 0 bn_Conv1[0][0] \n__________________________________________________________________________________________________\nexpanded_conv_depthwise (Depthw (None, 80, 80, 32) 288 Conv1_relu[0][0] \n__________________________________________________________________________________________________\nexpanded_conv_depthwise_BN (Bat (None, 80, 80, 32) 128 expanded_conv_depthwise[0][0] \n__________________________________________________________________________________________________\nexpanded_conv_depthwise_relu (R (None, 80, 80, 32) 0 expanded_conv_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nexpanded_conv_project (Conv2D) (None, 80, 80, 16) 512 expanded_conv_depthwise_relu[0][0\n__________________________________________________________________________________________________\nexpanded_conv_project_BN (Batch (None, 80, 80, 16) 64 expanded_conv_project[0][0] \n__________________________________________________________________________________________________\nblock_1_expand (Conv2D) (None, 80, 80, 96) 1536 expanded_conv_project_BN[0][0] 
\n__________________________________________________________________________________________________\nblock_1_expand_BN (BatchNormali (None, 80, 80, 96) 384 block_1_expand[0][0] \n__________________________________________________________________________________________________\nblock_1_expand_relu (ReLU) (None, 80, 80, 96) 0 block_1_expand_BN[0][0] \n__________________________________________________________________________________________________\nblock_1_pad (ZeroPadding2D) (None, 81, 81, 96) 0 block_1_expand_relu[0][0] \n__________________________________________________________________________________________________\nblock_1_depthwise (DepthwiseCon (None, 40, 40, 96) 864 block_1_pad[0][0] \n__________________________________________________________________________________________________\nblock_1_depthwise_BN (BatchNorm (None, 40, 40, 96) 384 block_1_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_1_depthwise_relu (ReLU) (None, 40, 40, 96) 0 block_1_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nblock_1_project (Conv2D) (None, 40, 40, 24) 2304 block_1_depthwise_relu[0][0] \n__________________________________________________________________________________________________\nblock_1_project_BN (BatchNormal (None, 40, 40, 24) 96 block_1_project[0][0] \n__________________________________________________________________________________________________\nblock_2_expand (Conv2D) (None, 40, 40, 144) 3456 block_1_project_BN[0][0] \n__________________________________________________________________________________________________\nblock_2_expand_BN (BatchNormali (None, 40, 40, 144) 576 block_2_expand[0][0] \n__________________________________________________________________________________________________\nblock_2_expand_relu (ReLU) (None, 40, 40, 144) 0 block_2_expand_BN[0][0] 
\n__________________________________________________________________________________________________\nblock_2_depthwise (DepthwiseCon (None, 40, 40, 144) 1296 block_2_expand_relu[0][0] \n__________________________________________________________________________________________________\nblock_2_depthwise_BN (BatchNorm (None, 40, 40, 144) 576 block_2_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_2_depthwise_relu (ReLU) (None, 40, 40, 144) 0 block_2_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nblock_2_project (Conv2D) (None, 40, 40, 24) 3456 block_2_depthwise_relu[0][0] \n__________________________________________________________________________________________________\nblock_2_project_BN (BatchNormal (None, 40, 40, 24) 96 block_2_project[0][0] \n__________________________________________________________________________________________________\nblock_2_add (Add) (None, 40, 40, 24) 0 block_1_project_BN[0][0] \n block_2_project_BN[0][0] \n__________________________________________________________________________________________________\nblock_3_expand (Conv2D) (None, 40, 40, 144) 3456 block_2_add[0][0] \n__________________________________________________________________________________________________\nblock_3_expand_BN (BatchNormali (None, 40, 40, 144) 576 block_3_expand[0][0] \n__________________________________________________________________________________________________\nblock_3_expand_relu (ReLU) (None, 40, 40, 144) 0 block_3_expand_BN[0][0] \n__________________________________________________________________________________________________\nblock_3_pad (ZeroPadding2D) (None, 41, 41, 144) 0 block_3_expand_relu[0][0] \n__________________________________________________________________________________________________\nblock_3_depthwise (DepthwiseCon (None, 20, 20, 144) 1296 block_3_pad[0][0] 
\n__________________________________________________________________________________________________\nblock_3_depthwise_BN (BatchNorm (None, 20, 20, 144) 576 block_3_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_3_depthwise_relu (ReLU) (None, 20, 20, 144) 0 block_3_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nblock_3_project (Conv2D) (None, 20, 20, 32) 4608 block_3_depthwise_relu[0][0] \n__________________________________________________________________________________________________\nblock_3_project_BN (BatchNormal (None, 20, 20, 32) 128 block_3_project[0][0] \n__________________________________________________________________________________________________\nblock_4_expand (Conv2D) (None, 20, 20, 192) 6144 block_3_project_BN[0][0] \n__________________________________________________________________________________________________\nblock_4_expand_BN (BatchNormali (None, 20, 20, 192) 768 block_4_expand[0][0] \n__________________________________________________________________________________________________\nblock_4_expand_relu (ReLU) (None, 20, 20, 192) 0 block_4_expand_BN[0][0] \n__________________________________________________________________________________________________\nblock_4_depthwise (DepthwiseCon (None, 20, 20, 192) 1728 block_4_expand_relu[0][0] \n__________________________________________________________________________________________________\nblock_4_depthwise_BN (BatchNorm (None, 20, 20, 192) 768 block_4_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_4_depthwise_relu (ReLU) (None, 20, 20, 192) 0 block_4_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nblock_4_project (Conv2D) (None, 20, 20, 32) 6144 block_4_depthwise_relu[0][0] 
\n__________________________________________________________________________________________________\nblock_4_project_BN (BatchNormal (None, 20, 20, 32) 128 block_4_project[0][0] \n__________________________________________________________________________________________________\nblock_4_add (Add) (None, 20, 20, 32) 0 block_3_project_BN[0][0] \n block_4_project_BN[0][0] \n__________________________________________________________________________________________________\nblock_5_expand (Conv2D) (None, 20, 20, 192) 6144 block_4_add[0][0] \n__________________________________________________________________________________________________\nblock_5_expand_BN (BatchNormali (None, 20, 20, 192) 768 block_5_expand[0][0] \n__________________________________________________________________________________________________\nblock_5_expand_relu (ReLU) (None, 20, 20, 192) 0 block_5_expand_BN[0][0] \n__________________________________________________________________________________________________\nblock_5_depthwise (DepthwiseCon (None, 20, 20, 192) 1728 block_5_expand_relu[0][0] \n__________________________________________________________________________________________________\nblock_5_depthwise_BN (BatchNorm (None, 20, 20, 192) 768 block_5_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_5_depthwise_relu (ReLU) (None, 20, 20, 192) 0 block_5_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nblock_5_project (Conv2D) (None, 20, 20, 32) 6144 block_5_depthwise_relu[0][0] \n__________________________________________________________________________________________________\nblock_5_project_BN (BatchNormal (None, 20, 20, 32) 128 block_5_project[0][0] \n__________________________________________________________________________________________________\nblock_5_add (Add) (None, 20, 20, 32) 0 block_4_add[0][0] \n block_5_project_BN[0][0] 
\n__________________________________________________________________________________________________\nblock_6_expand (Conv2D) (None, 20, 20, 192) 6144 block_5_add[0][0] \n__________________________________________________________________________________________________\nblock_6_expand_BN (BatchNormali (None, 20, 20, 192) 768 block_6_expand[0][0] \n__________________________________________________________________________________________________\nblock_6_expand_relu (ReLU) (None, 20, 20, 192) 0 block_6_expand_BN[0][0] \n__________________________________________________________________________________________________\nblock_6_pad (ZeroPadding2D) (None, 21, 21, 192) 0 block_6_expand_relu[0][0] \n__________________________________________________________________________________________________\nblock_6_depthwise (DepthwiseCon (None, 10, 10, 192) 1728 block_6_pad[0][0] \n__________________________________________________________________________________________________\nblock_6_depthwise_BN (BatchNorm (None, 10, 10, 192) 768 block_6_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_6_depthwise_relu (ReLU) (None, 10, 10, 192) 0 block_6_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nblock_6_project (Conv2D) (None, 10, 10, 64) 12288 block_6_depthwise_relu[0][0] \n__________________________________________________________________________________________________\nblock_6_project_BN (BatchNormal (None, 10, 10, 64) 256 block_6_project[0][0] \n__________________________________________________________________________________________________\nblock_7_expand (Conv2D) (None, 10, 10, 384) 24576 block_6_project_BN[0][0] \n__________________________________________________________________________________________________\nblock_7_expand_BN (BatchNormali (None, 10, 10, 384) 1536 block_7_expand[0][0] 
\n__________________________________________________________________________________________________\nblock_7_expand_relu (ReLU) (None, 10, 10, 384) 0 block_7_expand_BN[0][0] \n__________________________________________________________________________________________________\nblock_7_depthwise (DepthwiseCon (None, 10, 10, 384) 3456 block_7_expand_relu[0][0] \n__________________________________________________________________________________________________\nblock_7_depthwise_BN (BatchNorm (None, 10, 10, 384) 1536 block_7_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_7_depthwise_relu (ReLU) (None, 10, 10, 384) 0 block_7_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nblock_7_project (Conv2D) (None, 10, 10, 64) 24576 block_7_depthwise_relu[0][0] \n__________________________________________________________________________________________________\nblock_7_project_BN (BatchNormal (None, 10, 10, 64) 256 block_7_project[0][0] \n__________________________________________________________________________________________________\nblock_7_add (Add) (None, 10, 10, 64) 0 block_6_project_BN[0][0] \n block_7_project_BN[0][0] \n__________________________________________________________________________________________________\nblock_8_expand (Conv2D) (None, 10, 10, 384) 24576 block_7_add[0][0] \n__________________________________________________________________________________________________\nblock_8_expand_BN (BatchNormali (None, 10, 10, 384) 1536 block_8_expand[0][0] \n__________________________________________________________________________________________________\nblock_8_expand_relu (ReLU) (None, 10, 10, 384) 0 block_8_expand_BN[0][0] \n__________________________________________________________________________________________________\nblock_8_depthwise (DepthwiseCon (None, 10, 10, 384) 3456 block_8_expand_relu[0][0] 
\n__________________________________________________________________________________________________\nblock_8_depthwise_BN (BatchNorm (None, 10, 10, 384) 1536 block_8_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_8_depthwise_relu (ReLU) (None, 10, 10, 384) 0 block_8_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nblock_8_project (Conv2D) (None, 10, 10, 64) 24576 block_8_depthwise_relu[0][0] \n__________________________________________________________________________________________________\nblock_8_project_BN (BatchNormal (None, 10, 10, 64) 256 block_8_project[0][0] \n__________________________________________________________________________________________________\nblock_8_add (Add) (None, 10, 10, 64) 0 block_7_add[0][0] \n block_8_project_BN[0][0] \n__________________________________________________________________________________________________\nblock_9_expand (Conv2D) (None, 10, 10, 384) 24576 block_8_add[0][0] \n__________________________________________________________________________________________________\nblock_9_expand_BN (BatchNormali (None, 10, 10, 384) 1536 block_9_expand[0][0] \n__________________________________________________________________________________________________\nblock_9_expand_relu (ReLU) (None, 10, 10, 384) 0 block_9_expand_BN[0][0] \n__________________________________________________________________________________________________\nblock_9_depthwise (DepthwiseCon (None, 10, 10, 384) 3456 block_9_expand_relu[0][0] \n__________________________________________________________________________________________________\nblock_9_depthwise_BN (BatchNorm (None, 10, 10, 384) 1536 block_9_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_9_depthwise_relu (ReLU) (None, 10, 10, 384) 0 block_9_depthwise_BN[0][0] 
\n__________________________________________________________________________________________________\nblock_9_project (Conv2D) (None, 10, 10, 64) 24576 block_9_depthwise_relu[0][0] \n__________________________________________________________________________________________________\nblock_9_project_BN (BatchNormal (None, 10, 10, 64) 256 block_9_project[0][0] \n__________________________________________________________________________________________________\nblock_9_add (Add) (None, 10, 10, 64) 0 block_8_add[0][0] \n block_9_project_BN[0][0] \n__________________________________________________________________________________________________\nblock_10_expand (Conv2D) (None, 10, 10, 384) 24576 block_9_add[0][0] \n__________________________________________________________________________________________________\nblock_10_expand_BN (BatchNormal (None, 10, 10, 384) 1536 block_10_expand[0][0] \n__________________________________________________________________________________________________\nblock_10_expand_relu (ReLU) (None, 10, 10, 384) 0 block_10_expand_BN[0][0] \n__________________________________________________________________________________________________\nblock_10_depthwise (DepthwiseCo (None, 10, 10, 384) 3456 block_10_expand_relu[0][0] \n__________________________________________________________________________________________________\nblock_10_depthwise_BN (BatchNor (None, 10, 10, 384) 1536 block_10_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_10_depthwise_relu (ReLU) (None, 10, 10, 384) 0 block_10_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nblock_10_project (Conv2D) (None, 10, 10, 96) 36864 block_10_depthwise_relu[0][0] \n__________________________________________________________________________________________________\nblock_10_project_BN (BatchNorma (None, 10, 10, 96) 384 block_10_project[0][0] 
\n__________________________________________________________________________________________________\nblock_11_expand (Conv2D) (None, 10, 10, 576) 55296 block_10_project_BN[0][0] \n__________________________________________________________________________________________________\nblock_11_expand_BN (BatchNormal (None, 10, 10, 576) 2304 block_11_expand[0][0] \n__________________________________________________________________________________________________\nblock_11_expand_relu (ReLU) (None, 10, 10, 576) 0 block_11_expand_BN[0][0] \n__________________________________________________________________________________________________\nblock_11_depthwise (DepthwiseCo (None, 10, 10, 576) 5184 block_11_expand_relu[0][0] \n__________________________________________________________________________________________________\nblock_11_depthwise_BN (BatchNor (None, 10, 10, 576) 2304 block_11_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_11_depthwise_relu (ReLU) (None, 10, 10, 576) 0 block_11_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nblock_11_project (Conv2D) (None, 10, 10, 96) 55296 block_11_depthwise_relu[0][0] \n__________________________________________________________________________________________________\nblock_11_project_BN (BatchNorma (None, 10, 10, 96) 384 block_11_project[0][0] \n__________________________________________________________________________________________________\nblock_11_add (Add) (None, 10, 10, 96) 0 block_10_project_BN[0][0] \n block_11_project_BN[0][0] \n__________________________________________________________________________________________________\nblock_12_expand (Conv2D) (None, 10, 10, 576) 55296 block_11_add[0][0] \n__________________________________________________________________________________________________\nblock_12_expand_BN (BatchNormal (None, 10, 10, 576) 2304 
block_12_expand[0][0] \n__________________________________________________________________________________________________\nblock_12_expand_relu (ReLU) (None, 10, 10, 576) 0 block_12_expand_BN[0][0] \n__________________________________________________________________________________________________\nblock_12_depthwise (DepthwiseCo (None, 10, 10, 576) 5184 block_12_expand_relu[0][0] \n__________________________________________________________________________________________________\nblock_12_depthwise_BN (BatchNor (None, 10, 10, 576) 2304 block_12_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_12_depthwise_relu (ReLU) (None, 10, 10, 576) 0 block_12_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nblock_12_project (Conv2D) (None, 10, 10, 96) 55296 block_12_depthwise_relu[0][0] \n__________________________________________________________________________________________________\nblock_12_project_BN (BatchNorma (None, 10, 10, 96) 384 block_12_project[0][0] \n__________________________________________________________________________________________________\nblock_12_add (Add) (None, 10, 10, 96) 0 block_11_add[0][0] \n block_12_project_BN[0][0] \n__________________________________________________________________________________________________\nblock_13_expand (Conv2D) (None, 10, 10, 576) 55296 block_12_add[0][0] \n__________________________________________________________________________________________________\nblock_13_expand_BN (BatchNormal (None, 10, 10, 576) 2304 block_13_expand[0][0] \n__________________________________________________________________________________________________\nblock_13_expand_relu (ReLU) (None, 10, 10, 576) 0 block_13_expand_BN[0][0] \n__________________________________________________________________________________________________\nblock_13_pad (ZeroPadding2D) (None, 11, 11, 576) 0 
block_13_expand_relu[0][0] \n__________________________________________________________________________________________________\nblock_13_depthwise (DepthwiseCo (None, 5, 5, 576) 5184 block_13_pad[0][0] \n__________________________________________________________________________________________________\nblock_13_depthwise_BN (BatchNor (None, 5, 5, 576) 2304 block_13_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_13_depthwise_relu (ReLU) (None, 5, 5, 576) 0 block_13_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nblock_13_project (Conv2D) (None, 5, 5, 160) 92160 block_13_depthwise_relu[0][0] \n__________________________________________________________________________________________________\nblock_13_project_BN (BatchNorma (None, 5, 5, 160) 640 block_13_project[0][0] \n__________________________________________________________________________________________________\nblock_14_expand (Conv2D) (None, 5, 5, 960) 153600 block_13_project_BN[0][0] \n__________________________________________________________________________________________________\nblock_14_expand_BN (BatchNormal (None, 5, 5, 960) 3840 block_14_expand[0][0] \n__________________________________________________________________________________________________\nblock_14_expand_relu (ReLU) (None, 5, 5, 960) 0 block_14_expand_BN[0][0] \n__________________________________________________________________________________________________\nblock_14_depthwise (DepthwiseCo (None, 5, 5, 960) 8640 block_14_expand_relu[0][0] \n__________________________________________________________________________________________________\nblock_14_depthwise_BN (BatchNor (None, 5, 5, 960) 3840 block_14_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_14_depthwise_relu (ReLU) (None, 5, 5, 960) 0 
block_14_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nblock_14_project (Conv2D) (None, 5, 5, 160) 153600 block_14_depthwise_relu[0][0] \n__________________________________________________________________________________________________\nblock_14_project_BN (BatchNorma (None, 5, 5, 160) 640 block_14_project[0][0] \n__________________________________________________________________________________________________\nblock_14_add (Add) (None, 5, 5, 160) 0 block_13_project_BN[0][0] \n block_14_project_BN[0][0] \n__________________________________________________________________________________________________\nblock_15_expand (Conv2D) (None, 5, 5, 960) 153600 block_14_add[0][0] \n__________________________________________________________________________________________________\nblock_15_expand_BN (BatchNormal (None, 5, 5, 960) 3840 block_15_expand[0][0] \n__________________________________________________________________________________________________\nblock_15_expand_relu (ReLU) (None, 5, 5, 960) 0 block_15_expand_BN[0][0] \n__________________________________________________________________________________________________\nblock_15_depthwise (DepthwiseCo (None, 5, 5, 960) 8640 block_15_expand_relu[0][0] \n__________________________________________________________________________________________________\nblock_15_depthwise_BN (BatchNor (None, 5, 5, 960) 3840 block_15_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_15_depthwise_relu (ReLU) (None, 5, 5, 960) 0 block_15_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nblock_15_project (Conv2D) (None, 5, 5, 160) 153600 block_15_depthwise_relu[0][0] \n__________________________________________________________________________________________________\nblock_15_project_BN (BatchNorma (None, 5, 5, 160) 640 
block_15_project[0][0] \n__________________________________________________________________________________________________\nblock_15_add (Add) (None, 5, 5, 160) 0 block_14_add[0][0] \n block_15_project_BN[0][0] \n__________________________________________________________________________________________________\nblock_16_expand (Conv2D) (None, 5, 5, 960) 153600 block_15_add[0][0] \n__________________________________________________________________________________________________\nblock_16_expand_BN (BatchNormal (None, 5, 5, 960) 3840 block_16_expand[0][0] \n__________________________________________________________________________________________________\nblock_16_expand_relu (ReLU) (None, 5, 5, 960) 0 block_16_expand_BN[0][0] \n__________________________________________________________________________________________________\nblock_16_depthwise (DepthwiseCo (None, 5, 5, 960) 8640 block_16_expand_relu[0][0] \n__________________________________________________________________________________________________\nblock_16_depthwise_BN (BatchNor (None, 5, 5, 960) 3840 block_16_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_16_depthwise_relu (ReLU) (None, 5, 5, 960) 0 block_16_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nblock_16_project (Conv2D) (None, 5, 5, 320) 307200 block_16_depthwise_relu[0][0] \n__________________________________________________________________________________________________\nblock_16_project_BN (BatchNorma (None, 5, 5, 320) 1280 block_16_project[0][0] \n__________________________________________________________________________________________________\nConv_1 (Conv2D) (None, 5, 5, 1280) 409600 block_16_project_BN[0][0] \n__________________________________________________________________________________________________\nConv_1_bn (BatchNormalization) (None, 5, 5, 1280) 5120 Conv_1[0][0] 
\n__________________________________________________________________________________________________\nout_relu (ReLU) (None, 5, 5, 1280) 0 Conv_1_bn[0][0] \n__________________________________________________________________________________________________\nflatten (Flatten) (None, 32000) 0 out_relu[0][0] \n__________________________________________________________________________________________________\ndense (Dense) (None, 1024) 32769024 flatten[0][0] \n__________________________________________________________________________________________________\ndropout (Dropout) (None, 1024) 0 dense[0][0] \n__________________________________________________________________________________________________\ndense_1 (Dense) (None, 24) 24600 dropout[0][0] \n==================================================================================================\nTotal params: 35,051,608\nTrainable params: 32,793,624\nNon-trainable params: 2,257,984\n__________________________________________________________________________________________________\n"
],
[
"# !pip install scipy",
"_____no_output_____"
]
],
[
[
"### Training the model",
"_____no_output_____"
]
],
[
[
"checkpoint_path = \"TensorFlow_Training_Checkpoint/Kestrel_Training_10_50Dropout0.5/cp.ckpt\"",
"_____no_output_____"
],
[
"import os\n# base_learning_rate = 0.0001\ndef get_uncompiled_model():\n model = Model( base_model.input, x) \n return model\n\ndef get_compiled_model():\n model = get_uncompiled_model()\n model.compile(\n optimizer=\"rmsprop\",\n loss=\"categorical_crossentropy\",\n metrics=[\"accuracy\"],\n )\n return model\n\ncheckpoint_dir = os.path.dirname(checkpoint_path)\nif not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n\ndef make_or_restore_model():\n # Either restore the latest model, or create a fresh one\n # if there is no checkpoint available.\n checkpoints = [checkpoint_dir + \"/\" + name for name in os.listdir(checkpoint_dir)]\n if checkpoints:\n latest_checkpoint = max(checkpoints, key=os.path.getctime)\n print(\"Restoring from\", latest_checkpoint)\n #return tf.keras.models.load_model(latest_checkpoint)\n model = Model( base_model.input, x) \n model.load_weights(checkpoint_path)\n model.compile(\n optimizer=\"rmsprop\",\n loss=\"categorical_crossentropy\",\n metrics=[\"accuracy\"],\n )\n return model\n print(\"Creating a new model\")\n return get_compiled_model()\n\n# Create a callback that saves the model's weights\nmodel_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,\n save_weights_only=True,\n monitor='val_accuracy',\n mode='auto',\n save_best_only=True, # Only save a model if `val_loss` has improved.\n verbose=1)\n\nearly_callbacks = [\n tf.keras.callbacks.EarlyStopping(\n # Stop training when `val_loss` is no longer improving\n monitor=\"val_accuracy\",\n # \"no longer improving\" being defined as \"no better than 1e-2 less\"\n # min_delta=1e-2,\n # \"no longer improving\" being further defined as \"for at least 2 epochs\"\n patience=30,\n verbose=1,\n )\n]\n\nmodel = make_or_restore_model()\nhistory = model.fit(train_dataset, \n epochs=50, \n validation_data = validation_dataset, \n verbose = 1, \n callbacks=[model_checkpoint_callback, early_callbacks])# Pass callback to training\n\n# This may generate 
warnings related to saving the state of the optimizer.\n# These warnings (and similar warnings throughout this notebook)\n# are in place to discourage outdated usage, and can be ignored.\n\n# # EXERCISE: Use the tf.saved_model API to save your model in the SavedModel format. \n# export_dir = 'saved_model/2'\n\n# # YOUR CODE HERE\n# tf.saved_model.save(model, export_dir)",
"Restoring from TensorFlow_Training_Checkpoint/Kestrel_Training_10_50Dropout0.5/checkpoint\nEpoch 1/50\n335/335 [==============================] - 81s 231ms/step - loss: 1.2344 - accuracy: 0.7391 - val_loss: 2.8626 - val_accuracy: 0.6062\n\nEpoch 00001: val_accuracy improved from -inf to 0.60616, saving model to TensorFlow_Training_Checkpoint/Kestrel_Training_10_50Dropout0.5\\cp.ckpt\nEpoch 2/50\n335/335 [==============================] - 86s 257ms/step - loss: 1.3046 - accuracy: 0.7344 - val_loss: 2.4742 - val_accuracy: 0.6206\n\nEpoch 00002: val_accuracy improved from 0.60616 to 0.62055, saving model to TensorFlow_Training_Checkpoint/Kestrel_Training_10_50Dropout0.5\\cp.ckpt\nEpoch 3/50\n335/335 [==============================] - 94s 280ms/step - loss: 1.2807 - accuracy: 0.7303 - val_loss: 1.9004 - val_accuracy: 0.6625\n\nEpoch 00003: val_accuracy improved from 0.62055 to 0.66254, saving model to TensorFlow_Training_Checkpoint/Kestrel_Training_10_50Dropout0.5\\cp.ckpt\nEpoch 4/50\n335/335 [==============================] - 100s 299ms/step - loss: 1.2427 - accuracy: 0.7548 - val_loss: 2.2709 - val_accuracy: 0.6669\n\nEpoch 00004: val_accuracy improved from 0.66254 to 0.66693, saving model to TensorFlow_Training_Checkpoint/Kestrel_Training_10_50Dropout0.5\\cp.ckpt\nEpoch 5/50\n335/335 [==============================] - 108s 323ms/step - loss: 1.2090 - accuracy: 0.7513 - val_loss: 2.6979 - val_accuracy: 0.6249\n\nEpoch 00005: val_accuracy did not improve from 0.66693\nEpoch 6/50\n335/335 [==============================] - 121s 360ms/step - loss: 1.2392 - accuracy: 0.7494 - val_loss: 2.8215 - val_accuracy: 0.6645\n\nEpoch 00006: val_accuracy did not improve from 0.66693\nEpoch 7/50\n335/335 [==============================] - 133s 397ms/step - loss: 1.2738 - accuracy: 0.7529 - val_loss: 3.2927 - val_accuracy: 0.5914\n\nEpoch 00007: val_accuracy did not improve from 0.66693\nEpoch 8/50\n335/335 [==============================] - 136s 405ms/step - loss: 1.2660 - 
accuracy: 0.7622 - val_loss: 2.3953 - val_accuracy: 0.6457\n\nEpoch 00008: val_accuracy did not improve from 0.66693\nEpoch 9/50\n335/335 [==============================] - 159s 474ms/step - loss: 1.3314 - accuracy: 0.7521 - val_loss: 2.4619 - val_accuracy: 0.6845\n\nEpoch 00009: val_accuracy improved from 0.66693 to 0.68453, saving model to TensorFlow_Training_Checkpoint/Kestrel_Training_10_50Dropout0.5\\cp.ckpt\nEpoch 10/50\n335/335 [==============================] - 149s 445ms/step - loss: 1.2877 - accuracy: 0.7585 - val_loss: 2.7570 - val_accuracy: 0.6485\n\nEpoch 00010: val_accuracy did not improve from 0.68453\nEpoch 11/50\n335/335 [==============================] - 151s 452ms/step - loss: 1.2726 - accuracy: 0.7599 - val_loss: 2.3454 - val_accuracy: 0.7217\n\nEpoch 00011: val_accuracy improved from 0.68453 to 0.72171, saving model to TensorFlow_Training_Checkpoint/Kestrel_Training_10_50Dropout0.5\\cp.ckpt\nEpoch 12/50\n335/335 [==============================] - 151s 451ms/step - loss: 1.3083 - accuracy: 0.7565 - val_loss: 3.8821 - val_accuracy: 0.6357\n\nEpoch 00012: val_accuracy did not improve from 0.72171\nEpoch 13/50\n335/335 [==============================] - 151s 452ms/step - loss: 1.3355 - accuracy: 0.7591 - val_loss: 2.9525 - val_accuracy: 0.6745\n\nEpoch 00013: val_accuracy did not improve from 0.72171\nEpoch 14/50\n335/335 [==============================] - 154s 459ms/step - loss: 1.2751 - accuracy: 0.7611 - val_loss: 3.1017 - val_accuracy: 0.6769\n\nEpoch 00014: val_accuracy did not improve from 0.72171\nEpoch 15/50\n335/335 [==============================] - 150s 447ms/step - loss: 1.3399 - accuracy: 0.7695 - val_loss: 3.6761 - val_accuracy: 0.6553\n\nEpoch 00015: val_accuracy did not improve from 0.72171\nEpoch 16/50\n335/335 [==============================] - 152s 453ms/step - loss: 1.3490 - accuracy: 0.7739 - val_loss: 2.0660 - val_accuracy: 0.6869\n\nEpoch 00016: val_accuracy did not improve from 0.72171\nEpoch 17/50\n335/335 
[==============================] - 143s 425ms/step - loss: 1.2736 - accuracy: 0.7719 - val_loss: 2.9342 - val_accuracy: 0.6593\n\nEpoch 00017: val_accuracy did not improve from 0.72171\nEpoch 18/50\n335/335 [==============================] - 139s 416ms/step - loss: 1.3426 - accuracy: 0.7631 - val_loss: 3.5292 - val_accuracy: 0.6805\n\nEpoch 00018: val_accuracy did not improve from 0.72171\nEpoch 19/50\n335/335 [==============================] - 143s 425ms/step - loss: 1.3391 - accuracy: 0.7722 - val_loss: 2.5838 - val_accuracy: 0.6917\n\nEpoch 00019: val_accuracy did not improve from 0.72171\nEpoch 20/50\n335/335 [==============================] - 162s 482ms/step - loss: 1.4206 - accuracy: 0.7695 - val_loss: 4.7071 - val_accuracy: 0.6142\n\nEpoch 00020: val_accuracy did not improve from 0.72171\nEpoch 21/50\n335/335 [==============================] - 152s 454ms/step - loss: 1.3702 - accuracy: 0.7739 - val_loss: 4.1329 - val_accuracy: 0.6373\n\nEpoch 00021: val_accuracy did not improve from 0.72171\nEpoch 22/50\n335/335 [==============================] - 159s 473ms/step - loss: 1.4842 - accuracy: 0.7546 - val_loss: 2.7895 - val_accuracy: 0.6513\n\nEpoch 00022: val_accuracy did not improve from 0.72171\nEpoch 23/50\n335/335 [==============================] - 151s 449ms/step - loss: 1.3671 - accuracy: 0.7715 - val_loss: 2.2519 - val_accuracy: 0.6753\n\nEpoch 00023: val_accuracy did not improve from 0.72171\nEpoch 24/50\n335/335 [==============================] - 151s 452ms/step - loss: 1.3698 - accuracy: 0.7673 - val_loss: 2.9007 - val_accuracy: 0.6389\n\nEpoch 00024: val_accuracy did not improve from 0.72171\nEpoch 25/50\n335/335 [==============================] - 154s 460ms/step - loss: 1.3932 - accuracy: 0.7624 - val_loss: 2.7224 - val_accuracy: 0.6421\n\nEpoch 00025: val_accuracy did not improve from 0.72171\nEpoch 26/50\n335/335 [==============================] - 155s 460ms/step - loss: 1.3032 - accuracy: 0.7673 - val_loss: 2.4505 - val_accuracy: 0.6825\n\nEpoch 
00026: val_accuracy did not improve from 0.72171\nEpoch 27/50\n335/335 [==============================] - 149s 445ms/step - loss: 1.3319 - accuracy: 0.7618 - val_loss: 4.1784 - val_accuracy: 0.6621\n\nEpoch 00027: val_accuracy did not improve from 0.72171\nEpoch 28/50\n335/335 [==============================] - 153s 456ms/step - loss: 1.4657 - accuracy: 0.7747 - val_loss: 4.5933 - val_accuracy: 0.6389\n\nEpoch 00028: val_accuracy did not improve from 0.72171\nEpoch 29/50\n335/335 [==============================] - 145s 433ms/step - loss: 1.4491 - accuracy: 0.7758 - val_loss: 3.7241 - val_accuracy: 0.6385\n\nEpoch 00029: val_accuracy did not improve from 0.72171\nEpoch 30/50\n335/335 [==============================] - 146s 436ms/step - loss: 1.4175 - accuracy: 0.7593 - val_loss: 3.5248 - val_accuracy: 0.6501\n\nEpoch 00030: val_accuracy did not improve from 0.72171\nEpoch 31/50\n335/335 [==============================] - 152s 453ms/step - loss: 1.4186 - accuracy: 0.7627 - val_loss: 3.9398 - val_accuracy: 0.6102\n\nEpoch 00031: val_accuracy did not improve from 0.72171\nEpoch 32/50\n335/335 [==============================] - 153s 457ms/step - loss: 1.4926 - accuracy: 0.7678 - val_loss: 4.4802 - val_accuracy: 0.6505\n\nEpoch 00032: val_accuracy did not improve from 0.72171\nEpoch 33/50\n335/335 [==============================] - 152s 453ms/step - loss: 1.5623 - accuracy: 0.7716 - val_loss: 2.9357 - val_accuracy: 0.6617\n\nEpoch 00033: val_accuracy did not improve from 0.72171\nEpoch 34/50\n335/335 [==============================] - 150s 448ms/step - loss: 1.4359 - accuracy: 0.7678 - val_loss: 4.2771 - val_accuracy: 0.6409\n\nEpoch 00034: val_accuracy did not improve from 0.72171\nEpoch 35/50\n335/335 [==============================] - 152s 452ms/step - loss: 1.3430 - accuracy: 0.7617 - val_loss: 3.7129 - val_accuracy: 0.6545\n\nEpoch 00035: val_accuracy did not improve from 0.72171\nEpoch 36/50\n335/335 [==============================] - 151s 451ms/step - loss: 1.4803 
- accuracy: 0.7705 - val_loss: 4.6087 - val_accuracy: 0.6309\n\nEpoch 00036: val_accuracy did not improve from 0.72171\nEpoch 37/50\n335/335 [==============================] - 149s 443ms/step - loss: 1.4164 - accuracy: 0.7662 - val_loss: 3.6378 - val_accuracy: 0.6849\n\nEpoch 00037: val_accuracy did not improve from 0.72171\nEpoch 38/50\n335/335 [==============================] - 149s 444ms/step - loss: 1.4969 - accuracy: 0.7709 - val_loss: 5.2159 - val_accuracy: 0.6329\n\nEpoch 00038: val_accuracy did not improve from 0.72171\nEpoch 39/50\n335/335 [==============================] - 152s 453ms/step - loss: 1.5137 - accuracy: 0.7638 - val_loss: 3.8660 - val_accuracy: 0.6421\n\nEpoch 00039: val_accuracy did not improve from 0.72171\nEpoch 40/50\n335/335 [==============================] - 152s 452ms/step - loss: 1.4915 - accuracy: 0.7724 - val_loss: 5.7044 - val_accuracy: 0.6146\n\nEpoch 00040: val_accuracy did not improve from 0.72171\nEpoch 41/50\n335/335 [==============================] - 142s 424ms/step - loss: 1.5807 - accuracy: 0.7651 - val_loss: 3.9052 - val_accuracy: 0.6621\n\nEpoch 00041: val_accuracy did not improve from 0.72171\nEpoch 00041: early stopping\n"
]
],
[
[
"### Plotting the accuracy and loss",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\nacc = history.history['accuracy']\nval_acc = history.history['val_accuracy']\nloss = history.history['loss']\nval_loss = history.history['val_loss']\n\nepochs = range(len(acc))\n\nplt.plot(epochs, acc, 'r', label='Training accuracy')\nplt.plot(epochs, val_acc, 'b', label='Validation accuracy')\nplt.title('Training and validation accuracy')\nplt.legend(loc=0)\nplt.figure()\n\nplt.plot(epochs, loss, 'r', label='Training Loss')\nplt.plot(epochs, val_loss, 'b', label='Validation Loss')\nplt.title('Training and validation loss')\nplt.legend()\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"# Exporting to TFLite\nYou will now save the model to TFLite. We should note, that you will probably see some warning messages when running the code below. These warnings have to do with software updates and should not cause any errors or prevent your code from running. \n",
"_____no_output_____"
]
],
[
[
"# EXERCISE: Use the tf.saved_model API to save your model in the SavedModel format. \nexport_dir = 'saved_model/10_50Dropout0.5V2'\n\n# YOUR CODE HERE\ntf.saved_model.save(model, export_dir)",
"INFO:tensorflow:Assets written to: saved_model/10_50Dropout0.5V2\\assets\n"
],
[
"# # Select mode of optimization\n# mode = \"Speed\" \n\n# if mode == 'Storage':\n# optimization = tf.lite.Optimize.OPTIMIZE_FOR_SIZE\n# elif mode == 'Speed':\n# optimization = tf.lite.Optimize.OPTIMIZE_FOR_LATENCY\n# else:\n# optimization = tf.lite.Optimize.DEFAULT",
"_____no_output_____"
],
[
"# EXERCISE: Use the TFLiteConverter SavedModel API to initialize the converter\nimport tensorflow as tf\nconverter = tf.lite.TFLiteConverter.from_saved_model(export_dir) # YOUR CODE HERE\n\n# Set the optimzations\nconverter.optimizations = [tf.lite.Optimize.DEFAULT]# YOUR CODE HERE\n\n# Invoke the converter to finally generate the TFLite model\ntflite_model = converter.convert()# YOUR CODE HERE",
"_____no_output_____"
],
[
"tflite_model_file = pathlib.Path('saved_model/10_50Dropout0.5V2/model.tflite')\ntflite_model_file.write_bytes(tflite_model)",
"_____no_output_____"
],
[
"# path_to_pb = \"C:/saved_model/saved_model.pb\"\n# def load_pb(path_to_pb):\n# with tf.gfile.GFile(path_to_pb, \"rb\") as f:\n# graph_def = tf.GraphDef()\n# graph_def.ParseFromString(f.read())\n# with tf.Graph().as_default() as graph:\n# tf.import_graph_def(graph_def, name='')\n# return graph\n# print(graph)",
"_____no_output_____"
]
],
[
[
"# Test the model with TFLite interpreter",
"_____no_output_____"
]
],
[
[
"# Load TFLite model and allocate tensors.\ninterpreter = tf.lite.Interpreter(model_content=tflite_model)\ninterpreter.allocate_tensors()\n\ninput_index = interpreter.get_input_details()[0][\"index\"]\noutput_index = interpreter.get_output_details()[0][\"index\"]",
"_____no_output_____"
],
[
"# Gather results for the randomly sampled test images\npredictions = []\ntest_labels = []\ntest_images = []\ntest_batches = data_dir.map(format_example).batch(1)\nfor img, label in test_batches.take(50):\n interpreter.set_tensor(input_index, img)\n interpreter.invoke()\n predictions.append(interpreter.get_tensor(output_index))\n test_labels.append(label[0])\n test_images.append(np.array(img))",
"_____no_output_____"
],
[
"# Utilities functions for plotting\n\ndef plot_image(i, predictions_array, true_label, img):\n predictions_array, true_label, img = predictions_array[i], true_label[i], img[i]\n plt.grid(False)\n plt.xticks([])\n plt.yticks([])\n \n img = np.squeeze(img)\n \n plt.imshow(img, cmap=plt.cm.binary)\n \n predicted_label = np.argmax(predictions_array)\n \n if predicted_label == true_label.numpy():\n color = 'green'\n else:\n color = 'red'\n \n plt.xlabel(\"{} {:2.0f}% ({})\".format(class_names[predicted_label],\n 100*np.max(predictions_array),\n class_names[true_label]),\n color=color)\n\ndef plot_value_array(i, predictions_array, true_label):\n predictions_array, true_label = predictions_array[i], true_label[i]\n plt.grid(False)\n plt.xticks(list(range(10)))\n plt.yticks([])\n thisplot = plt.bar(range(10), predictions_array[0], color=\"#777777\")\n plt.ylim([0, 1])\n predicted_label = np.argmax(predictions_array[0])\n \n thisplot[predicted_label].set_color('red')\n thisplot[true_label].set_color('blue')",
"_____no_output_____"
],
[
"# Visualize the outputs\n\n# Select index of image to display. Minimum index value is 1 and max index value is 50. \nindex = 5 \n\nplt.figure(figsize=(6,3))\nplt.subplot(1,2,1)\nplot_image(index, predictions, test_labels, test_images)\nplt.subplot(1,2,2)\nplot_value_array(index, predictions, test_labels)\nplt.show()",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
4a1f2bc2d2cbd1f6481ea0f5b3d7003b62cb0058
| 543,797 |
ipynb
|
Jupyter Notebook
|
21 - Customer Analytics in Python/9_Modeling Brand Choice/3_Interpreting the Coefficients (3:15)/Purchase Analytics Predictive Analysis 10.3.ipynb
|
olayinka04/365-data-science-courses
|
7d71215432f0ef07fd3def559d793a6f1938d108
|
[
"Apache-2.0"
] | null | null | null |
21 - Customer Analytics in Python/9_Modeling Brand Choice/3_Interpreting the Coefficients (3:15)/Purchase Analytics Predictive Analysis 10.3.ipynb
|
olayinka04/365-data-science-courses
|
7d71215432f0ef07fd3def559d793a6f1938d108
|
[
"Apache-2.0"
] | null | null | null |
21 - Customer Analytics in Python/9_Modeling Brand Choice/3_Interpreting the Coefficients (3:15)/Purchase Analytics Predictive Analysis 10.3.ipynb
|
olayinka04/365-data-science-courses
|
7d71215432f0ef07fd3def559d793a6f1938d108
|
[
"Apache-2.0"
] | null | null | null | 50.585767 | 46,464 | 0.518754 |
[
[
[
"## Libraries",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\n\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA\nfrom sklearn.cluster import KMeans\n\nimport pickle\n\nfrom sklearn.linear_model import LogisticRegression \n\nimport matplotlib.pyplot as plt\nimport matplotlib.axes as axs\nimport seaborn as sns\nsns.set()",
"_____no_output_____"
]
],
[
[
"## Data Preparation",
"_____no_output_____"
]
],
[
[
"df_purchase = pd.read_csv('purchase data.csv')\n\nscaler = pickle.load(open('scaler.pickle', 'rb'))\npca = pickle.load(open('pca.pickle', 'rb'))\nkmeans_pca = pickle.load(open('kmeans_pca.pickle', 'rb'))\n\nfeatures = df_purchase[['Sex', 'Marital status', 'Age', 'Education', 'Income', 'Occupation', 'Settlement size']]\ndf_purchase_segm_std = scaler.transform(features)\ndf_purchase_segm_pca = pca.transform(df_purchase_segm_std)\npurchase_segm_kmeans_pca = kmeans_pca.predict(df_purchase_segm_pca)\n\ndf_purchase_predictors = df_purchase.copy()\ndf_purchase_predictors['Segment'] = purchase_segm_kmeans_pca\nsegment_dummies = pd.get_dummies(purchase_segm_kmeans_pca, prefix = 'Segment', prefix_sep = '_')\ndf_purchase_predictors = pd.concat([df_purchase_predictors, segment_dummies], axis = 1)\n\ndf_pa = df_purchase_predictors",
"_____no_output_____"
]
],
[
[
"## Purchase Probability Model",
"_____no_output_____"
]
],
[
[
"Y = df_pa['Incidence']",
"_____no_output_____"
],
[
"X = pd.DataFrame()\nX['Mean_Price'] = (df_pa['Price_1'] +\n df_pa['Price_2'] +\n df_pa['Price_3'] +\n df_pa['Price_4'] +\n df_pa['Price_5'] ) / 5",
"_____no_output_____"
],
[
"model_purchase = LogisticRegression(solver = 'sag')\nmodel_purchase.fit(X, Y)",
"_____no_output_____"
],
[
"model_purchase.coef_",
"_____no_output_____"
]
],
[
[
"## Price Elasticity of Purchase Probability",
"_____no_output_____"
]
],
[
[
"df_pa[['Price_1', 'Price_2', 'Price_3', 'Price_4', 'Price_5']].describe()",
"_____no_output_____"
],
[
"price_range = np.arange(0.5, 3.5, 0.01)\nprice_range",
"_____no_output_____"
],
[
"df_price_range = pd.DataFrame(price_range)",
"_____no_output_____"
],
[
"Y_pr = model_purchase.predict_proba(df_price_range)\npurchase_pr = Y_pr[:][:, 1]\npe = model_purchase.coef_[:, 0] * price_range * (1 - purchase_pr)",
"_____no_output_____"
],
[
"df_price_elasticities = pd.DataFrame(price_range)",
"_____no_output_____"
],
[
"df_price_elasticities = df_price_elasticities.rename(columns = {0: \"Price_Point\"})\ndf_price_elasticities['Mean_PE'] = pe\ndf_price_elasticities",
"_____no_output_____"
],
[
"pd.options.display.max_rows = None\ndf_price_elasticities",
"_____no_output_____"
],
[
"plt.figure(figsize = (9, 6))\nplt.plot(price_range, pe, color = 'grey')\nplt.xlabel('Price')\nplt.ylabel('Elasticity')\nplt.title('Price Elasticity of Purchase Probability')",
"_____no_output_____"
]
],
[
[
"## Purchase Probability by Segments",
"_____no_output_____"
],
[
"### $\\color{green}{\\text{Segment 1 - Career-Focused}}$",
"_____no_output_____"
]
],
[
[
"df_pa_segment_1 = df_pa[df_pa['Segment'] == 1]",
"_____no_output_____"
],
[
"Y = df_pa_segment_1['Incidence']",
"_____no_output_____"
],
[
"X = pd.DataFrame()",
"_____no_output_____"
],
[
"X['Mean_Price'] = (df_pa_segment_1['Price_1'] + \n df_pa_segment_1['Price_2'] + \n df_pa_segment_1['Price_3'] + \n df_pa_segment_1['Price_4'] + \n df_pa_segment_1['Price_5']) / 5",
"_____no_output_____"
],
[
"model_incidence_segment_1 = LogisticRegression(solver = 'sag')\nmodel_incidence_segment_1.fit(X, Y)",
"_____no_output_____"
],
[
"model_incidence_segment_1.coef_",
"_____no_output_____"
],
[
"Y_segment_1 = model_incidence_segment_1.predict_proba(df_price_range)\npurchase_pr_segment_1 = Y_segment_1[:][:, 1]\npe_segment_1 = model_incidence_segment_1.coef_[:, 0] * price_range * (1 - purchase_pr_segment_1)",
"_____no_output_____"
]
],
[
[
"### Results",
"_____no_output_____"
]
],
[
[
"df_price_elasticities['PE_Segment_1'] = pe_segment_1",
"_____no_output_____"
],
[
"plt.figure(figsize = (9, 6))\nplt.plot(price_range, pe, color = 'grey')\nplt.plot(price_range, pe_segment_1, color = 'green')\nplt.xlabel('Price')\nplt.ylabel('Elasticity')\nplt.title('Price Elasticity of Purchase Probability')",
"_____no_output_____"
]
],
[
[
"### $\\color{red}{\\text{Segment 2 - Fewer-Opportunities}}$",
"_____no_output_____"
]
],
[
[
"df_pa_segment_2 = df_pa[df_pa['Segment'] == 2]",
"_____no_output_____"
],
[
"Y = df_pa_segment_2['Incidence']\n\nX = pd.DataFrame()\nX['Mean_Price'] = (df_pa_segment_2['Price_1'] + \n df_pa_segment_2['Price_2'] + \n df_pa_segment_2['Price_3'] + \n df_pa_segment_2['Price_4'] + \n df_pa_segment_2['Price_5']) / 5\n\nmodel_incidence_segment2 = LogisticRegression(solver = 'sag')\nmodel_incidence_segment2.fit(X, Y)\n\nmodel_incidence_segment2.coef_\nY_segment_2 = model_incidence_segment2.predict_proba(df_price_range)\npurchase_pr_segment2 = Y_segment_2[:][: , 1]\npe_segment2 = model_incidence_segment2.coef_[:,0] * price_range * ( 1- purchase_pr_segment2)",
"_____no_output_____"
]
],
[
[
"### Results",
"_____no_output_____"
]
],
[
[
"df_price_elasticities['PE_Segment_2'] = pe_segment2",
"_____no_output_____"
],
[
"plt.figure(figsize = (9, 6))\nplt.plot(price_range, pe, color = 'grey')\nplt.plot(price_range, pe_segment_1, color = 'green')\nplt.plot(price_range, pe_segment2, color = 'r')\nplt.xlabel('Price')\nplt.ylabel('Elasticity')\nplt.title('Price Elasticity of Purchase Probability')",
"_____no_output_____"
]
],
[
[
"## ${\\textbf{Homework}}$",
"_____no_output_____"
],
[
"### $\\color{blue}{\\text{Segment 0 - Standard}}$",
"_____no_output_____"
]
],
[
[
"df_pa_segment_0 = df_pa[df_pa['Segment'] == 0]\n\nY = df_pa_segment_0['Incidence']\n\nX = pd.DataFrame()\nX['Mean_Price'] = (df_pa_segment_0['Price_1'] + \n df_pa_segment_0['Price_2'] + \n df_pa_segment_0['Price_3'] + \n df_pa_segment_0['Price_4'] + \n df_pa_segment_0['Price_5']) / 5\n\nmodel_incidence_segment0 = LogisticRegression(solver = 'sag')\nmodel_incidence_segment0.fit(X, Y)\n\nmodel_incidence_segment0.coef_\nY_segment_0 = model_incidence_segment0.predict_proba(df_price_range)\npurchase_pr_segment0 = Y_segment_0[:][: , 1]\npe_segment0 = model_incidence_segment0.coef_[:,0] * price_range *( 1- purchase_pr_segment0)\ndf_price_elasticities.insert(2, column = 'PE_Segment_0', value = pe_segment0)",
"_____no_output_____"
]
],
[
[
"### $\\color{orange}{\\text{Segment 3 - Well-Off}}$",
"_____no_output_____"
]
],
[
[
"df_pa_segment_3 = df_pa[df_pa['Segment'] == 3]\n\nY = df_pa_segment_3['Incidence']\n\nX = pd.DataFrame()\nX['Mean_Price'] = (df_pa_segment_3['Price_1'] + \n df_pa_segment_3['Price_2'] + \n df_pa_segment_3['Price_3'] + \n df_pa_segment_3['Price_4'] + \n df_pa_segment_3['Price_5']) / 5\n\nmodel_incidence_segment3 = LogisticRegression(solver = 'sag')\nmodel_incidence_segment3.fit(X, Y)\n\nmodel_incidence_segment3.coef_\nY_segment_3 = model_incidence_segment2.predict_proba(df_price_range)\npurchase_pr_segment3 = Y_segment_3[:][: , 1]\npe_segment3 = model_incidence_segment3.coef_[:,0] * price_range *( 1- purchase_pr_segment3)\ndf_price_elasticities['PE_Segment_3'] = pe_segment3\ndf_price_elasticities",
"_____no_output_____"
]
],
[
[
"### ${\\textbf{Results}}$",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize = (9, 6))\nplt.plot(price_range, pe, color = 'grey')\nplt.plot(price_range, pe_segment0, color = 'b')\nplt.plot(price_range, pe_segment_1, color = 'green')\nplt.plot(price_range, pe_segment2, color = 'r')\nplt.plot(price_range, pe_segment3, color = 'orange')\nplt.xlabel('Price')\nplt.ylabel('Elasticity')\nplt.title('Price Elasticity of Purchase Probability')",
"_____no_output_____"
]
],
[
[
"## Purchase Probability with Promotion Feature",
"_____no_output_____"
],
[
"### Data Preparation",
"_____no_output_____"
]
],
[
[
"Y = df_pa['Incidence']",
"_____no_output_____"
],
[
"X = pd.DataFrame()\nX['Mean_Price'] = (df_pa['Price_1'] + \n df_pa['Price_2'] + \n df_pa['Price_3'] + \n df_pa['Price_4'] + \n df_pa['Price_5']) / 5",
"_____no_output_____"
],
[
"X['Mean_Promotion'] = (df_pa['Promotion_1'] +\n df_pa['Promotion_2'] +\n df_pa['Promotion_3'] +\n df_pa['Promotion_4'] +\n df_pa['Promotion_5'] ) / 5\nX.head()",
"_____no_output_____"
]
],
[
[
"## Model Estimation",
"_____no_output_____"
]
],
[
[
"model_incidence_promotion = LogisticRegression(solver = 'sag')\nmodel_incidence_promotion.fit(X, Y)\nmodel_incidence_promotion.coef_",
"_____no_output_____"
]
],
[
[
"## Price Elasticity with Promotion",
"_____no_output_____"
]
],
[
[
"df_price_elasticity_promotion = pd.DataFrame(price_range)\ndf_price_elasticity_promotion = df_price_elasticity_promotion.rename(columns = {0: \"Price_Range\"})",
"_____no_output_____"
],
[
"df_price_elasticity_promotion['Promotion'] = 1",
"_____no_output_____"
],
[
"Y_promotion = model_incidence_promotion.predict_proba(df_price_elasticity_promotion)",
"_____no_output_____"
],
[
"promo = Y_promotion[:, 1]\nprice_elasticity_promo = (model_incidence_promotion.coef_[:, 0] * price_range) * (1 - promo)",
"_____no_output_____"
],
[
"df_price_elasticities['Elasticity_Promotion_1'] = price_elasticity_promo\ndf_price_elasticities",
"_____no_output_____"
]
],
[
[
"## Price Elasticity without Promotion",
"_____no_output_____"
]
],
[
[
"df_price_elasticity_promotion_no = pd.DataFrame(price_range)\ndf_price_elasticity_promotion_no = df_price_elasticity_promotion_no.rename(columns = {0: \"Price_Range\"})",
"_____no_output_____"
],
[
"df_price_elasticity_promotion_no['Promotion'] = 0",
"_____no_output_____"
],
[
"Y_no_promo = model_incidence_promotion.predict_proba(df_price_elasticity_promotion_no)",
"_____no_output_____"
],
[
"no_promo = Y_no_promo[: , 1]",
"_____no_output_____"
],
[
"price_elasticity_no_promo = model_incidence_promotion.coef_[:, 0] * price_range *(1- no_promo)",
"_____no_output_____"
],
[
"df_price_elasticities['Elasticity_Promotion_0'] = price_elasticity_no_promo",
"_____no_output_____"
],
[
"plt.figure(figsize = (9, 6))\nplt.plot(price_range, price_elasticity_no_promo)\nplt.plot(price_range, price_elasticity_promo)\nplt.xlabel('Price')\nplt.ylabel('Elasticity')\nplt.title('Price Elasticity of Purchase Probability with and without Promotion')",
"_____no_output_____"
]
],
[
[
"## ${\\textbf{Brand Choice}}$",
"_____no_output_____"
],
[
"### Data Preparation",
"_____no_output_____"
]
],
[
[
"brand_choice = df_pa[df_pa['Incidence'] == 1]",
"_____no_output_____"
],
[
"pd.options.display.max_rows = 100\nbrand_choice",
"_____no_output_____"
],
[
"Y = brand_choice['Brand']",
"_____no_output_____"
],
[
"brand_choice.columns.values",
"_____no_output_____"
],
[
"features = ['Price_1', 'Price_2', 'Price_3', 'Price_4', 'Price_5']\nX = brand_choice[features]",
"_____no_output_____"
],
[
"model_brand_choice = LogisticRegression(solver = 'sag', multi_class = 'multinomial')\nmodel_brand_choice.fit(X, Y)",
"_____no_output_____"
],
[
"model_brand_choice.coef_",
"_____no_output_____"
],
[
"bc_coef = pd.DataFrame(model_brand_choice.coef_)\nbc_coef",
"_____no_output_____"
],
[
"bc_coef = pd.DataFrame(np.transpose(model_brand_choice.coef_))\ncoefficients = ['Coef_Brand_1', 'Coef_Brand_2', 'Coef_Brand_3', 'Coef_Brand_4', 'Coef_Brand_5']\nbc_coef.columns = [coefficients]\nprices = ['Price_1', 'Price_2', 'Price_3', 'Price_4', 'Price_5']\nbc_coef.index = [prices]\nbc_coef = bc_coef.round(2)\nbc_coef",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a1f31997be33dff6d9e0c66fd145e233c3ae198
| 19,384 |
ipynb
|
Jupyter Notebook
|
03-General Pandas/Pandas-Exercises/Pandas Exercise SOLUTIONS.ipynb
|
marcocaldera/python-for-finance
|
4011baeee8803ce1def8774d017afa3fd8f90ca2
|
[
"MIT"
] | null | null | null |
03-General Pandas/Pandas-Exercises/Pandas Exercise SOLUTIONS.ipynb
|
marcocaldera/python-for-finance
|
4011baeee8803ce1def8774d017afa3fd8f90ca2
|
[
"MIT"
] | null | null | null |
03-General Pandas/Pandas-Exercises/Pandas Exercise SOLUTIONS.ipynb
|
marcocaldera/python-for-finance
|
4011baeee8803ce1def8774d017afa3fd8f90ca2
|
[
"MIT"
] | null | null | null | 23.754902 | 172 | 0.417922 |
[
[
[
"# Pandas Exercise - Solutions",
"_____no_output_____"
],
[
"Time to test your new pandas skills! Use the two csv files in this folder to complete the tasks in bold below!\n\n** NOTE: ALL TASKS MUST BE DONE IN ONE LINE OF PANDAS CODE. GOT STUCK? NO PROBLEM! CHECK OUT THE SOLUTIONS LECTURE! **",
"_____no_output_____"
],
[
"** Import pandas and read in the banklist.csv file into a dataframe called banks. **",
"_____no_output_____"
]
],
[
[
"import pandas as pd",
"_____no_output_____"
],
[
"banks = pd.read_csv('banklist.csv')",
"_____no_output_____"
]
],
[
[
"** Show the head of the dataframe **",
"_____no_output_____"
]
],
[
[
"# CODE HERE",
"_____no_output_____"
],
[
"banks.head()",
"_____no_output_____"
]
],
[
[
"** What are the column names? **",
"_____no_output_____"
]
],
[
[
"# CODE HERE",
"_____no_output_____"
],
[
"banks.columns",
"_____no_output_____"
]
],
[
[
"** How many States (ST) are represented in this data set? **",
"_____no_output_____"
]
],
[
[
"# CODE HERE",
"_____no_output_____"
],
[
"banks['ST'].nunique()",
"_____no_output_____"
]
],
[
[
"** Get a list or array of all the states in the data set. **",
"_____no_output_____"
]
],
[
[
"# CODE HERE",
"_____no_output_____"
],
[
"banks['ST'].unique()",
"_____no_output_____"
]
],
[
[
"** What are the top 5 states with the most failed banks? **",
"_____no_output_____"
]
],
[
[
"# CODE HERE",
"_____no_output_____"
],
[
"banks.groupby(\"ST\").count().sort_values('Bank Name',ascending=False).iloc[:5]['Bank Name']",
"_____no_output_____"
]
],
[
[
"** What are the top 5 acquiring institutions? **",
"_____no_output_____"
]
],
[
[
"# CODE HERE",
"_____no_output_____"
],
[
"banks['Acquiring Institution'].value_counts().iloc[:5]",
"_____no_output_____"
]
],
[
[
"** How many banks has the State Bank of Texas acquired? How many of them were actually in Texas?**",
"_____no_output_____"
]
],
[
[
"# CODE HERE",
"_____no_output_____"
],
[
"banks[banks['Acquiring Institution']=='State Bank of Texas']",
"_____no_output_____"
]
],
[
[
"** What is the most common city in California for a bank to fail in?**",
"_____no_output_____"
]
],
[
[
"# CODE HERE",
"_____no_output_____"
],
[
"banks[banks['ST']=='CA'].groupby('City').count().sort_values('Bank Name',ascending=False).head(1)",
"_____no_output_____"
]
],
[
[
"** How many failed banks don't have the word \"Bank\" in their name? **",
"_____no_output_____"
]
],
[
[
"# CODE HERE",
"_____no_output_____"
],
[
"# banks['Bank Name'].apply(lambda name: 'Bank' not in name).value_counts()\nsum(banks['Bank Name'].apply(lambda name: 'Bank' not in name))",
"_____no_output_____"
]
],
[
[
"** How many bank names start with the letter 's' ? **",
"_____no_output_____"
]
],
[
[
"# CODE HERE",
"_____no_output_____"
],
[
"sum(banks['Bank Name'].apply(lambda name:name[0].upper() =='S'))",
"_____no_output_____"
]
],
[
[
"** How many CERT values are above 20000 ? **",
"_____no_output_____"
]
],
[
[
"# CODE HERE",
"_____no_output_____"
],
[
"sum(banks['CERT']>20000)",
"_____no_output_____"
]
],
[
[
"** How many bank names consist of just two words? (e.g. \"First Bank\" , \"Bank Georgia\" )**",
"_____no_output_____"
]
],
[
[
"# CODE HERE",
"_____no_output_____"
],
[
"sum(banks['Bank Name'].apply(lambda name: len(name.split())==2))",
"_____no_output_____"
]
],
[
[
"**Bonus: How many banks closed in the year 2008? (this is hard because we technically haven't learned about time series with pandas yet! Feel free to skip this one!**",
"_____no_output_____"
]
],
[
[
"# CODE HERE",
"_____no_output_____"
],
[
"# WE WILL LEARN A MUCH BETTER WAY TO DO THIS SOON!\nsum(banks['Closing Date'].apply(lambda date: date[-2:]) == '08')\n\n# Better way\n# sum(pd.to_datetime(banks['Closing Date']).apply(lambda date: date.year) == 2008)",
"_____no_output_____"
]
],
[
[
"# GREAT JOB!",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
4a1f34ab88beafa7a2278d7b978720e0b2ee5618
| 86,974 |
ipynb
|
Jupyter Notebook
|
module1-statistics-probability-and-inference/LS_DS_141_Statistics_Probability_and_Inference.ipynb
|
cocoisland/DS-Unit-1-Sprint-4-Statistical-Tests-and-Experiments
|
a15fb98ad38b66698752bfccf8158fe5601cc1b0
|
[
"MIT"
] | null | null | null |
module1-statistics-probability-and-inference/LS_DS_141_Statistics_Probability_and_Inference.ipynb
|
cocoisland/DS-Unit-1-Sprint-4-Statistical-Tests-and-Experiments
|
a15fb98ad38b66698752bfccf8158fe5601cc1b0
|
[
"MIT"
] | null | null | null |
module1-statistics-probability-and-inference/LS_DS_141_Statistics_Probability_and_Inference.ipynb
|
cocoisland/DS-Unit-1-Sprint-4-Statistical-Tests-and-Experiments
|
a15fb98ad38b66698752bfccf8158fe5601cc1b0
|
[
"MIT"
] | null | null | null | 37.424269 | 8,958 | 0.393106 |
[
[
[
"# Lambda School Data Science Module 141\n## Statistics, Probability, and Inference",
"_____no_output_____"
],
[
"## Prepare - examine what's available in SciPy\n\nAs we delve into statistics, we'll be using more libraries - in particular the [stats package from SciPy](https://docs.scipy.org/doc/scipy/reference/tutorial/stats.html).",
"_____no_output_____"
]
],
[
[
"from scipy import stats\ndir(stats)",
"_____no_output_____"
],
[
"# As usual, lots of stuff here! There's our friend, the normal distribution\nnorm = stats.norm()\nprint(norm.mean())\nprint(norm.std())\nprint(norm.var())",
"0.0\n1.0\n1.0\n"
],
[
"# And a new friend - t\nt1 = stats.t(5) # 5 is df \"shape\" parameter\nprint(t1.mean())\nprint(t1.std())\nprint(t1.var())",
"0.0\n1.2909944487358056\n1.6666666666666667\n"
]
],
[
[
"\n\n*(Picture from [Wikipedia](https://en.wikipedia.org/wiki/Student's_t-distribution#/media/File:Student_t_pdf.svg))*\n\nThe t-distribution is \"normal-ish\" - the larger the parameter (which reflects its degrees of freedom - more input data/features will increase it), the closer to true normal.",
"_____no_output_____"
]
],
[
[
"t2 = stats.t(30) # Will be closer to normal\nprint(t2.mean())\nprint(t2.std())\nprint(t2.var())",
"0.0\n1.0350983390135313\n1.0714285714285714\n"
]
],
[
[
"Why is it different from normal? To better reflect the tendencies of small data and situations with unknown population standard deviation. In other words, the normal distribution is still the nice pure ideal in the limit (thanks to the central limit theorem), but the t-distribution is much more useful in many real-world situations.\n\nHistory sidenote - this is \"Student\":\n\n\n\n*(Picture from [Wikipedia](https://en.wikipedia.org/wiki/File:William_Sealy_Gosset.jpg))*\n\nHis real name is William Sealy Gosset, and he published under the pen name \"Student\" because he was not an academic. He was a brewer, working at Guinness and using trial and error to determine the best ways to yield barley. He's also proof that, even 100 years ago, you don't need official credentials to do real data science!",
"_____no_output_____"
],
[
"## Live Lecture - let's perform and interpret a t-test\n\nWe'll generate our own data, so we can know and alter the \"ground truth\" that the t-test should find. We will learn about p-values and how to interpret \"statistical significance\" based on the output of a hypothesis test.",
"_____no_output_____"
]
],
[
[
"# TODO - during class, but please help!\nsurvey_data = [0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1,\n 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1,\n 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0]\n\nimport numpy as np\nimport pandas as pd\n\ndf = pd.DataFrame(survey_data)\ndf.describe()",
"_____no_output_____"
],
[
"df.plot.hist()",
"_____no_output_____"
]
],
[
[
"### Student t-distribution applet \n\nShowing t-statistic data point outputing probability mass under curve, assuming unbiased (mean=0) normal distribution. When probability mass is less than pvalue=0.05%, then null hypothesis can be rejected as random outlier.\n\nhttps://homepage.stat.uiowa.edu/~mbognar/applets/t.html",
"_____no_output_____"
]
],
[
[
"# Now with confidence!\n\nimport scipy\n\n# 0.5 indicate unbiased hypothesis probability of\n# equal 50% chance of liking coke or pepsi\n# However df.mean()=0.66, indicate under no biased influence, \n# 0.66 people like pepsi over coke.\nscipy.stats.ttest_1samp(survey_data, 0.5) \n\n# statistic value (2.364) at point value under fair (0.5) normal distribution,\n# the probability mass is 2.2%, which is less than pvalue of 5%\n# Therefore null hypothesis of no biased random chance of 2.2%, can be rejected.\n# In other word, the survey_data can be accepted with more than 95% confidence.",
"_____no_output_____"
],
[
"# the t-statistic is the ratio of the departure of the estimated value of a\n# parameter from its hypothesized value to its standard error\n\n# We want to calculate: tstat = 2.364321853156195\n# df.std() = 0.478518\n# standard deviation is not adjusted to standard error, meaning the big sample\n# size, the bigger the stardard deviation.\n# stderr - adjust for error. Sample size variation does not affect its value.\n\nsample_stderr = 0.478518 / np.sqrt(len(survey_data))\nsample_mean = 0.660000\nnull_hypothesis_mean = 0.5\n\nt_stat = (sample_mean - null_hypothesis_mean) / sample_stderr\nprint(t_stat)",
"2.364322449518046\n"
],
[
"len(survey_data)",
"_____no_output_____"
],
[
"# Science! Reproducibility...\nimport random\n\ndef make_soda_data(n=50):\n # Fair version\n # return pd.DataFrame([random.randint(0, 1) for _ in range(n)])\n # Unfair version!\n return pd.DataFrame(np.random.binomial(n=1, p=0.5, size=n))",
"_____no_output_____"
],
[
"make_soda_data(n=500).describe()",
"_____no_output_____"
],
[
"t_statistics = []\np_values = []\nn_experiments = 10 # Number of visitors\n\nfor _ in range(n_experiments):\n df = make_soda_data(n=500000)\n ttest = scipy.stats.ttest_1samp(df, 0.5)\n t_statistics.append(ttest.statistic)\n p_values.append(ttest.pvalue)\n\npd.DataFrame(t_statistics).describe()",
"_____no_output_____"
],
[
"pd.DataFrame(p_values).describe()\n",
"_____no_output_____"
],
[
"random.choice([0,1,1]) # 0.666 unfairness favouring 1",
"_____no_output_____"
],
[
"np.random.binomial(100,0.7) # 70% biased favouring 1 out of 100 trials",
"_____no_output_____"
],
[
"np.random.binomial(1,0.7) # 70% biased out 1 trial",
"_____no_output_____"
]
],
[
[
"## Assignment - apply the t-test to real data\n\nYour assignment is to determine which issues have \"statistically significant\" differences between political parties in this [1980s congressional voting data](https://archive.ics.uci.edu/ml/datasets/Congressional+Voting+Records). The data consists of 435 instances (one for each congressperson), a class (democrat or republican), and 16 binary attributes (yes or no for voting for or against certain issues). Be aware - there are missing values!\n\nYour goals:\n\n1. Load and clean the data (or determine the best method to drop observations when running tests)\n2. Using hypothesis testing, find an issue that democrats support more than republicans with p < 0.01\n3. Using hypothesis testing, find an issue that republicans support more than democrats with p < 0.01\n4. Using hypothesis testing, find an issue where the difference between republicans and democrats has p > 0.1 (i.e. there may not be much of a difference)\n\nNote that this data will involve *2 sample* t-tests, because you're comparing averages across two groups (republicans and democrats) rather than a single group against a null hypothesis.\n\nStretch goals:\n\n1. Refactor your code into functions so it's easy to rerun with arbitrary variables\n2. Apply hypothesis testing to your personal project data (for the purposes of this notebook you can type a summary of the hypothesis you formed and tested)",
"_____no_output_____"
]
],
[
[
"# TODO - your code here!\nimport pandas as pd\nimport numpy as np\n\ndf = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/voting-records/house-votes-84.data', \n na_values=['?'],\n header=None)\n\ncolumn_names = [\n 'party',\n 'handicapped_infants',\n 'water_proj',\n 'budget_resolution',\n 'physician_fee',\n 'elsalvador_aid',\n 'religious_schools',\n 'antiban_satellite',\n 'nicaraguan_aid',\n 'mx_missile',\n 'immigration',\n 'synfuels-cutback'\n 'education_spending',\n 'superfund_sue',\n 'crime',\n 'duty_free',\n 'safrica_export'\n ]\n \ndf.head()",
"_____no_output_____"
],
[
"df.fillna(0, inplace=True)\ndf.replace('y',1, inplace=True)\ndf.replace('n',-1, inplace=True)\ndf.head()",
"_____no_output_____"
],
[
"rvotes = df[df[0] == 'republican']\nrvotes.shape",
"_____no_output_____"
],
[
"dvotes = df[ df[0] == 'democrat']\ndvotes.shape",
"_____no_output_____"
],
[
"votes = df.groupby(df[0]).sum()\nvotes.head()",
"_____no_output_____"
],
[
"d_issues=votes.loc['democrat'].abs() > votes.loc['republican'].abs()\nr_issues=votes.loc['democrat'].abs() < votes.loc['republican'].abs()\nd_issues",
"_____no_output_____"
],
[
"vote.columns = colname",
"_____no_output_____"
],
[
"vote.iat[0,1]",
"_____no_output_____"
]
],
[
[
"### T Test (Student Test)\nNull hypothesis of T Test is based on normal distribution centered at 0.5 (no biased).\n* mean = 0.5\n* std = 1.0\n* var = 1.0\n* statistic = 1.96 , pvalue = 0.05\n\nSample T test return (statistics, pvalue)\n* statistic = data point value under normal distribution where pvalue is currently showing.\n* pvalue = tail area under normal distribution where statistic data point value is pointing.\n\n### One sample t test\n* The observed mean (from a single sample) is compared to an expected mean of \npopulation.\n\n## Two sample test\n* One sample compares to another sample.\n\n### Dependent sample t test\n* Sample related to members of other samples.\n* Within group variation.\n* Two groups of measurements are based on the same sample observation. \n* Eg before and after treatment of a drug on a patient.\n\n\n\n### Independent t test\n* Sample unrelated to members of other samples.\n* Differences in means between two groups.\n* Eg blood pressure treatment of patients vs control group who receives placebo.",
"_____no_output_____"
],
[
"### Independent voting\n\n\n* Democrate voting is independent of Republican voting on any particular issues.\n* Independent t test : ttest_ind\n\n",
"_____no_output_____"
]
],
[
[
"from scipy.stats import ttest_ind\n\n",
"_____no_output_____"
],
[
"result = pd.DataFrame(columns=['issues','statistic','pvalue'])\n\nfor i in range(1,16):\n t,p = ttest_ind(rvotes[i], dvotes[i])\n result.loc[i] = [column_names[i],t,p]\n \nresult.head()",
"_____no_output_____"
],
[
"'''\nAdding democrat & republic issues dataframe column to existing result dataframe\n'''\nresult['d_issues'] = d_issues\nresult['r_issues'] = r_issues\nresult\n",
"_____no_output_____"
],
[
"'''\npvalue > 0.01\nNo much different from democrat nor republican support, liken coin flip.\nNull hypothesis of random chance accepted.\nTherefore party support for this issue can not be trusted.\n'''\nresult[result['pvalue'] > 0.01]",
"_____no_output_____"
],
[
"'''\nDemocrat supported issues\n'''\nresult[ result['d_issues'] & result['pvalue'] < 0.01 ] ",
"_____no_output_____"
],
[
"'''\nRepublic Supported issues\n'''\nresult[ result['r_issues'] & result['pvalue'] < 0.01 ]\n",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a1f3b84e43a37257e4788c79ab009535ebab09a
| 10,596 |
ipynb
|
Jupyter Notebook
|
Hive/16-2 Musicians - Medium.ipynb
|
madlogos/sqlzoo
|
6f4a4c7905e245fda40f0d31e0e756805b5ae063
|
[
"MIT"
] | null | null | null |
Hive/16-2 Musicians - Medium.ipynb
|
madlogos/sqlzoo
|
6f4a4c7905e245fda40f0d31e0e756805b5ae063
|
[
"MIT"
] | null | null | null |
Hive/16-2 Musicians - Medium.ipynb
|
madlogos/sqlzoo
|
6f4a4c7905e245fda40f0d31e0e756805b5ae063
|
[
"MIT"
] | null | null | null | 25.410072 | 146 | 0.438184 |
[
[
[
"# Musicians- Medium",
"_____no_output_____"
]
],
[
[
"# Prerequesites\nfrom pyhive import hive\n%load_ext sql\n%sql hive://[email protected]:10000/sqlzoo\n%config SqlMagic.displaylimit = 20",
"_____no_output_____"
]
],
[
[
"## 6.\n**List the names, dates of birth and the instrument played of living musicians who play a instrument which Theo also plays.**",
"_____no_output_____"
]
],
[
[
"%%sql\nWITH ins AS (\n SELECT instrument FROM \n musician JOIN performer ON (\n musician.m_no=performer.perf_is) \n WHERE m_name LIKE 'Theo%'\n)\nSELECT m_name, born, performer.instrument\n FROM musician JOIN performer ON (\n musician.m_no=performer.perf_is) JOIN ins ON\n (ins.instrument=performer.instrument)\n WHERE died IS NULL AND m_name NOT LIKE 'Theo%'\n ORDER BY m_name",
" * hive://[email protected]:10000/sqlzoo\nDone.\n"
]
],
[
[
"## 7.\n**List the name and the number of players for the band whose number of players is greater than the average number of players in each band.**",
"_____no_output_____"
]
],
[
[
"%%sql\nWITH t AS (\n SELECT DISTINCT band_name, perf_is\n FROM band JOIN plays_in ON (\n band.band_no=plays_in.band_id) JOIN performer ON (\n performer.perf_no=plays_in.player)\n), summ AS (\n SELECT band_name, COUNT(*) nmbr \n FROM t GROUP BY band_name\n)\nSELECT summ.band_name, summ.nmbr\n FROM summ JOIN (SELECT AVG(nmbr) mean FROM summ) a\n WHERE summ.nmbr>a.mean",
" * hive://[email protected]:10000/sqlzoo\nDone.\n"
]
],
[
[
"## 8.\n**List the names of musicians who both conduct and compose and live in Britain.**",
"_____no_output_____"
]
],
[
[
"%%sql\nSELECT DISTINCT m_name FROM\n musician JOIN composer ON (\n musician.m_no=composer.comp_is) JOIN\n place ON (musician.living_in=place.place_no) JOIN\n performance ON (\n performance.conducted_by=musician.m_no) \n WHERE place_country IN ('England', 'Scotland')\n ORDER BY m_name",
" * hive://[email protected]:10000/sqlzoo\nDone.\n"
]
],
[
[
"## 9.\n**Show the least commonly played instrument and the number of musicians who play it.**",
"_____no_output_____"
]
],
[
[
"%%sql\nWITH t AS (\n SELECT instrument, COUNT(*) n\n FROM performer JOIN plays_in ON (\n performer.perf_no=plays_in.player) JOIN\n performance ON (performance.gave=plays_in.band_id)\n GROUP By instrument\n ORDER BY n\n LIMIT 1\n)\nSELECT performer.instrument, COUNT(*) n_player\n FROM performer JOIN t ON (performer.instrument=t.instrument)\n GROUP BY performer.instrument",
" * hive://[email protected]:10000/sqlzoo\nDone.\n"
]
],
[
[
"## 10.\n**List the bands that have played music composed by Sue Little; Give the titles of the composition in each case.**",
"_____no_output_____"
]
],
[
[
"%%sql\nWITH t AS (\n SELECT c_no, c_title\n FROM composition JOIN has_composed ON (\n composition.c_no=has_composed.cmpn_no) JOIN\n composer ON (composer.comp_no=has_composed.cmpr_no) JOIN\n musician ON (musician.m_no=composer.comp_is)\n WHERE m_name='Sue Little'\n)\nSELECT band_name, c_title\n FROM t JOIN performance ON (t.c_no=performance.performed) JOIN\n band ON (performance.gave=band.band_no)\n ORDER BY band_name",
" * hive://[email protected]:10000/sqlzoo\nDone.\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a1f42d51bb1a443ac0624222a055a8400782083
| 97,971 |
ipynb
|
Jupyter Notebook
|
Python/Cotidal lines/Untitled.ipynb
|
Pietervanhalem/Pieters-Personal-Repository
|
c31e3c86b1d42f29876455e8553f350d4d527ee5
|
[
"MIT"
] | 2 |
2020-02-26T13:02:44.000Z
|
2020-03-06T07:09:10.000Z
|
Python/Cotidal lines/Untitled.ipynb
|
Pietervanhalem/Pieters-Personal-Repository
|
c31e3c86b1d42f29876455e8553f350d4d527ee5
|
[
"MIT"
] | 11 |
2020-03-06T07:17:10.000Z
|
2022-02-26T22:32:59.000Z
|
Python/Cotidal lines/Untitled.ipynb
|
Pietervanhalem/Personal-Code-Examples
|
c31e3c86b1d42f29876455e8553f350d4d527ee5
|
[
"MIT"
] | null | null | null | 54.701843 | 9,268 | 0.586908 |
[
[
[
"import datetime, time\nimport netCDF4\nimport numpy as np\nfrom IPython.display import clear_output\nimport matplotlib.pyplot as plt\n% matplotlib inline",
"_____no_output_____"
],
[
"def func(name):\n nc = netCDF4.Dataset('E:/Validatiecase TEXEL data/{}.nc'.format(name))\n\n x_domain = (0, -1) # general-waddden sea (250, 380)\n y_domain = (0, -1) # (530,760)\n\n h = nc.variables[\"SEP\"][:, :, :]\n x = nc.variables[\"x\"][:, :]\n y = nc.variables[\"y\"][:, :]\n t = nc.variables[\"time\"][:]\n t = t * 60\n x = x[x_domain[0] : x_domain[1], y_domain[0] : y_domain[1]]\n y = y[x_domain[0] : x_domain[1], y_domain[0] : y_domain[1]]\n h = h[:, x_domain[0] : x_domain[1], y_domain[0] : y_domain[1]]\n \n return x,y,t,h\n\nclass flow_zuno(): # testcasae with rotating flow with tidal component\n def __init__(self): \n number_downloads = 1\n t0 = '08/05/2018 06:00:00'\n t0 = datetime.datetime.strptime(t0, \"%d/%m/%Y %H:%M:%S\").timestamp()\n names = []\n for i in range(number_downloads):\n ana = datetime.datetime.fromtimestamp(t0+i*60*60*6).strftime('%Y%m%d%H%M')\n names.append(ana)\n \n x,y,t,h = func(names[0])\n for i in range(1, len(names)):\n _,_,t1,h1 = func(names[i]) \n t = np.concatenate((t,t1), axis = 0)\n h = np.concatenate((h,h1), axis = 0)\n \n self.h = h\n self.x = x\n self.y = y\n self.t = t\n\nf = flow_zuno()",
"_____no_output_____"
],
[
"plt.contourf(f.x,f.y,f.h[0,:,:])\nplt.xlim(-5,10)\nplt.ylim(48,58)",
"_____no_output_____"
],
[
"import matplotlib.animation as animation\n% matplotlib notebook\n\nfig = plt.figure(figsize=(7.5,5))\nax = plt.subplot()\nax.axis('equal')\n\nplt.xlim(-5,10)\nplt.ylim(48,58)\n\na = 2\n\nq = plt.contourf(f.x,\n f.y,\n f.h[0]\n )\n\n\ndef update_line(i, q):\n QQ = 10\n i = i*1\n Q = int((i)/(len(f.t[:])))\n i = i - Q * (len(f.t[:]))\n plt.title(np.round((f.t[i] - f.t[0])/ 3600 ,1) )\n \n q = plt.contourf(f.x,\n f.y,\n f.h[i]\n )\n\n \nani = animation.FuncAnimation(fig, \n update_line,\n len(f.t) - 11,\n fargs=[q], \n interval=20,\n repeat = True\n )\n\n# ani.save('gif{}.gif'.format(int(time.time())))",
"_____no_output_____"
],
[
"f.h.shape",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code"
]
] |
4a1f460d56b8f8eea2c824e9b6978ca3955ff67f
| 2,633 |
ipynb
|
Jupyter Notebook
|
machine_learning/lecture/week_8/xiv_dimensionality_reduction/quiz-Principal Component Analysis.ipynb
|
Rabeeah/coursera-stanford
|
f6f8087e3db936e1125ae590edcb6d3f189c1d8b
|
[
"MIT"
] | 4 |
2022-01-19T14:46:36.000Z
|
2022-01-20T08:32:11.000Z
|
machine_learning/lecture/week_8/xiv_dimensionality_reduction/quiz-Principal Component Analysis.ipynb
|
DineshBafila-DS/coursera-stanford
|
f6f8087e3db936e1125ae590edcb6d3f189c1d8b
|
[
"MIT"
] | null | null | null |
machine_learning/lecture/week_8/xiv_dimensionality_reduction/quiz-Principal Component Analysis.ipynb
|
DineshBafila-DS/coursera-stanford
|
f6f8087e3db936e1125ae590edcb6d3f189c1d8b
|
[
"MIT"
] | 3 |
2016-01-25T02:58:20.000Z
|
2016-04-10T18:35:26.000Z
| 21.064 | 132 | 0.540828 |
[
[
[
"# Principal Component Analysis",
"_____no_output_____"
],
[
"# Question 1",
"_____no_output_____"
],
[
"<img src=\"images/lec14_quiz01-01.png\">\n<img src=\"images/lec14_quiz01-02.png\">\n<img src=\"images/lec14_quiz01-03.png\">\n\n*Screenshot taken from [Coursera](https://www.coursera.org/learn/machine-learning/exam/B20Bx/principal-component-analysis)*\n\n<!--TEASER_END-->",
"_____no_output_____"
],
[
"# Question 2",
"_____no_output_____"
],
[
"<img src=\"images/lec14_quiz02.png\">\n\n*Screenshot taken from [Coursera](https://www.coursera.org/learn/machine-learning/exam/B20Bx/principal-component-analysis)*\n\n<!--TEASER_END-->",
"_____no_output_____"
],
[
"# Question 3",
"_____no_output_____"
],
[
"<img src=\"images/lec14_quiz03.png\">\n\n*Screenshot taken from [Coursera](https://www.coursera.org/learn/machine-learning/exam/B20Bx/principal-component-analysis)*\n\n<!--TEASER_END-->",
"_____no_output_____"
],
[
"# Question 4",
"_____no_output_____"
],
[
"<img src=\"images/lec14_quiz04.png\">\n\n*Screenshot taken from [Coursera](https://www.coursera.org/learn/machine-learning/exam/B20Bx/principal-component-analysis)*\n\n<!--TEASER_END-->",
"_____no_output_____"
],
[
"# Question 5",
"_____no_output_____"
],
[
"<img src=\"images/lec14_quiz05.png\">\n\n*Screenshot taken from [Coursera](https://www.coursera.org/learn/machine-learning/exam/B20Bx/principal-component-analysis)*\n\n<!--TEASER_END-->",
"_____no_output_____"
]
]
] |
[
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
4a1f50a15fbb27aef6dd51d2c846a52a6d20452e
| 6,425 |
ipynb
|
Jupyter Notebook
|
python-data/unit_tests.ipynb
|
GmZhang3/data-science-ipython-notebooks
|
ab442df29eb917d4bee3e50b8ac294ff1263b14e
|
[
"Apache-2.0"
] | 1 |
2019-01-26T03:00:43.000Z
|
2019-01-26T03:00:43.000Z
|
python-data/unit_tests.ipynb
|
GmZhang3/data-science-ipython-notebooks
|
ab442df29eb917d4bee3e50b8ac294ff1263b14e
|
[
"Apache-2.0"
] | null | null | null |
python-data/unit_tests.ipynb
|
GmZhang3/data-science-ipython-notebooks
|
ab442df29eb917d4bee3e50b8ac294ff1263b14e
|
[
"Apache-2.0"
] | null | null | null | 25.803213 | 178 | 0.513307 |
[
[
[
"This notebook was prepared by [Donne Martin](http://donnemartin.com). Source and license info is on [GitHub](https://github.com/donnemartin/data-science-ipython-notebooks).",
"_____no_output_____"
],
[
"# Nose Unit Tests with IPython Notebook",
"_____no_output_____"
],
[
"## Nose\n\nTesting is a vital part of software development. Nose extends unittest to make testing easier.",
"_____no_output_____"
],
[
"## Install Nose\n\nRun the following command line:",
"_____no_output_____"
]
],
[
[
"!pip install nose",
"Requirement already satisfied: nose in d:\\software\\install\\anaconda2\\lib\\site-packages (1.3.7)\n"
]
],
[
[
"## Create the Code\n\nSave your code to a file with the %%file magic:",
"_____no_output_____"
]
],
[
[
"%%file type_util.py\nclass TypeUtil:\n\n @classmethod\n def is_iterable(cls, obj):\n \"\"\"Determines if obj is iterable.\n\n Useful when writing functions that can accept multiple types of\n input (list, tuple, ndarray, iterator). Pairs well with\n convert_to_list.\n \"\"\"\n try:\n iter(obj)\n return True\n except TypeError:\n return False\n\n @classmethod\n def convert_to_list(cls, obj):\n \"\"\"Converts obj to a list if it is not a list and it is iterable, \n else returns the original obj.\n \"\"\"\n if not isinstance(obj, list) and cls.is_iterable(obj):\n obj = list(obj)\n return obj\n",
"Overwriting type_util.py\n"
]
],
[
[
"## Create the Nose Tests\n\nSave your test to a file with the %%file magic:",
"_____no_output_____"
]
],
[
[
"%%file tests/test_type_util.py\nimport sys\nsys.path.append(\"..\") # Adds higher directory to python modules path.\nimport os\nprint os.getcwd()\nfrom nose.tools import assert_equal\nfrom type_util import TypeUtil\n\n\nclass TestUtil():\n\n def test_is_iterable(self):\n assert_equal(TypeUtil.is_iterable('foo'), True)\n assert_equal(TypeUtil.is_iterable(7), False)\n\n def test_convert_to_list(self):\n assert_equal(isinstance(TypeUtil.convert_to_list('foo'), list), True)\n assert_equal(isinstance(TypeUtil.convert_to_list(7), list), False)",
"Overwriting tests/test_type_util.py\n"
]
],
[
[
"## Run the Nose Tests\n\nRun the following command line:",
"_____no_output_____"
]
],
[
[
"import os\nprint os.getcwd()\n",
"D:\\codeReposities\\data-science-ipython-notebooks\\python-data\n"
],
[
"!nosetests tests/test_type_util.py -v",
"Failure: ImportError (No module named type_util) ... ERROR\n\n======================================================================\nERROR: Failure: ImportError (No module named type_util)\n----------------------------------------------------------------------\nTraceback (most recent call last):\n File \"D:\\software\\install\\Anaconda2\\lib\\site-packages\\nose\\loader.py\", line 418, in loadTestsFromName\n addr.filename, addr.module)\n File \"D:\\software\\install\\Anaconda2\\lib\\site-packages\\nose\\importer.py\", line 47, in importFromPath\n return self.importFromDir(dir_path, fqname)\n File \"D:\\software\\install\\Anaconda2\\lib\\site-packages\\nose\\importer.py\", line 94, in importFromDir\n mod = load_module(part_fqname, fh, filename, desc)\n File \"D:\\codeReposities\\data-science-ipython-notebooks\\python-data\\tests\\test_type_util.py\", line 6, in <module>\n from type_util import TypeUtil\nImportError: No module named type_util\n\n----------------------------------------------------------------------\nRan 1 test in 0.000s\n\nFAILED (errors=1)\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
4a1f5c9b37fe621a4f17839b4742398b0336b25a
| 5,863 |
ipynb
|
Jupyter Notebook
|
notebooks/bigquery:trafikkdata.standardized.timetrafikk.ipynb
|
svvsaga/notebooks-public
|
13f722a0f91304c0d5f17a41ea5391e3990df835
|
[
"MIT"
] | null | null | null |
notebooks/bigquery:trafikkdata.standardized.timetrafikk.ipynb
|
svvsaga/notebooks-public
|
13f722a0f91304c0d5f17a41ea5391e3990df835
|
[
"MIT"
] | null | null | null |
notebooks/bigquery:trafikkdata.standardized.timetrafikk.ipynb
|
svvsaga/notebooks-public
|
13f722a0f91304c0d5f17a41ea5391e3990df835
|
[
"MIT"
] | null | null | null | 28.6 | 187 | 0.581102 |
[
[
[
"project = 'saga-trafikkdata-prod-pz8l'\nuse_colab_auth = True\n\n# Legg inn ditt eget prosjekt her, f.eks. 'saga-olanor-playground-ab12'\nbq_job_project = ''",
"_____no_output_____"
],
[
"if (use_colab_auth):\n from google.colab import auth\n auth.authenticate_user()\n print('Authenticated')",
"_____no_output_____"
],
[
"import warnings\nfrom google.cloud import bigquery\n\nwarnings.filterwarnings('ignore')\nclient = bigquery.Client(project=bq_job_project)",
"_____no_output_____"
]
],
[
[
"Denne spørringen henter enkelt og greit ut timetrafikk for trafikkregistreringspunktet \"HØVIK\" på datoen 2. februar 2022, totalt for begge kjøreretninger.",
"_____no_output_____"
]
],
[
[
"query = f\"\"\"\nSELECT\n name AS tellepunkt,\n EXTRACT(DATE FROM `from` AT TIME ZONE \"Europe/Oslo\") AS dato,\n EXTRACT(HOUR FROM `from` AT TIME ZONE \"Europe/Oslo\") AS time,\n total.volumeNumbers.volume as timetrafikk\nFROM `{project}.standardized.timetrafikk`\nWHERE name = \"HØVIK\"\nAND DATE(`from`, \"Europe/Oslo\") = \"2022-02-02\"\nORDER BY dato, time\n\"\"\" \n\nprint(query)\n\nclient.query(query).to_dataframe()",
"_____no_output_____"
]
],
[
[
"Følgende spørring henter data for tellepunktet på E18 ved Høvik og beregner gjennomsnittlig døgntrafikk i januar i år per ukedag, kjøreretning og lengdeklasse.",
"_____no_output_____"
]
],
[
[
"query = f\"\"\"\nSELECT\n name AS tellepunkt,\n flat_directions.heading AS retning,\n EXTRACT(DAYOFWEEK FROM `from` AT TIME ZONE \"Europe/Oslo\") AS ukedag,\n CAST(AVG(IF(flattened.lengthRange.upperBound = 5.6, flattened.total.volumeNumbers.volume, NULL)) * 24 AS INT64) AS korte_kjoretoy,\n CAST(AVG(IF(flattened.lengthRange.upperBound IS NULL AND flattened.lengthRange.lowerBound = 5.6, flattened.total.volumeNumbers.volume, NULL)) * 24 AS INT64) AS lange_kjoretoy\nFROM `{project}.standardized.timetrafikk`, UNNEST(byDirection) flat_directions, UNNEST(flat_directions.byLengthRange) flattened\nWHERE name = \"HØVIK\"\nAND DATE(`from`, \"Europe/Oslo\") BETWEEN \"2022-01-01\" AND \"2022-01-31\"\nAND (flattened.lengthRange.upperBound = 5.6 OR (flattened.lengthRange.lowerBound = 5.6 AND flattened.lengthRange.upperBound IS NULL))\nGROUP BY 1,2,3\nORDER BY 1,2,3\n\"\"\" \n\nprint(query)\n\nclient.query(query).to_dataframe()",
"_____no_output_____"
]
],
[
[
"Denne spørringen viser en enkel `UNNEST` for å finne totalvolum og volum per __felt__.",
"_____no_output_____"
]
],
[
[
"query = f\"\"\"\nSELECT \n trpId,\n DATETIME(`from`, \"Europe/Oslo\") AS dato,\n timetrafikk.total.volumeNumbers.volume as totalVolum,\n lanes.lane.laneNumber as felt,\n lanes.total.volumeNumbers.volume as feltVolum\nFROM\n `{project}.standardized.timetrafikk` timetrafikk,\n UNNEST(byLane) lanes\nWHERE\n DATE(`from`, \"Europe/Oslo\") = \"2021-09-15\"\n AND trpId = \"16219V72812\"\nORDER BY `from`\nLIMIT 20\n\"\"\" \n\nprint(query)\n\nclient.query(query).to_dataframe()",
"_____no_output_____"
]
],
[
[
"Denne spørringen viser en enkel `UNNEST` for å finne totalvolum og volum per __lengdeklasse__.",
"_____no_output_____"
]
],
[
[
"query = f\"\"\"\nSELECT \n trpId,\n DATETIME(`from`, \"Europe/Oslo\") AS dato,\n timetrafikk.total.volumeNumbers.volume as totalVolum,\n IFNULL(lengthRanges.lengthRange.lowerBound, 0) meterLengdeFra, \n IFNULL(CAST(lengthRanges.lengthRange.upperBound AS STRING), \"ubegrenset\") meterLengdeTil,\n lengthRanges.total.volumeNumbers.volume as lengdeklasseVolum\nFROM\n `{project}.standardized.timetrafikk` timetrafikk,\n UNNEST(byLengthRange) lengthRanges\nWHERE\n DATE(`from`, \"Europe/Oslo\") = \"2021-09-15\"\n AND trpId = \"16219V72812\"\nORDER BY `from`\nLIMIT 20\n\"\"\" \n\nprint(query)\n\nclient.query(query).to_dataframe()",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a1f649f18a0978a79498d208a917b0958b31f9f
| 19,378 |
ipynb
|
Jupyter Notebook
|
notebooks/12-gdb-prohet_features.ipynb
|
StorWater/StorWater_POC
|
8351ddb77ff5980bcd65d24dd49fae1d0d29e8fe
|
[
"MIT"
] | null | null | null |
notebooks/12-gdb-prohet_features.ipynb
|
StorWater/StorWater_POC
|
8351ddb77ff5980bcd65d24dd49fae1d0d29e8fe
|
[
"MIT"
] | null | null | null |
notebooks/12-gdb-prohet_features.ipynb
|
StorWater/StorWater_POC
|
8351ddb77ff5980bcd65d24dd49fae1d0d29e8fe
|
[
"MIT"
] | null | null | null | 33.525952 | 125 | 0.519197 |
[
[
[
"import sys\nprint(f'Interpreter dir: {sys.executable}')\nimport os\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nif os.path.basename(os.getcwd()) == 'notebooks':\n os.chdir('../')\n \nprint(f'Working dir: {os.getcwd()}')\n%load_ext autoreload\n%autoreload 2\n",
"_____no_output_____"
],
[
"import xgboost as xgb\nimport lightgbm as lgb\nimport pandas as pd\nimport numpy as np\nfrom fbprophet import Prophet\nfrom sklearn.preprocessing import StandardScaler",
"_____no_output_____"
],
[
"from bayes_opt import BayesianOptimization\n# Ejemplo de bayessian metaparameter optimization por si quieres usarlo para buscar parametros\ndef bayes_parameter_opt_lgb(X, y,\n init_round=15,\n opt_round=25, \n n_folds=5, \n random_seed=6, \n n_estimators=10000, \n learning_rate=0.02, \n output_process=False):\n # prepare data\n train_data = lgb.Dataset(data=X, label=y)\n # parameters\n def lgb_eval(num_leaves, feature_fraction,\n bagging_fraction, max_depth,\n lambda_l1, lambda_l2,\n min_split_gain,\n min_child_weight):\n params = {'application':'binary',\n 'num_iterations': n_estimators, \n 'learning_rate':learning_rate, \n 'early_stopping_round':100, \n 'metric':'binary'}\n params[\"num_leaves\"] = int(round(num_leaves))\n params['feature_fraction'] = max(min(feature_fraction, 1), 0)\n params['bagging_fraction'] = max(min(bagging_fraction, 1), 0)\n params['max_depth'] = int(round(max_depth))\n params['lambda_l1'] = max(lambda_l1, 0)\n params['lambda_l2'] = max(lambda_l2, 0)\n params['min_split_gain'] = min_split_gain\n params['min_child_weight'] = min_child_weight\n params[\"is_unbalance\"] = True\n cv_result = lgb.cv(params, train_data, nfold=n_folds,\n seed=random_seed,\n stratified=True, \n verbose_eval =200,\n metrics=['auc'])\n return max(cv_result['auc-mean'])\n # range \n lgbBO = BayesianOptimization(lgb_eval, {'num_leaves': (24, 45),\n 'feature_fraction': (0.1, 0.9),\n 'bagging_fraction': (0.8, 1),\n 'max_depth': (5, 8.99),\n 'lambda_l1': (0, 5),\n 'lambda_l2': (0, 3),\n 'min_split_gain': (0.001, 0.1),\n 'min_child_weight': (5, 50)}, random_state=0)\n # optimize\n lgbBO.maximize(init_points=init_round, n_iter=opt_round)\n \n # output optimization process\n if output_process==True: lgbBO.points_to_csv(\"bayes_opt_result.csv\")\n \n # return best parameters\n return lgbBO#.res['max']['max_params']\n\n",
"_____no_output_____"
]
],
[
[
"### Load data with outliers remove as in previous version",
"_____no_output_____"
]
],
[
[
"# clean data has outlier of std > 2 already removed\nraw = pd.read_csv(\"data/processed/clean_data.csv\", index_col=[\"Timestamp\"], parse_dates=[\"Timestamp\"])\ndf = raw.rename(columns={\"is_leakage\":\"target\"})\nlast_leakage_period = \"6H\" # Create target using rolling window of 6 hours\ndf[\"target\"] = df[[\"target\"]].rolling(last_leakage_period).max().copy()\n",
"_____no_output_____"
],
[
"# Standarize time series\nscaler = StandardScaler().fit(df.values[:, 1:])\ndf.iloc[:, 1:] = scaler.transform(df.values[:, 1:])\ndf.head()",
"_____no_output_____"
],
[
"# Remove 2020 to avoid the coronavirus effect\ndf_2019 = df[df.index < df.index[-14000]]\ndf_2019.shape",
"_____no_output_____"
],
[
"def split_datasets(df, test_examples=25000):\n df_train = df[df.index < df.index[-test_examples]]\n df_test = df[df.index > df.index[-test_examples]]\n df_val = df_test.iloc[test_examples // 2:].copy()\n df_test = df_test.iloc[:test_examples // 2].copy()\n return df_train, df_test, df_val",
"_____no_output_____"
],
[
"test_examples = 25000\ndf_train, df_test, df_val = split_datasets(df_2019)\ndf_train.head()",
"_____no_output_____"
]
],
[
[
"## Extract prophet features",
"_____no_output_____"
]
],
[
[
"# These are all the non-zero features provided by prophet\ncols = [\"ds\", 'trend', 'yhat_lower', 'yhat_upper', 'trend_lower', 'trend_upper',\n 'additive_terms', 'additive_terms_lower', 'additive_terms_upper',\n 'daily', 'daily_lower', 'daily_upper', 'weekly', 'weekly_lower',\n 'weekly_upper', 'yearly', 'yearly_lower', 'yearly_upper',\n 'yhat'] \n# Choosing only these ones we get pretty much the same metrics than using all the previous ones\nsmall_cols = [\"ds\", 'trend', 'additive_terms', 'daily', 'weekly', 'yearly', 'yhat']\ndef extract_prophet_features(df, column, model, cols):\n \"\"\"Fit a prophet model to the desired column. Return its predictions and fitter model.\"\"\"\n pdf = df[[column]].reset_index().rename(columns={\"Timestamp\":\"ds\", column:\"y\"})\n m = Prophet(**model) if isinstance(model, dict) else model\n model = m.fit(pdf) if isinstance(model, dict) else m\n return model, model.predict(pdf)[cols].set_index(\"ds\")\n ",
"_____no_output_____"
],
[
"# Prpphet can be fine-tuned to get better forecastings\nprophet_params = dict(yearly_seasonality=True,\n weekly_seasonality=True,\n daily_seasonality=True,\n )",
"_____no_output_____"
],
[
"column = \"PressureBar\"\nmodel_pres, press_train = extract_prophet_features(df_train, column=column, model=prophet_params, cols=small_cols)\ncolumn = \"m3Volume\"\nmodel_vol, volume_train = extract_prophet_features(df_train, column=column, model=prophet_params, cols=small_cols)",
"_____no_output_____"
],
[
"# Use the models fit on training set to extract prophet features on the test set.\n# This is a conservative assumption, as the prophet models could be continuously trained in \n# in production to provide more accurate forecastings.\ncolumn = \"PressureBar\"\n_, press_test = extract_prophet_features(df_test, column=column, model=model_pres, cols=small_cols)\ncolumn = \"m3Volume\"\n_, volume_test = extract_prophet_features(df_test, column=column, model=model_vol, cols=small_cols)",
"_____no_output_____"
]
],
[
[
"## Add rolling statistics (Optional)",
"_____no_output_____"
]
],
[
[
"def add_rolling_means(df, periods):\n \"\"\"Add features representing rolling mean aggregation during the provided periods.\"\"\"\n data = [df.rolling(period).mean() for period in periods]\n df_c = df.copy()\n for new_df, p in zip(data, periods):\n df_c = pd.merge(df_c, new_df, left_index=True, right_index=True, how=\"inner\", suffixes=('', \"_%s\" % p))\n return df_c",
"_____no_output_____"
],
[
"periods = [\"1H\", \"2H\", \"6H\", \"12H\", \"24H\"]\npress_feats_train = add_rolling_means(press_train, periods)\nvol_feats_train = add_rolling_means(volume_train, periods)\npress_feats_test = add_rolling_means(press_test, periods)\nvol_feats_test = add_rolling_means(volume_test, periods)",
"_____no_output_____"
],
[
"press_feats_train = press_train\nvol_feats_train = volume_train\npress_feats_test = press_test\nvol_feats_test = volume_test",
"_____no_output_____"
]
],
[
[
"## Create train and test sets",
"_____no_output_____"
]
],
[
[
"train_features = pd.merge(df_train, press_feats_train,\n right_index=True,\n left_index=True,\n how=\"inner\", suffixes=('', \"_press\"))\ntrain_features = pd.merge(train_features,\n vol_feats_train,\n right_index=True,\n left_index=True,\n how=\"inner\",\n suffixes=('', \"_vol\"))",
"_____no_output_____"
],
[
"test_features = pd.merge(df_test, press_feats_test,\n right_index=True,\n left_index=True,\n how=\"inner\", suffixes=('', \"_press\"))\ntest_features = pd.merge(test_features,\n vol_feats_test,\n right_index=True,\n left_index=True,\n how=\"inner\",\n suffixes=('', \"_vol\"))",
"_____no_output_____"
],
[
"train_x = train_features.drop(\"target\", axis=1)\ntrain_y = train_features[\"target\"]\ntest_x = test_features.drop(\"target\", axis=1)\ntest_y = test_features[\"target\"]",
"_____no_output_____"
],
[
"from sklearn.metrics import accuracy_score,roc_auc_score,confusion_matrix,classification_report\ndef print_report(model):\n y_train_pred = model.predict(train_x)\n y_test_pred = model.predict(test_x)\n #y_val_pred = gbm_2.predict(x_val)\n\n print(\"TRAIN SET\")\n print(classification_report(train_y.values.astype(int), y_train_pred.astype(int)))\n print(\"\\nTEST SET\")\n print(classification_report(test_y.values.astype(int), y_test_pred.astype(int)))\n print(\"\\nTRAIN SET\")\n print(confusion_matrix(train_y.values.astype(int), y_train_pred.astype(int)))\n print(\"\\nTEST DATSET\")\n print(confusion_matrix(test_y.values.astype(int), y_test_pred.astype(int)))\n ",
"_____no_output_____"
]
],
[
[
"# This needs to be fine tuned to unbalanced classification. Mot ready to be used yet.\nopt_params = bayes_parameter_opt_lgb(train_x, train_y, init_round=5, opt_round=10,\n n_folds=3,\n random_seed=6,\n n_estimators=60, learning_rate=0.02)\n\ndef train_lgbm(train_x,train_y,test_x,test_y, params):\n lgb_train = lgb.Dataset(train_x,train_y)\n lgb_valid = lgb.Dataset(test_x,test_y)\n model = lgb.train(params, lgb_train, 3000,\n valid_sets=[lgb_train, lgb_valid],\n early_stopping_rounds=10000, verbose_eval=50)\n y_test = model.predict(test_x.values)\n return y_test,model\n\nbest_lgbm_params = opt_params.res[8][\"params\"]\nother_params = dict(objective='binary',metric='binary',\n n_estimators=50,\n scale_pos_weight=10,\n is_unbalance=False)\nbest_lgbm_params[\"num_leaves\"] = int(best_lgbm_params[\"num_leaves\"])\nbest_lgbm_params[\"max_depth\"] = int(best_lgbm_params[\"max_depth\"])\nbest_lgbm_params.update(other_params)\ny_test, model = train_lgbm(train_x,train_y,test_x,test_y, best_lgbm_params)\nprint_report(model)",
"_____no_output_____"
]
],
[
[
"import imblearn\nfrom sklearn.ensemble import BaggingClassifier\nfrom imblearn.under_sampling import NearMiss\nfrom imblearn.pipeline import make_pipeline\nfrom sklearn.linear_model import LogisticRegression\nn_jobs = 64\npipeline = make_pipeline(NearMiss(version=2, n_jobs=n_jobs),\n LogisticRegression(max_iter=500,\n C=0.1,\n class_weight='balanced',\n n_jobs=n_jobs,\n penalty='elasticnet',\n solver=\"saga\",\n l1_ratio=0.2))",
"_____no_output_____"
],
[
"pipeline.fit(train_x.values, train_y.values.astype(int))",
"_____no_output_____"
],
[
"print_report(pipeline)",
"_____no_output_____"
],
[
"params = {#'pos_bagging_fraction':0.4,\n #\"bagging_fraction\":0.5,\n 'feature_fraction': 0.3721514930979355,\n 'lambda_l1': 3.0138168803582195,\n 'lambda_l2': 1.6346495489906907,\n 'max_depth': None,\n 'min_child_weight': 1.065235087999525,\n 'min_split_gain': 0.04432113391500656,\n 'num_leaves': 42}\ngbm_2 = lgb.LGBMClassifier(objective='binary',metric='binary',\n n_estimators=50,\n bagging_fraction=0.5,\n scale_pos_weight=1000, # tweaking this has a direct effect on prec/recall tradeoff\n is_unbalance=False, **params)\n\ngbm_2 = make_pipeline(imblearn.combine.SMOTEENN(n_jobs=n_jobs),\n gbm_2)",
"_____no_output_____"
],
[
"\ngbm_2.fit(train_x.values, train_y.values.astype(int))",
"_____no_output_____"
],
[
"print_report(gbm_2)",
"_____no_output_____"
],
[
"from collections import defaultdict\nimport ray\nimport tqdm\ndef get_cum_metrics(y_true, y_pred):\n metrics = defaultdict(list)\n for i in tqdm.autonotebook.trange(1, len(x)):\n mets = classification_report(y_true[:i], y_pred[:i], output_dict=True)\n for k, v in mets.items():\n if k == \"1\":\n for ki, vi in v.items():\n metrics[ki].append(vi)\n return metrics\n\n\n\[email protected]\ndef calculate_metrics(i, y_true, y_pred):\n return classification_report(y_true[:i], y_pred[:i], output_dict=True)\n\n\ny_test_pred = gbm_2.predict(test_x)\nray.init(ignore_reinit_error=True)\nproc_ids = [calculate_metrics.remote(i, test_y.astype(int), y_test_pred.astype(int)) for i in range(1, len(test_y))]\nresults = ray.get(proc_ids)\nresults[0].keys()\n\n\n",
"_____no_output_____"
],
[
"cum_mets = pd.DataFrame.from_records([r[\"1\"] for r in results if \"1\" in r])\ncum_mets.iloc[:, :3].plot()",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"raw",
"code"
] |
[
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"raw"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a1f80e5876ec9c241c63ea98fd40785a43a2c01
| 24,691 |
ipynb
|
Jupyter Notebook
|
docs/_build/html/notebooks/pytorch_binary_classification.ipynb
|
aiqc/aiqc
|
19c468b59fafca1036fa90d0ace50006744e5ac0
|
[
"BSD-3-Clause"
] | 17 |
2021-02-07T17:46:10.000Z
|
2021-04-08T04:40:46.000Z
|
docs/_build/html/_sources/notebooks/pytorch_binary_classification.ipynb.txt
|
aiqc/aiqc
|
19c468b59fafca1036fa90d0ace50006744e5ac0
|
[
"BSD-3-Clause"
] | 49 |
2021-03-19T12:51:31.000Z
|
2021-04-09T01:41:38.000Z
|
docs/_build/doctrees/nbsphinx/notebooks/pytorch_binary_classification.ipynb
|
aiqc/aiqc
|
19c468b59fafca1036fa90d0ace50006744e5ac0
|
[
"BSD-3-Clause"
] | 6 |
2021-03-01T23:14:05.000Z
|
2021-04-02T23:30:44.000Z
| 29.151122 | 345 | 0.455956 |
[
[
[
"# PyTorch: Tabular Classify Binary",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
],
[
[
"import torch\nimport torch.nn as nn\nfrom torch import optim\nimport torchmetrics\n\nfrom sklearn.preprocessing import LabelBinarizer, StandardScaler\n\nimport aiqc\nfrom aiqc import datum",
"_____no_output_____"
]
],
[
[
"---",
"_____no_output_____"
],
[
"## Example Data",
"_____no_output_____"
],
[
"Reference [Example Datasets](example_datasets.ipynb) for more information.",
"_____no_output_____"
]
],
[
[
"df = datum.to_pandas('sonar.csv')",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
]
],
[
[
"---",
"_____no_output_____"
],
[
"## a) High-Level API",
"_____no_output_____"
],
[
"Reference [High-Level API Docs](api_high_level.ipynb) for more information including how to work with non-tabular data.",
"_____no_output_____"
]
],
[
[
"splitset = aiqc.Pipeline.Tabular.make(\n df_or_path = df\n , dtype = None\n\n , feature_cols_excluded = 'object'\n , feature_interpolaters = None\n , feature_window = None\n , feature_encoders = dict(\n sklearn_preprocess = StandardScaler()\n , dtypes = ['float64']\n )\n , feature_reshape_indices = None\n\n , label_column = 'object'\n , label_interpolater = None\n , label_encoder = dict(sklearn_preprocess = LabelBinarizer(sparse_output=False))\n\n , size_test = 0.12\n , size_validation = 0.22\n , fold_count = None\n , bin_count = None\n)",
"\n=> Info - System overriding user input to set `sklearn_preprocess.copy=False`.\n This saves memory when concatenating the output of many encoders.\n\n\n___/ featurecoder_index: 0 \\_________\n\n=> The column(s) below matched your filter(s) featurecoder filters.\n\n['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'aa', 'ab', 'ac', 'ad', 'ae', 'af', 'ag', 'ah', 'ai', 'aj', 'ak', 'al', 'am', 'an', 'ao', 'ap', 'aq', 'ar', 'as', 'at', 'au', 'av', 'aw', 'ax', 'ay', 'az', 'ba', 'bb', 'bc', 'bd', 'be', 'bf', 'bg', 'bh']\n\n=> Done. All feature column(s) have featurecoder(s) associated with them.\nNo more Featurecoders can be added to this Encoderset.\n\n"
],
[
"def fn_build(features_shape, label_shape, **hp):\n model = nn.Sequential(\n nn.Linear(features_shape[0], 12),\n nn.BatchNorm1d(12,12),\n nn.ReLU(),\n nn.Dropout(p=0.5),\n\n nn.Linear(12, label_shape[0]),\n nn.Sigmoid()\n )\n return model",
"_____no_output_____"
],
[
"def fn_train(model, loser, optimizer, samples_train, samples_evaluate, **hp):\n ## --- Prepare mini batches for analysis ---\n batched_features, batched_labels = aiqc.torch_batcher(\n samples_train['features'], samples_train['labels'],\n batch_size=5, enforce_sameSize=False, allow_1Sample=False\n )\n\n ## --- Metrics ---\n acc = torchmetrics.Accuracy()\n # Mirrors `keras.model.History.history` object.\n history = {\n 'loss':list(), 'accuracy': list(), \n 'val_loss':list(), 'val_accuracy':list()\n }\n\n ## --- Training loop ---\n epochs = hp['epoch_count']\n for epoch in range(epochs):\n ## --- Batch training ---\n for i, batch in enumerate(batched_features): \n # Make raw (unlabeled) predictions.\n batch_probability = model(batched_features[i])\n batch_loss = loser(batch_probability, batched_labels[i])\n # Backpropagation.\n optimizer.zero_grad()\n batch_loss.backward()\n optimizer.step()\n\n ## --- Epoch metrics ---\n # Overall performance on training data.\n train_probability = model(samples_train['features'])\n train_loss = loser(train_probability, samples_train['labels'])\n train_acc = acc(train_probability, samples_train['labels'].to(torch.short))\n history['loss'].append(float(train_loss))\n history['accuracy'].append(float(train_acc))\n # Performance on evaluation data.\n eval_probability = model(samples_evaluate['features'])\n eval_loss = loser(eval_probability, samples_evaluate['labels'])\n eval_acc = acc(eval_probability, samples_evaluate['labels'].to(torch.short)) \n history['val_loss'].append(float(eval_loss))\n history['val_accuracy'].append(float(eval_acc))\n return model, history",
"_____no_output_____"
]
],
[
[
"Optional, will be automatically selected based on `analysis_type` if left as `None`.",
"_____no_output_____"
]
],
[
[
"def fn_optimize(model, **hp):\n optimizer = optim.Adamax(\n model.parameters()\n , lr=hp['learning_rate']\n )\n return optimizer",
"_____no_output_____"
],
[
"hyperparameters = {\n \"learning_rate\": [0.01, 0.005]\n , \"epoch_count\": [50]\n}",
"_____no_output_____"
],
[
"queue = aiqc.Experiment.make(\n library = \"pytorch\"\n , analysis_type = \"classification_binary\"\n , fn_build = fn_build\n , fn_train = fn_train\n , splitset_id = splitset.id\n , repeat_count = 2\n , hide_test = False\n , hyperparameters = hyperparameters\n \n , fn_lose = None #optional/ automated\n , fn_optimize = fn_optimize #optional/ automated\n , fn_predict = None #optional/ automated\n , foldset_id = None\n)",
"_____no_output_____"
],
[
"queue.run_jobs()",
"🔮 Training Models 🔮: 100%|██████████████████████████████████████████| 4/4 [00:08<00:00, 2.01s/it]\n"
]
],
[
[
"For more information on visualization of performance metrics, reference the [Visualization & Metrics](visualization.html) documentation.",
"_____no_output_____"
],
[
"---",
"_____no_output_____"
],
[
"## b) Low-Level API",
"_____no_output_____"
],
[
"Reference [Low-Level API Docs](api_low_level.ipynb) for more information including how to work with non-tabular data and defining optimizers.",
"_____no_output_____"
]
],
[
[
"dataset = aiqc.Dataset.Tabular.from_pandas(df)",
"_____no_output_____"
],
[
"label_column = 'object'",
"_____no_output_____"
],
[
"label = dataset.make_label(columns=[label_column])",
"_____no_output_____"
],
[
"labelcoder = label.make_labelcoder(\n sklearn_preprocess = LabelBinarizer(sparse_output=False)\n)",
"_____no_output_____"
],
[
"feature = dataset.make_feature(exclude_columns=[label_column])",
"_____no_output_____"
],
[
"encoderset = feature.make_encoderset()",
"_____no_output_____"
],
[
"featurecoder_0 = encoderset.make_featurecoder(\n sklearn_preprocess = StandardScaler()\n , dtypes = ['float64']\n)",
"\n=> Info - System overriding user input to set `sklearn_preprocess.copy=False`.\n This saves memory when concatenating the output of many encoders.\n\n\n___/ featurecoder_index: 0 \\_________\n\n=> The column(s) below matched your filter(s) featurecoder filters.\n\n['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'aa', 'ab', 'ac', 'ad', 'ae', 'af', 'ag', 'ah', 'ai', 'aj', 'ak', 'al', 'am', 'an', 'ao', 'ap', 'aq', 'ar', 'as', 'at', 'au', 'av', 'aw', 'ax', 'ay', 'az', 'ba', 'bb', 'bc', 'bd', 'be', 'bf', 'bg', 'bh']\n\n=> Done. All feature column(s) have featurecoder(s) associated with them.\nNo more Featurecoders can be added to this Encoderset.\n\n"
],
[
"splitset = aiqc.Splitset.make(\n feature_ids = [feature.id]\n , label_id = label.id\n , size_test = 0.22\n , size_validation = 0.12\n)",
"_____no_output_____"
],
[
"def fn_build(features_shape, label_shape, **hp):\n model = nn.Sequential(\n nn.Linear(features_shape[0], 12),\n nn.BatchNorm1d(12,12),\n nn.ReLU(),\n nn.Dropout(p=0.5),\n\n nn.Linear(12, label_shape[0]),\n nn.Sigmoid()\n )\n return model",
"_____no_output_____"
],
[
"def fn_train(model, loser, optimizer, samples_train, samples_evaluate, **hp):\n ## --- Prepare mini batches for analysis ---\n batched_features, batched_labels = aiqc.torch_batcher(\n samples_train['features'], samples_train['labels'],\n batch_size=5, enforce_sameSize=False, allow_1Sample=False\n )\n\n ## --- Metrics ---\n acc = torchmetrics.Accuracy()\n # Mirrors `keras.model.History.history` object.\n history = {\n 'loss':list(), 'accuracy': list(), \n 'val_loss':list(), 'val_accuracy':list()\n }\n\n ## --- Training loop ---\n epochs = hp['epoch_count']\n for epoch in range(epochs):\n ## --- Batch training ---\n for i, batch in enumerate(batched_features): \n # Make raw (unlabeled) predictions.\n batch_probability = model(batched_features[i])\n batch_loss = loser(batch_probability, batched_labels[i])\n # Backpropagation.\n optimizer.zero_grad()\n batch_loss.backward()\n optimizer.step()\n\n ## --- Epoch metrics ---\n # Overall performance on training data.\n train_probability = model(samples_train['features'])\n train_loss = loser(train_probability, samples_train['labels'])\n train_acc = acc(train_probability, samples_train['labels'].to(torch.short))\n history['loss'].append(float(train_loss))\n history['accuracy'].append(float(train_acc))\n # Performance on evaluation data.\n eval_probability = model(samples_evaluate['features'])\n eval_loss = loser(eval_probability, samples_evaluate['labels'])\n eval_acc = acc(eval_probability, samples_evaluate['labels'].to(torch.short)) \n history['val_loss'].append(float(eval_loss))\n history['val_accuracy'].append(float(eval_acc))\n return model, history",
"_____no_output_____"
]
],
[
[
"Optional, will be automatically selected based on `analysis_type` if left as `None`.",
"_____no_output_____"
]
],
[
[
"def fn_optimize(model, **hp):\n optimizer = optim.Adamax(\n model.parameters()\n , lr=hp['learning_rate']\n )\n return optimizer",
"_____no_output_____"
],
[
"hyperparameters = {\n \"learning_rate\": [0.01, 0.005]\n , \"epoch_count\": [50]\n}",
"_____no_output_____"
],
[
"algorithm = aiqc.Algorithm.make(\n library = \"pytorch\"\n , analysis_type = \"classification_binary\"\n , fn_build = fn_build\n , fn_train = fn_train\n , fn_optimize = fn_optimize\n)",
"_____no_output_____"
],
[
"hyperparamset = algorithm.make_hyperparamset(\n hyperparameters = hyperparameters\n)",
"_____no_output_____"
],
[
"queue = algorithm.make_queue(\n splitset_id = splitset.id\n , hyperparamset_id = hyperparamset.id\n , repeat_count = 1\n)",
"_____no_output_____"
],
[
"queue.run_jobs()",
"🔮 Training Models 🔮: 100%|██████████████████████████████████████████| 2/2 [00:04<00:00, 2.18s/it]\n"
]
],
[
[
"For more information on visualization of performance metrics, reference the [Visualization & Metrics](visualization.html) documentation.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
4a1f8b4f13efb7cc37f65de1daae895cd21bbd66
| 1,275 |
ipynb
|
Jupyter Notebook
|
Processamento de Linguagem Natural/PLN usando python/Named Entity Recognition.ipynb
|
CSM-TeachingProject/CSM-Courses
|
ca4242d6afc8ba6b8cb3eb25b90d0d53fc6a6530
|
[
"Apache-2.0"
] | 6 |
2021-04-20T01:39:33.000Z
|
2022-02-24T02:28:17.000Z
|
Processamento de Linguagem Natural/PLN usando python/Named Entity Recognition.ipynb
|
CSM-TeachingProject/CSM-Courses
|
ca4242d6afc8ba6b8cb3eb25b90d0d53fc6a6530
|
[
"Apache-2.0"
] | 14 |
2021-04-26T03:15:08.000Z
|
2021-12-20T21:07:22.000Z
|
Processamento de Linguagem Natural/PLN usando python/Named Entity Recognition.ipynb
|
CSM-TeachingProject/CSM-Courses
|
ca4242d6afc8ba6b8cb3eb25b90d0d53fc6a6530
|
[
"Apache-2.0"
] | 6 |
2021-04-23T05:04:03.000Z
|
2022-02-02T05:50:38.000Z
| 20.564516 | 96 | 0.551373 |
[
[
[
"import nltk\n\nparagraph = \"The Taj Mahal was built by Emperor Shah Jahan\"",
"_____no_output_____"
],
[
"# separa cada palavra\nwords = nltk.word_tokenize(paragraph)\n# faz o POS-Tagging\ntagged_words = nltk.pos_tag(words)\n",
"_____no_output_____"
],
[
"# aqui você poderá encontrar entidades como: \n# orgnizações, pessoas, localizações, data, tempo, porcentagem, lugares, posição global\n\nnamed_entity = nltk.ne_chunk(tagged_words)\nnamed_entity.draw()",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code"
]
] |
4a1f94a9047472766dcfc6bd49de41b36e403469
| 131,303 |
ipynb
|
Jupyter Notebook
|
code/Carajas_Data_Irregular_Grid.ipynb
|
pinga-lab/Eq_Layer-Toeplitz
|
d56b40f99e9059c07a504efe3ad53aff8462622f
|
[
"BSD-3-Clause"
] | null | null | null |
code/Carajas_Data_Irregular_Grid.ipynb
|
pinga-lab/Eq_Layer-Toeplitz
|
d56b40f99e9059c07a504efe3ad53aff8462622f
|
[
"BSD-3-Clause"
] | 1 |
2020-08-19T22:42:42.000Z
|
2020-08-19T23:32:26.000Z
|
code/Carajas_Data_Irregular_Grid.ipynb
|
pinga-lab/Eq_Layer-Toeplitz
|
d56b40f99e9059c07a504efe3ad53aff8462622f
|
[
"BSD-3-Clause"
] | null | null | null | 184.933803 | 57,604 | 0.905714 |
[
[
[
"## Import",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport functions as fc\nfrom timeit import default_timer as time\nfrom fatiando.gravmag import polyprism\nfrom fatiando import mesher, gridder\nfrom fatiando.gravmag import prism\nfrom fatiando.constants import G, SI2MGAL\nfrom scipy.sparse import diags\n\nfrom matplotlib import pyplot as plt\nimport matplotlib.cm as cm\nfrom mpl_toolkits.basemap import Basemap\nfrom mpl_toolkits.axes_grid1.inset_locator import inset_axes\n\nfrom scipy.interpolate import griddata\nfrom scipy import interpolate, signal\nfrom fatiando.vis import mpl\nimport cPickle as pickle\n#%matplotlib inline",
"/home/vanderlei/Documents/fatiando/fatiando/vis/mpl.py:70: UserWarning: This module will be removed in v0.6. We recommend the use of matplotlib.pyplot module directly. Some of the fatiando specific functions will remain.\n \"specific functions will remain.\")\n"
],
[
"def plot_rec(bmap, lower_left, upper_left, lower_right, upper_right):\n xs = [lower_left[0], upper_left[0],\n lower_right[0], upper_right[0],\n lower_left[0], lower_right[0],\n upper_left[0], upper_right[0]]\n ys = [lower_left[1], upper_left[1],\n lower_right[1], upper_right[1],\n lower_left[1], lower_right[1],\n upper_left[1], upper_right[1]]\n bmap.plot(xs, ys, latlon = True, color='red')",
"_____no_output_____"
]
],
[
[
"## Observed Grid and Data",
"_____no_output_____"
]
],
[
[
"with open('carajas_gz.pickle') as r:\n carajas = pickle.load(r)",
"_____no_output_____"
],
[
"grid_x = carajas['x']\ngrid_y = carajas['y']\ngrid_z = carajas['z']\ngrid_dobs = carajas['gz']",
"_____no_output_____"
],
[
"gz_max = np.max(grid_dobs)\ngz_min = np.min(grid_dobs)\n\nprint gz_min, gz_max",
"-88.25 2.35\n"
],
[
"gz_colorbar_ranges = np.arange(-90., 6.1, 6)\n\nprint gz_colorbar_ranges",
"[-90. -84. -78. -72. -66. -60. -54. -48. -42. -36. -30. -24. -18. -12.\n -6. 0. 6.]\n"
],
[
"shape = (500, 500)",
"_____no_output_____"
],
[
"font_title = 10\nfont_ticks = 8\nfont_labels = 10\n\nheight=6.\nwidth = 8.\nheight_per_width = height/width\n#plt.figure(figsize=(8,6))\nplt.figure(figsize=(4.33,4.33*height_per_width))\n\n#plt.plot()\nax=plt.subplot(1,1,1)\n# plt.tricontourf(np.ravel(grid_y),np.ravel(grid_x),np.ravel(grid_dobs),\n# levels=gz_colorbar_ranges, cmap='jet',\n# vmin = -90, vmax = 6)\nplt.contourf(grid_y.reshape(shape),grid_x.reshape(shape),grid_dobs.reshape(shape),\n levels=gz_colorbar_ranges, cmap='jet',\n vmin = -90, vmax = 6)\n\n#define colorbar\ncbar = plt.cm.ScalarMappable(cmap=cm.jet)\ncbar.set_array(np.ravel(grid_dobs))\ncbar.set_clim(-90, 6)\ncb = plt.colorbar(cbar, shrink=1, boundaries=gz_colorbar_ranges)\ncb.set_label('Gravity data (mGal)', rotation=90, fontsize=font_labels)\ncb.ax.tick_params(labelsize=font_ticks)\n\nplt.xlim(np.min(grid_y),np.max(grid_y))\nplt.ylim(np.min(grid_x),np.max(grid_x))\nplt.xticks(fontsize=font_ticks)\nplt.yticks(fontsize=font_ticks)\nplt.xlabel('Easting coordinate y (km)', fontsize=font_labels)\nplt.ylabel('Northing coordinate x (m)', fontsize=font_labels)\nmpl.m2km()\nplt.tight_layout(True)\n\n# plot the inset\n#inset = inset_axes(ax, width=\"40%\", height=\"40%\", loc=3, bbox_to_anchor=(65,44,350,350))\ninset = inset_axes(ax, width=\"30%\", height=\"30%\", loc=3)\nm = Basemap(projection='merc',llcrnrlat=-40,urcrnrlat=10,\\\n llcrnrlon=-82,urcrnrlon=-29,lat_ts=20,resolution='c')\nm.drawcoastlines(zorder=1)\nm.fillcontinents(color='white',lake_color='aqua')\nllcrnrlon = -53\nurcrnrlon = -49\nllcrnrlat = -8\nurcrnrlat = -5\nlower_left = (llcrnrlon, llcrnrlat)\nlower_right= (urcrnrlon, llcrnrlat)\nupper_left = (llcrnrlon, urcrnrlat)\nupper_right= (urcrnrlon, urcrnrlat)\nplot_rec(m, lower_left, upper_left, lower_right, upper_right, )\n\nm.drawmapboundary(fill_color='lightblue')\nm.drawcountries(linewidth=0.6, linestyle='solid', color='k' )\n\n#plt.savefig('../manuscript/Fig/carajas_real_data.png', 
dpi=300)\nplt.savefig('../manuscript/Fig/Figure9.png', dpi=1200)\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Equivalent layer Depth",
"_____no_output_____"
]
],
[
[
"# Equivalent Layer depth\nshape_m = (500, 500)\nzj = np.ones_like(grid_z)*300",
"_____no_output_____"
]
],
[
[
"## Fast Eq. Layer Combined with Circulant-Toeplitz (BCCB)",
"_____no_output_____"
]
],
[
[
"# Predicted data\ns = time()\nitmax = 50\nrho_toep, gzp_toep = fc.fast_eq_bccb(np.ravel(grid_x),np.ravel(grid_y),np.ravel(grid_z),\n np.ravel(zj),shape_m,np.ravel(grid_dobs),itmax)\ne = time()\ntcpu = e - s\nprint tcpu",
"1.16217398643\n"
],
[
"delta_gz = gzp_toep-np.ravel(grid_dobs)",
"_____no_output_____"
]
],
[
[
"## Property estimative plot",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(6,6))\n\nplt.plot()\nplt.pcolormesh(grid_y.reshape(shape_m), grid_x.reshape(shape_m),rho_toep.reshape(shape_m))\n#plt.tricontourf(yi,xi,rho_toep,30,cmap='jet')\ncb = plt.colorbar()\n#plt.axis('scaled')\ncb.set_label('$density$ ( $kg.m^{-3}$ )', rotation=90, fontsize=14)\nplt.xlim(np.min(grid_y),np.max(grid_y))\nplt.ylim(np.min(grid_x),np.max(grid_x))\nplt.xticks(fontsize=12)\nplt.yticks(fontsize=12)\nplt.xlabel('Easting coordinate y (km)', fontsize=12)\nplt.ylabel('Northing coordinate x (m)', fontsize=12)\nmpl.m2km()\nplt.tight_layout(True)\n#plt.savefig('figures/rho_carajas_500x500.png', dpi=300)\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Data, Predicted data and Residuals plot",
"_____no_output_____"
],
[
"## Fast Equivalent layer BCCB plot",
"_____no_output_____"
]
],
[
[
"mean = np.mean(delta_gz)\nprint mean\nstd = np.std(delta_gz)\nprint std",
"0.00029344162027741734\n0.1158238959637627\n"
],
[
"print np.min(delta_gz), np.max(delta_gz)",
"-1.8876473016330095 2.302509214867115\n"
],
[
"res_colorbar_ranges = np.arange(-0.8, 0.81, 0.1)",
"_____no_output_____"
],
[
"res_colorbar_ranges",
"_____no_output_____"
],
[
"# plot of the vertical component of the gravitational atraction at z=0 \nfont_title = 10\nfont_ticks = 8\nfont_labels = 10\n\nheight=12.\nwidth = 8.\nheight_per_width = height/width\n#plt.figure(figsize=(7,10))\nplt.figure(figsize=(4.33,4.33*height_per_width))\n\n#plt.subplot(311)\n#plt.title('A)', y=0.91, x=0.1, fontsize=18)\n#plt.tricontourf(np.ravel(grid_y),np.ravel(grid_x),np.ravel(grid_dobs),15,cmap='jet')\n#cb = plt.colorbar()\n##plt.axis('scaled')\n#cb.set_label('$Gz$ ( $mGal$ )', rotation=90, fontsize=14)\n#plt.xlim(np.min(yi_c),np.max(yi_c))\n#plt.ylim(np.min(xi_c),np.max(xi_c))\n#plt.xticks(fontsize=14)\n#plt.yticks(fontsize=14)\n#plt.xlabel('Easting coordinate y (km)', fontsize=12)\n#plt.ylabel('Northing coordinate x (m)', fontsize=12)\n#mpl.m2km()\n\nplt.subplot(211)\nplt.title('(a)', y=0.93, x=-0.20, fontsize=font_title)\n# plt.tricontourf(np.ravel(grid_y),np.ravel(grid_x),gzp_toep,\n# levels=gz_colorbar_ranges, cmap='jet', \n# vmin = -90, vmax = 6)\nplt.contourf(grid_y.reshape(shape),grid_x.reshape(shape),gzp_toep.reshape(shape),\n levels=gz_colorbar_ranges, cmap='jet', \n vmin = -90, vmax = 6)\n\n#define colorbar\ncbar = plt.cm.ScalarMappable(cmap=cm.jet)\ncbar.set_array(gzp_toep)\ncbar.set_clim(-90, 6)\ncb = plt.colorbar(cbar, shrink=1, boundaries=gz_colorbar_ranges)\ncb.set_label('Gravity data (mGal)', rotation=90, fontsize=font_labels)\ncb.ax.tick_params(labelsize=font_ticks)\n\n#plt.xlim(np.min(grid_y),np.max(grid_y))\n#plt.ylim(np.min(grid_x),np.max(grid_x))\nplt.xticks(fontsize=font_ticks)\nplt.yticks(fontsize=font_ticks)\n#plt.xlabel('Easting coordinate y (km)', fontsize=14)\nplt.ylabel('Northing coordinate x (m)', fontsize=font_labels)\nmpl.m2km()\n\nplt.subplot(212)\nplt.title('(b)', y=0.93, x=-0.20, fontsize=font_title)\n# plt.tricontourf(np.ravel(grid_y),np.ravel(grid_x),delta_gz,\n# levels=res_colorbar_ranges, vmin=-0.8, vmax=.8, cmap='jet')\nplt.contourf(grid_y.reshape(shape),grid_x.reshape(shape),delta_gz.reshape(shape),\n 
levels=res_colorbar_ranges, vmin=-0.6, vmax=0.6, cmap='jet')\n\n#define colorbar\ncbar = plt.cm.ScalarMappable(cmap=cm.jet)\ncbar.set_array(delta_gz)\ncbar.set_clim(-0.6, 0.6)\ncb = plt.colorbar(cbar, shrink=1, boundaries=res_colorbar_ranges, extend='both')\ncb.set_label('Residuals (mGal)', rotation=90, fontsize=font_labels)\ncb.ax.tick_params(labelsize=font_ticks)\n\n#plt.xlim(np.min(grid_y),np.max(grid_y))\n#plt.ylim(np.min(grid_x),np.max(grid_x))\nplt.xticks(fontsize=font_ticks)\nplt.yticks(fontsize=font_ticks)\nplt.xlabel('Easting coordinate y (km)', fontsize=font_labels)\nplt.ylabel('Northing coordinate x (m)', fontsize=font_labels)\nmpl.m2km()\nplt.tight_layout(True)\n#plt.savefig('../manuscript/Fig/Carajas_gz_predito.png', dpi=300)\nplt.savefig('../manuscript/Fig/Figure10.png', dpi=1200)",
"_____no_output_____"
]
],
[
[
"## Transformation - Upward Continuation",
"_____no_output_____"
]
],
[
[
"# BTTb Eq. Layer Transformation\nN = shape_m[0]*shape_m[1]\nz_up = np.zeros_like(grid_x)-5000\ns = time()\nBTTB_up = fc.bttb(np.ravel(grid_x),np.ravel(grid_y),np.ravel(z_up),np.ravel(zj))\ncev_up = fc.bccb(shape_m,N,BTTB_up)\ngzp_bccb_up = fc.fast_forward_bccb(shape_m,N,rho_toep,cev_up)\ne = time()\ntcpu = e - s\nprint tcpu",
"0.0764899253845\n"
],
[
"# plot of the vertical component of the gravitational atraction at z=0 \nfont_title = 10\nfont_ticks = 8\nfont_labels = 10\n\nheight=6.\nwidth = 8.\nheight_per_width = height/width\n#plt.figure(figsize=(8,6))\nplt.figure(figsize=(4.33,4.33*height_per_width))\n\n# plt.tricontourf(np.ravel(grid_y),np.ravel(grid_x),gzp_bccb_up,\n# 30, cmap='jet', vmin = -90, vmax = 6)\nplt.contourf(grid_y.reshape(shape),grid_x.reshape(shape),gzp_bccb_up.reshape(shape),\n 30, cmap='jet', vmin = -90, vmax = 6)\n\n#define colorbar\ncbar = plt.cm.ScalarMappable(cmap=cm.jet)\ncbar.set_array(gzp_bccb_up)\ncbar.set_clim(-90, 6)\ncb = plt.colorbar(cbar, shrink=1, boundaries=gz_colorbar_ranges)\ncb.set_label('Gravity data (mGal)', rotation=90, fontsize=font_labels)\ncb.ax.tick_params(labelsize=font_ticks)\n\nplt.xlim(np.min(grid_y),np.max(grid_y))\nplt.ylim(np.min(grid_x),np.max(grid_x))\nplt.xticks(fontsize=font_ticks)\nplt.yticks(fontsize=font_ticks)\nplt.xlabel('Easting coordinate y (km)', fontsize=font_labels)\nplt.ylabel('Northing coordinate x (m)', fontsize=font_labels)\nmpl.m2km()\nplt.tight_layout(True)\n#plt.savefig('../manuscript/Fig/up5000_carajas_500x500.png', dpi=300)\nplt.savefig('../manuscript/Fig/Figure11.png', dpi=1200)\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Transformation - Downward Continuation",
"_____no_output_____"
]
],
[
[
"# BTTb Eq. Layer Transformation\nN = shape_m[0]*shape_m[1]\nz_down = np.zeros_like(grid_x)-500\nBTTB_down = fc.bttb(np.ravel(grid_x),np.ravel(grid_y),np.ravel(z_down),np.ravel(zj))\ncev_down = fc.bccb(shape_m,N,BTTB_down)\ngzp_bccb_down = fc.fast_forward_bccb(shape_m,N,rho_toep,cev_down)",
"_____no_output_____"
],
[
"# plot of the vertical component of the gravitational atraction at z=0 \nplt.figure(figsize=(6,6))\n\nplt.tricontourf(np.ravel(grid_y),np.ravel(grid_x),gzp_bccb_down,30,cmap='jet')\ncb = plt.colorbar(shrink=0.8)\nplt.axis('scaled')\ncb.set_label('$Gz$ ( $mGal$ )', rotation=90, fontsize=14)\nplt.xlim(np.min(yi_c),np.max(yi_c))\nplt.ylim(np.min(xi_c),np.max(xi_c))\nplt.xticks(fontsize=12)\nplt.yticks(fontsize=12)\nplt.xlabel('Easting coordinate y (km)', fontsize=12)\nplt.ylabel('Northing coordinate x (m)', fontsize=12)\nmpl.m2km()\nplt.tight_layout(True)\n#plt.savefig('figures/down500_carajas_500x500.jpg', dpi=300)\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Transformation - Gzz",
"_____no_output_____"
]
],
[
[
"# BTTb Eq. Layer Transformation\nw_gzz = fc.bttb_gzz(np.ravel(grid_x),np.ravel(grid_y),np.ravel(grid_z),np.ravel(zj))\ngzz = fc.fast_forward_bccb(shape_m,shape_m[0]*shape_m[1],rho_toep,w_gzz)",
"_____no_output_____"
],
[
"# plot of the vertical component of the gravitational atraction at z=0 \nplt.figure(figsize=(6,6))\n\nplt.tricontourf(np.ravel(grid_y),np.ravel(grid_x),gzz,30,cmap='jet')\ncb = plt.colorbar(shrink=0.8)\nplt.axis('scaled')\ncb.set_label('$Gzz$ ( $eotvos$ )', rotation=90, fontsize=14)\nplt.xlim(np.min(yi_c),np.max(yi_c))\nplt.ylim(np.min(xi_c),np.max(xi_c))\nplt.xticks(fontsize=12)\nplt.yticks(fontsize=12)\nplt.xlabel('Horizontal coordinate y (m)', fontsize=12)\nplt.ylabel('Horizontal coordinate x (m)', fontsize=12)\n#plt.savefig('figures/gzz_carajas_1005x131.jpg', dpi=300)\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Junk Tests",
"_____no_output_____"
]
],
[
[
"data_right = np.zeros_like(data)\nfor i in range (shape[1]):\n line = data[i*300:(i+1)*300,:]\n if line[i+1,1] < line[i,1]:\n line_reverse = line[::-1]\n data_right[i*300:(i+1)*300,:] = line_reverse\n else:\n data_right[i*300:(i+1)*300,:] = line",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"raw",
"markdown",
"code",
"markdown",
"code",
"markdown",
"raw",
"markdown",
"raw",
"markdown",
"raw"
] |
[
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"raw"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"raw",
"raw"
],
[
"markdown"
],
[
"raw",
"raw"
],
[
"markdown"
],
[
"raw"
]
] |
4a1f99e4891ddc20af03aecd2b60056c83781f59
| 4,933 |
ipynb
|
Jupyter Notebook
|
examples/reference/elements/matplotlib/Scatter.ipynb
|
ppwadhwa/holoviews
|
e8e2ec08c669295479f98bb2f46bbd59782786bf
|
[
"BSD-3-Clause"
] | 864 |
2019-11-13T08:18:27.000Z
|
2022-03-31T13:36:13.000Z
|
examples/reference/elements/matplotlib/Scatter.ipynb
|
ppwadhwa/holoviews
|
e8e2ec08c669295479f98bb2f46bbd59782786bf
|
[
"BSD-3-Clause"
] | 1,117 |
2019-11-12T16:15:59.000Z
|
2022-03-30T22:57:59.000Z
|
examples/reference/elements/matplotlib/Scatter.ipynb
|
ppwadhwa/holoviews
|
e8e2ec08c669295479f98bb2f46bbd59782786bf
|
[
"BSD-3-Clause"
] | 180 |
2019-11-19T16:44:44.000Z
|
2022-03-28T22:49:18.000Z
| 43.27193 | 568 | 0.627407 |
[
[
[
"<div class=\"contentcontainer med left\" style=\"margin-left: -50px;\">\n<dl class=\"dl-horizontal\">\n <dt>Title</dt> <dd> Scatter Element</dd>\n <dt>Dependencies</dt> <dd>Matplotlib</dd>\n <dt>Backends</dt>\n <dd><a href='./Scatter.ipynb'>Matplotlib</a></dd>\n <dd><a href='../bokeh/Scatter.ipynb'>Bokeh</a></dd>\n <dd><a href='../plotly/Scatter.ipynb'>Plotly</a></dd>\n</dl>\n</div>",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport holoviews as hv\nfrom holoviews import dim\n\nhv.extension('matplotlib')",
"_____no_output_____"
]
],
[
[
"The ``Scatter`` element visualizes as markers placed in a space of one independent variable, traditionally denoted as *x*, against a dependent variable, traditionally denoted as *y*. In HoloViews, the name ``'x'`` is the default dimension name used in the key dimensions (``kdims``) and ``'y'`` is the default dimension name used in the value dimensions (``vdims``). We can see this from the default axis labels when visualizing a simple ``Scatter`` element:",
"_____no_output_____"
]
],
[
[
"np.random.seed(42)\ncoords = [(i, np.random.random()) for i in range(20)]\nscatter = hv.Scatter(coords)\n\nscatter.opts(color='k', marker='s', s=50)",
"_____no_output_____"
]
],
[
[
"Here the random *y* values are considered to be the 'data' whereas the *x* positions express where those data values were measured (compare this to the different way that [``Points``](./Points.ipynb) elements are defined). In this sense, ``Scatter`` is equivalent to a [``Curve``](./Curve.ipynb) without any lines connecting the samples, and you can use slicing to view the *y* values corresponding to a chosen *x* range:",
"_____no_output_____"
]
],
[
[
"scatter[0:12] + scatter[12:20]",
"_____no_output_____"
]
],
[
[
"A ``Scatter`` element must always have at least one value dimension (to give it a *y* location), but additional value dimensions are also supported. Here is an example with two additional quantities for each point, declared as the ``vdims`` ``'z'`` and ``'size'`` visualized as the color and size of the dots, respectively:",
"_____no_output_____"
]
],
[
[
"np.random.seed(10)\ndata = np.random.rand(100,4)\n\nscatter = hv.Scatter(data, vdims=['y', 'z', 'size'])\nscatter = scatter.opts(color='z', s=dim('size')*100)\nscatter + scatter[0.3:0.7, 0.3:0.7].hist()",
"_____no_output_____"
]
],
[
[
"In the right subplot, the ``hist`` method is used to show the distribution of samples along our first value dimension, (*y*).\n\nThe marker shape specified above can be any supported by [matplotlib](http://matplotlib.org/api/markers_api.html), e.g. ``s``, ``d``, or ``o``; the other options select the color and size of the marker.\n\n**Note**: Although the ``Scatter`` element is superficially similar to the [``Points``](./Points.ipynb) element (they can generate plots that look identical), the two element types are semantically quite different: Unlike ``Scatter``, ``Points`` are used to visualize data where the *y* variable is *independent*. This semantic difference also explains why the histogram generated by the ``hist`` call above visualizes the distribution of a different dimension than it does for [``Points``](./Points.ipynb) (because here *y*, not *z*, is the first ``vdim``).\n\nThis difference means that ``Scatter`` elements most naturally overlay with other elements that express dependent relationships between the x and y axes in two-dimensional space, such as the ``Chart`` types like [``Curve``](./Curve.ipynb). Conversely, ``Points`` elements either capture (x,y) spatial locations or they express a dependent relationship between an (x,y) location and some other dimension (expressed as point size, color, etc.), and thus they most naturally overlay with [``Raster``](./Raster.ipynb) types like [``Image``](./Image.ipynb).\n\nFor full documentation and the available style and plot options, use ``hv.help(hv.Scatter).``",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4a1fab6bd5817e5333b2043e5a36a54a9e678ff4
| 8,975 |
ipynb
|
Jupyter Notebook
|
dev/check_fedvr.ipynb
|
jfeist/jftools
|
df2133431a2af5fe4708beecef879697f493d9d8
|
[
"MIT"
] | null | null | null |
dev/check_fedvr.ipynb
|
jfeist/jftools
|
df2133431a2af5fe4708beecef879697f493d9d8
|
[
"MIT"
] | null | null | null |
dev/check_fedvr.ipynb
|
jfeist/jftools
|
df2133431a2af5fe4708beecef879697f493d9d8
|
[
"MIT"
] | 1 |
2021-09-18T22:29:11.000Z
|
2021-09-18T22:29:11.000Z
| 34.922179 | 177 | 0.460056 |
[
[
[
"import numpy as np\nimport scipy",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt\n%matplotlib inline\nplt.style.use('jf')",
"_____no_output_____"
],
[
"from jftools import fedvr",
"_____no_output_____"
],
[
"# 5 points (element boundaries) gives 4 elements\n# very low order to have only a few basis functions for plot\n# g = fedvr_grid(4,np.linspace(0,8,5))\ng = fedvr.fedvr_grid(4,np.array([0,2,3.5,4.5,6,8]))\nxnew = np.linspace(g.x[0],g.x[-1],500)\nfvals = g.get_basis_function_values(xnew)\nplt.plot(g.x,0*g.x,'ro',ms=10,mew=2.5,zorder=5,label='FEDVR points')\nfor x in [r.x[0] for r in g.regs]+[g.regs[-1].x[-1]]:\n plt.axvline(x,ls='--',color='0.4',lw=1,label='FE boundaries' if x==g.regs[0].x[0] else None)\nplt.plot(xnew,fvals.T)\nplt.margins(0.03)\nplt.legend()\nplt.tight_layout(pad=0)",
"_____no_output_____"
],
[
"from ipywidgets import interact\nimport scipy.sparse.linalg\n\ng = fedvr.fedvr_grid(11,np.linspace(-80,80,41))\nprint(\"#Grid points:\",len(g.x))\nM = 1.\nsigma = 8.\nk0 = 1.\nts, dt = np.linspace(0,300,301,retstep=True)\nf0 = lambda x: np.exp(-x**2/(2*sigma**2) + 1j*k0*x)\nH = -g.dx2/(2*M)\npsis = np.zeros([len(ts),len(g.x)],dtype=complex)\npsis[0] = g.project_function(f0)\nU = scipy.sparse.linalg.expm(-1j*dt*H.tocsc())\nfor ii in range(1,len(ts)):\n psis[ii] = U.dot(psis[ii-1])\n\nxnew = np.linspace(g.x[0],g.x[-1],500)\npsiplots = g.evaluate_basis(psis,xnew)\n@interact(ii=(0,len(ts)-1))\ndef doplot(ii=0):\n plt.plot(xnew,abs(psiplots[ii])**2)\n #plt.plot(g.x,abs(psis[ii])**2/g.wt,'o--')\n plt.ylim(0,1)",
"_____no_output_____"
],
[
"sigma = 0.8\nfdfs = [(lambda x: np.exp(-x**2/(2*sigma**2)), lambda x: np.exp(-x**2/(2*sigma**2)) * -x/sigma**2, lambda x: np.exp(-x**2/(2*sigma**2)) * (x**2-sigma**2)/sigma**4),\n (np.sin, np.cos, lambda x: -np.sin(x)),\n (lambda x: np.sin(np.pi*x/4)**2, lambda x: np.pi/4*np.sin(np.pi*x/2), lambda x: np.pi**2/8*np.cos(np.pi*x/2)),\n (lambda x: np.pi/4*np.sin(np.pi*x/2), lambda x: np.pi**2/8*np.cos(np.pi*x/2), lambda x: -np.pi**3/16*np.sin(np.pi*x/2)),\n (lambda x: np.sin(x)**2, lambda x: np.sin(2*x), lambda x: 2*np.cos(2*x)),\n (lambda x: np.sin(12*x), lambda x: 12*np.cos(12*x), lambda x: -144*np.sin(12*x))\n ]\ng = fedvr.fedvr_grid(11,np.linspace(-4,4,5))\nxnew = np.linspace(g.x[0],g.x[-1],1000)\n\nfig, axs = plt.subplots(1,len(fdfs),figsize=(7.5*len(fdfs),6.5))\nfor (f,df,d2f),ax in zip(fdfs,axs):\n cn = g.project_function(f)\n y = g.evaluate_basis(cn,xnew)\n dcn = g.dx.dot(cn)\n dy = g.evaluate_basis(dcn,xnew)\n dcn2 = g.dx2.dot(cn)\n dcn2a = g.dx.dot(dcn)\n dy2 = g.evaluate_basis(dcn2,xnew)\n dy2a = g.evaluate_basis(dcn2a,xnew)\n next(ax._get_lines.prop_cycler)\n ax.plot(xnew,y,label=r'$f(x)$')\n ax.plot(xnew,f(xnew),'k--')\n ax.plot(xnew,dy,label=r\"$f'(x)$\")\n ax.plot(xnew,df(xnew),'k--')\n ax.plot(xnew,dy2,label=r\"$f''(x)$\")\n ax.plot(xnew,d2f(xnew),'k--')\n ax.margins(0.03)\n ax.legend(fontsize=18)\nfig.tight_layout()",
"_____no_output_____"
],
[
"dx = g.dx.toarray()\ndxdx = dx @ dx\ndx2 = g.dx2.toarray()\nprint(np.linalg.norm(dx+dx.T))\nprint(np.linalg.norm(dxdx-dxdx.T))\nprint(np.linalg.norm(dx2-dx2.T))\nplt.plot(np.linalg.eigvalsh(-0.5*dx2))\nplt.plot(np.linalg.eigvalsh(-0.5*dxdx))\nplt.plot(np.arange(dx.shape[0])**2*np.pi**2/(2*8**2))\nplt.ylim(0,100)\n#plt.xlim(0,10); plt.ylim(0,10)",
"_____no_output_____"
],
[
"f, axs = plt.subplots(1,4,figsize=(23,4))\nfor ax, arr in zip(axs,[dx,dx2,dxdx,dx2-dxdx]):\n arr = np.sign(arr)*np.sqrt(abs(arr))\n vmax = abs(arr).max()\n im = ax.imshow(arr,interpolation='none',cmap='coolwarm',vmin=-vmax,vmax=vmax)\n plt.colorbar(im,ax=ax)",
"_____no_output_____"
],
[
"fdfs = [(lambda x: np.exp(-x**2/(2*0.5**2)), lambda x: np.exp(-x**2/(2*0.5**2)) * -x/0.5**2),\n (np.sin, np.cos),\n (lambda x: np.sin(x)**2, lambda x: np.sin(2*x)),\n (lambda x: np.sin(12*x), lambda x: 12*np.cos(12*x))\n ]\ng = fedvr.fedvr_grid(11,np.linspace(-4,4,5))\nxnew = np.linspace(g.x[0],g.x[-1],1000)\n\nfig, axs = plt.subplots(1,len(fdfs),figsize=(7*len(fdfs),5.5))\nfor (f,df),ax in zip(fdfs,axs):\n cn = g.project_function(f)\n y = g.evaluate_basis(cn,xnew)\n dcn = g.dx.dot(cn)\n dy = g.evaluate_basis(dcn,xnew)\n next(ax._get_lines.prop_cycler)\n ax.plot(xnew,y,label=r'$f(x)$')\n ax.plot(xnew,f(xnew),'k--')\n ax.plot(xnew,dy,label=r\"$f'(x)$\")\n ax.plot(xnew,df(xnew),'k--')\n ax.margins(0.03)\n ax.legend(fontsize=18)\nfig.tight_layout()",
"_____no_output_____"
],
[
"f = lambda x: np.exp(-x**2/(2*0.5**2))\nnfuns = [5,8,11,15]\nfig, axs = plt.subplots(1,len(nfuns),figsize=(7*len(nfuns),5.5),sharey=True)\nfor nfun, ax in zip(nfuns,axs):\n g = fedvr.fedvr_grid(nfun,np.linspace(-4,4,5))\n xnew = np.linspace(g.x[0],g.x[-1],1000)\n y = f(xnew)\n cn = g.project_function(f)\n ynew = g.evaluate_basis(cn,xnew)\n ax.plot(xnew,y,label=r'$f(x)$',lw=3)\n ax.plot(g.x,cn/np.sqrt(g.wt),'o--',lw=1,ms=6,label=r'$f(x_n) = c_n/\\sqrt{w_n}$',zorder=4)\n ax.plot(xnew,ynew,'--',label=r'$\\tilde f(x) = \\sum_n c_n b_n(x)$')\n ax.margins(0.02)\n ax.legend()\n ax.set_title(r\"$N_{fun} = %d$, $\\|\\tilde f - f\\|/\\|f\\| = %.3e$\"%(nfun,np.trapz(abs(y-ynew),xnew)/np.trapz(y,xnew)),verticalalignment='bottom')\n print(np.trapz(y,xnew)-np.sum(cn*np.sqrt(g.wt)))\nfig.tight_layout(pad=0.5)",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a1faf3d1e06ba70861cc9dddaeb9c01854a9985
| 1,033,696 |
ipynb
|
Jupyter Notebook
|
labwork/lab5/Time_Series_AirPassenger.ipynb
|
YogeshHiremath/ml_lab_ecsc_306
|
985c20ea5340f760fc209b9eef84aa3dbd217d99
|
[
"Apache-2.0"
] | null | null | null |
labwork/lab5/Time_Series_AirPassenger.ipynb
|
YogeshHiremath/ml_lab_ecsc_306
|
985c20ea5340f760fc209b9eef84aa3dbd217d99
|
[
"Apache-2.0"
] | null | null | null |
labwork/lab5/Time_Series_AirPassenger.ipynb
|
YogeshHiremath/ml_lab_ecsc_306
|
985c20ea5340f760fc209b9eef84aa3dbd217d99
|
[
"Apache-2.0"
] | 1 |
2018-01-18T05:50:11.000Z
|
2018-01-18T05:50:11.000Z
| 809.4722 | 79,484 | 0.942348 |
[
[
[
"# Steps to Tackle a Time Series Problem (with Codes in Python)\nNote: These are just the codes from article",
"_____no_output_____"
],
[
"## Loading and Handling TS in Pandas",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pylab as plt\n%matplotlib inline\nfrom matplotlib.pylab import rcParams\nrcParams['figure.figsize'] = 15, 6",
"_____no_output_____"
],
[
"#Note: aim is not to teach stock price forecasting. It's a very complex domain and I have almost no clue about it. Here I will demonstrate the various techniques which can be used for time-series forecasting\ndata = pd.read_csv('AirPassengers.csv')\nprint data.head()\nprint '\\n Data Types:'\nprint data.dtypes",
" Month #Passengers\n0 1949-01 112\n1 1949-02 118\n2 1949-03 132\n3 1949-04 129\n4 1949-05 121\n\n Data Types:\nMonth object\n#Passengers int64\ndtype: object\n"
]
],
[
[
"Reading as datetime format:",
"_____no_output_____"
]
],
[
[
"dateparse = lambda dates: pd.datetime.strptime(dates, '%Y-%m')\n# dateparse('1962-01')\ndata = pd.read_csv('AirPassengers.csv', parse_dates='Month', index_col='Month',date_parser=dateparse)\nprint data.head()",
" #Passengers\nMonth \n1949-01-01 112\n1949-02-01 118\n1949-03-01 132\n1949-04-01 129\n1949-05-01 121\n"
],
[
"#check datatype of index\ndata.index",
"_____no_output_____"
],
[
"#convert to time series:\nts = data['#Passengers']\nts.head(10)",
"_____no_output_____"
]
],
[
[
"### Indexing TS arrays:",
"_____no_output_____"
]
],
[
[
"#1. Specific the index as a string constant:\nts['1949-01-01']",
"_____no_output_____"
],
[
"#2. Import the datetime library and use 'datetime' function:\nfrom datetime import datetime\nts[datetime(1949,1,1)]",
"_____no_output_____"
]
],
[
[
"#Get range:",
"_____no_output_____"
]
],
[
[
"#1. Specify the entire range:\nts['1949-01-01':'1949-05-01']",
"_____no_output_____"
],
[
"#2. Use ':' if one of the indices is at ends:\nts[:'1949-05-01']",
"_____no_output_____"
]
],
[
[
"Note: ends included here",
"_____no_output_____"
]
],
[
[
"#All rows of 1962:\nts['1949']",
"_____no_output_____"
]
],
[
[
"# Checking for stationarity\n\n## Plot the time-series",
"_____no_output_____"
]
],
[
[
"plt.plot(ts)",
"_____no_output_____"
]
],
[
[
"### Function for testing stationarity",
"_____no_output_____"
]
],
[
[
"from statsmodels.tsa.stattools import adfuller\ndef test_stationarity(timeseries):\n \n #Determing rolling statistics\n rolmean = pd.rolling_mean(timeseries, window=12)\n rolstd = pd.rolling_std(timeseries, window=12)\n\n #Plot rolling statistics:\n orig = plt.plot(timeseries, color='blue',label='Original')\n mean = plt.plot(rolmean, color='red', label='Rolling Mean')\n std = plt.plot(rolstd, color='black', label = 'Rolling Std')\n plt.legend(loc='best')\n plt.title('Rolling Mean & Standard Deviation')\n plt.show(block=False)\n \n #Perform Dickey-Fuller test:\n print 'Results of Dickey-Fuller Test:'\n dftest = adfuller(timeseries, autolag='AIC')\n dfoutput = pd.Series(dftest[0:4], index=['Test Statistic','p-value','#Lags Used','Number of Observations Used'])\n for key,value in dftest[4].items():\n dfoutput['Critical Value (%s)'%key] = value\n print dfoutput",
"_____no_output_____"
],
[
"test_stationarity(ts)",
"_____no_output_____"
]
],
[
[
"# Making TS Stationary\n\n\n## Estimating & Eliminating Trend\n",
"_____no_output_____"
]
],
[
[
"ts_log = np.log(ts)\nplt.plot(ts_log)",
"_____no_output_____"
]
],
[
[
"## Smoothing:\n\n### Moving average",
"_____no_output_____"
]
],
[
[
"moving_avg = pd.rolling_mean(ts_log,12)\nplt.plot(ts_log)\nplt.plot(moving_avg, color='red')",
"_____no_output_____"
],
[
"ts_log_moving_avg_diff = ts_log - moving_avg\nts_log_moving_avg_diff.head(12)",
"_____no_output_____"
],
[
"ts_log_moving_avg_diff.dropna(inplace=True)\nts_log_moving_avg_diff.head()",
"_____no_output_____"
],
[
"test_stationarity(ts_log_moving_avg_diff)",
"_____no_output_____"
]
],
[
[
"### Exponentially Weighted Moving Average",
"_____no_output_____"
]
],
[
[
"expwighted_avg = pd.ewma(ts_log, halflife=12)\nplt.plot(ts_log)\nplt.plot(expwighted_avg, color='red')\n# expwighted_avg.plot(style='k--')",
"_____no_output_____"
],
[
"ts_log_ewma_diff = ts_log - expwighted_avg\ntest_stationarity(ts_log_ewma_diff)",
"_____no_output_____"
]
],
[
[
"## Eliminating Trend and Seasonality",
"_____no_output_____"
],
[
"### Differencing:",
"_____no_output_____"
]
],
[
[
"#Take first difference:\nts_log_diff = ts_log - ts_log.shift()\nplt.plot(ts_log_diff)",
"_____no_output_____"
],
[
"ts_log_diff.dropna(inplace=True)\ntest_stationarity(ts_log_diff)",
"_____no_output_____"
]
],
[
[
"### Decomposition:",
"_____no_output_____"
]
],
[
[
"from statsmodels.tsa.seasonal import seasonal_decompose\ndecomposition = seasonal_decompose(ts_log)\n\ntrend = decomposition.trend\nseasonal = decomposition.seasonal\nresidual = decomposition.resid\n\nplt.subplot(411)\nplt.plot(ts_log, label='Original')\nplt.legend(loc='best')\nplt.subplot(412)\nplt.plot(trend, label='Trend')\nplt.legend(loc='best')\nplt.subplot(413)\nplt.plot(seasonal,label='Seasonality')\nplt.legend(loc='best')\nplt.subplot(414)\nplt.plot(residual, label='Residuals')\nplt.legend(loc='best')\nplt.tight_layout()",
"_____no_output_____"
],
[
"ts_log_decompose = residual\nts_log_decompose.dropna(inplace=True)\ntest_stationarity(ts_log_decompose)",
"_____no_output_____"
]
],
[
[
"# Final Forecasting",
"_____no_output_____"
]
],
[
[
"from statsmodels.tsa.arima_model import ARIMA",
"_____no_output_____"
]
],
[
[
"### ACF & PACF Plots",
"_____no_output_____"
]
],
[
[
"#ACF and PACF plots:\nfrom statsmodels.tsa.stattools import acf, pacf \n\nlag_acf = acf(ts_log_diff, nlags=20)\nlag_pacf = pacf(ts_log_diff, nlags=20, method='ols')\n\n#Plot ACF: \nplt.subplot(121) \nplt.plot(lag_acf)\nplt.axhline(y=0,linestyle='--',color='gray')\nplt.axhline(y=-1.96/np.sqrt(len(ts_log_diff)),linestyle='--',color='gray')\nplt.axhline(y=1.96/np.sqrt(len(ts_log_diff)),linestyle='--',color='gray')\nplt.title('Autocorrelation Function')\n\n#Plot PACF:\nplt.subplot(122)\nplt.plot(lag_pacf)\nplt.axhline(y=0,linestyle='--',color='gray')\nplt.axhline(y=-1.96/np.sqrt(len(ts_log_diff)),linestyle='--',color='gray')\nplt.axhline(y=1.96/np.sqrt(len(ts_log_diff)),linestyle='--',color='gray')\nplt.title('Partial Autocorrelation Function')\nplt.tight_layout()",
"_____no_output_____"
]
],
[
[
"### AR Model:",
"_____no_output_____"
]
],
[
[
"#MA model:\nmodel = ARIMA(ts_log, order=(2, 1, 0)) \nresults_AR = model.fit(disp=-1) \nplt.plot(ts_log_diff)\nplt.plot(results_AR.fittedvalues, color='red')\nplt.title('RSS: %.4f'% sum((results_AR.fittedvalues-ts_log_diff)**2))",
"_____no_output_____"
]
],
[
[
"### MA Model",
"_____no_output_____"
]
],
[
[
"model = ARIMA(ts_log, order=(0, 1, 2)) \nresults_MA = model.fit(disp=-1) \nplt.plot(ts_log_diff)\nplt.plot(results_MA.fittedvalues, color='red')\nplt.title('RSS: %.4f'% sum((results_MA.fittedvalues-ts_log_diff)**2))",
"_____no_output_____"
]
],
[
[
"### ARIMA Model:",
"_____no_output_____"
]
],
[
[
"model = ARIMA(ts_log, order=(2, 1, 2)) \nresults_ARIMA = model.fit(disp=-1) \nplt.plot(ts_log_diff)\nplt.plot(results_ARIMA.fittedvalues, color='red')\nplt.title('RSS: %.4f'% sum((results_ARIMA.fittedvalues-ts_log_diff)**2))",
"_____no_output_____"
]
],
[
[
"### Convert to original scale:",
"_____no_output_____"
]
],
[
[
"predictions_ARIMA_diff = pd.Series(results_ARIMA.fittedvalues, copy=True)\nprint predictions_ARIMA_diff.head()",
"Month\n1949-02-01 0.009580\n1949-03-01 0.017491\n1949-04-01 0.027670\n1949-05-01 -0.004521\n1949-06-01 -0.023889\ndtype: float64\n"
],
[
"predictions_ARIMA_diff_cumsum = predictions_ARIMA_diff.cumsum()\nprint predictions_ARIMA_diff_cumsum.head()",
"Month\n1949-02-01 0.009580\n1949-03-01 0.027071\n1949-04-01 0.054742\n1949-05-01 0.050221\n1949-06-01 0.026331\ndtype: float64\n"
],
[
"predictions_ARIMA_log = pd.Series(ts_log.ix[0], index=ts_log.index)\npredictions_ARIMA_log = predictions_ARIMA_log.add(predictions_ARIMA_diff_cumsum,fill_value=0)\npredictions_ARIMA_log.head()",
"_____no_output_____"
],
[
"plt.plot(ts_log)\nplt.plot(predictions_ARIMA_log)",
"_____no_output_____"
],
[
"predictions_ARIMA = np.exp(predictions_ARIMA_log)\nplt.plot(ts)\nplt.plot(predictions_ARIMA)\nplt.title('RMSE: %.4f'% np.sqrt(sum((predictions_ARIMA-ts)**2)/len(ts)))",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
4a1fb85cf10a6e8acb73f3817dc7d1d328fe3881
| 293,575 |
ipynb
|
Jupyter Notebook
|
deeplearning1/nbs/lesson4_daniel.ipynb
|
db7894/courses
|
3a8be09d450291fea4e7981e57c89501ec828edc
|
[
"Apache-2.0"
] | null | null | null |
deeplearning1/nbs/lesson4_daniel.ipynb
|
db7894/courses
|
3a8be09d450291fea4e7981e57c89501ec828edc
|
[
"Apache-2.0"
] | null | null | null |
deeplearning1/nbs/lesson4_daniel.ipynb
|
db7894/courses
|
3a8be09d450291fea4e7981e57c89501ec828edc
|
[
"Apache-2.0"
] | null | null | null | 157.412869 | 238,868 | 0.871089 |
[
[
[
"# Gameplan:",
"_____no_output_____"
],
[
"1. Set up data\n2. Create subset for Excel\n3. Make a prediction w Dot Product\n4. Analyze results\n5. Try a neural net.",
"_____no_output_____"
]
],
[
[
"from theano.sandbox import cuda",
"Using cuDNN version 5103 on context None\nPreallocating 10867/11439 Mb (0.950000) on cuda\nMapped name None to device cuda: Tesla K40c (0000:81:00.0)\n"
],
[
"%matplotlib inline\nimport utils; reload(utils)\nfrom utils import *\nfrom __future__ import division, print_function",
"Using Theano backend.\n"
],
[
"path = \"data/ml-small/ml-latest-small/\"\nmodel_path = path + 'models/'\nif not os.path.exists(model_path): os.mkdir(model_path)\nbatch_size = 64",
"_____no_output_____"
]
],
[
[
"# Setup",
"_____no_output_____"
],
[
"We'll read in the ratings using read_csv function from Pandas, which reads a csv file into a pandas dataframe--a 2D size-mutable tabular data structure w/ labeled rows and columns. It's a dict-like container fro series objects.\nWe'll return the head of the dataframe, which by default is set to n=5 rows.",
"_____no_output_____"
]
],
[
[
"ratings = pd.read_csv(path+'ratings.csv')\nratings.head()",
"_____no_output_____"
],
[
"len(ratings) #how many?",
"_____no_output_____"
]
],
[
[
"We'll read in movie names for display purposes\nWe'll use set_index, a dataframe functiuon that will give us info from the column label. to_dict will just convert this to a dictionary",
"_____no_output_____"
]
],
[
[
"movie_names_prelim = pd.read_csv(path+'ratings.csv').set_index('movieId').to_dict()\nprint(movie_names_prelim[\"title\"])",
"_____no_output_____"
],
[
"movie_names = pd.read_csv(path+'movies.csv').set_index('movieId')['title'].to_dict()",
"_____no_output_____"
]
],
[
[
"We'll grab users/movies as the unique values from ratings.",
"_____no_output_____"
]
],
[
[
"users = ratings.userId.unique()\nmovies = ratings.movieId.unique()",
"_____no_output_____"
],
[
"print(users[:5])\nprint(movies[:10])",
"[1 2 3 4 5]\n[ 31 1029 1061 1129 1172 1263 1287 1293 1339 1343]\n"
]
],
[
[
"We'll reverse order of users,movies by flipping things. we use enumerate() which adds a counter to an iterable--it starts from 0 and will count from there.\nIt will update movie and user ids so that they are contiguous integers--this helps us use embeddings.",
"_____no_output_____"
]
],
[
[
"userid2idx = {o:i for i,o in enumerate(users)}\nmovieid2idx = {o:i for i,o in enumerate(movies)}",
"_____no_output_____"
]
],
[
[
"We'll reorder the ratings/users in our ratings DataFrame by using apply, which applies a function along the input axis of a DataFrame. This'll make sure things in our frame our ordered when we use them.",
"_____no_output_____"
]
],
[
[
"ratings.movieId = ratings.movieId.apply(lambda(x): movieid2idx[x])\nratings.userId = ratings.userId.apply(lambda(x): userid2idx[x])",
"_____no_output_____"
],
[
"user_min, user_max, movie_min, movie_max = (ratings.userId.min(),\n ratings.userId.max(), ratings.movieId.min(), ratings.movieId.max())\nuser_min, user_max, movie_min, movie_max",
"_____no_output_____"
],
[
"# now to get the number of users and movies using nunique(), not unique()\nn_users = ratings.userId.nunique()\nn_movies = ratings.movieId.nunique()\nn_users,n_movies",
"_____no_output_____"
]
],
[
[
"671,9066 are the number of latent factors in each embedding.",
"_____no_output_____"
]
],
[
[
"n_factors = 50",
"_____no_output_____"
],
[
"np.random.seed = 42 #seeds a generator--for when we want repeatable results.",
"_____no_output_____"
]
],
[
[
"Now split into training and validation\nrandom.rand creates an array of the given shape and will populate it with values from a uniform distribution over [0,1)--since we say to be less than 0.8, the values in the array that are less than 0.8 will show up as true, and thsoe that are not will be replaced with a value of false.",
"_____no_output_____"
]
],
[
[
"things = [1,2,3,4,5]\nrand_toy = np.random.rand(len(things)) < 0.7\nprint(rand_toy)\nprint(rand_toy[~things])",
"[False False True True True]\n"
]
],
[
[
"We use the tilde operator for validation--it's the invert/complement operation. So bascially trn should be all ratings less than 0.8 and val should be those more than 0.8 (?)",
"_____no_output_____"
]
],
[
[
"msk = np.random.rand(len(ratings)) < 0.8\ntrn = ratings[msk]\nval = ratings[~msk]",
"_____no_output_____"
]
],
[
[
"# Subset for Excel",
"_____no_output_____"
],
[
"We now get the most popular movies and most addicted users to copy into excel.\nWe'll use pandas groupby to group series of things by key or a series of columns",
"_____no_output_____"
]
],
[
[
"g=ratings.groupby('userId')['rating'].count() #count returns a series w/ number of non-null observations over the requested axis\ntopUsers=g.sort_values(ascending=False)[:15] #top 15 users",
"_____no_output_____"
],
[
"g=ratings.groupby('movieId')['rating'].count()\ntopMovies=g.sort_values(ascending=False)[:15]",
"_____no_output_____"
],
[
"top_r = ratings.join(topUsers, rsuffix='_r', how='inner', on='userId') #rsuffix is used from right frame's overlapping columns\n#inner will form an intersection of calling frame's index with the other frame's index preserving the order othe calling one\n#on is the column in the caller (ratings) to join the index in other",
"_____no_output_____"
],
[
"top_r = top_r.join(topMovies, rsuffix='_r', how='inner', on='movieId')",
"_____no_output_____"
],
[
"pd.crosstab(top_r.userId, top_r.movieId, top_r.rating, aggfunc=np.sum) #creates a crosstab of 2+ factors",
"_____no_output_____"
]
],
[
[
"# Dot Product",
"_____no_output_____"
]
],
[
[
"user_in = Input(shape=(1,),dtype='int64',name='user_in')\nu = Embedding(n_users,n_factors,input_length=1,W_regularizer=l2(1e-4))(user_in)\n# turns positive integers (indexes) into dense vectors of a fixed size. input_dim is n_users, output is n_factors. \nmovie_in = Input(shape=(1,),dtype='int64',name='movie_in')\nm = Embedding(n_movies,n_factors,input_length=1,W_regularizer=l2(1e-4))(movie_in)",
"_____no_output_____"
],
[
"x = merge([u,m], mode='dot')\nx = Flatten()(x)\nmodel = Model([user_in,movie_in],x) #we're using functional api and giving multiple inputs to the model\nmodel.compile(Adam(0.001),loss='mse')",
"_____no_output_____"
],
[
"model.fit([trn.userId,trn.movieId],trn.rating,batch_size=64,nb_epoch=1,\n validation_data=([val.userId,val.movieId],val.rating))",
"Train on 79983 samples, validate on 20021 samples\nEpoch 1/1\n79983/79983 [==============================] - 2s - loss: 9.9152 - val_loss: 4.3311\n"
],
[
"model.optimizer.lr=0.01",
"_____no_output_____"
],
[
"model.fit([trn.userId,trn.movieId],trn.rating,batch_size=64,nb_epoch=1,\n validation_data=([val.userId,val.movieId],val.rating))",
"Train on 79983 samples, validate on 20021 samples\nEpoch 1/1\n79983/79983 [==============================] - 3s - loss: 3.1290 - val_loss: 2.8549\n"
],
[
"model.optimizer.lr=0.001",
"_____no_output_____"
],
[
"model.fit([trn.userId,trn.movieId],trn.rating,batch_size=64,nb_epoch=6,\n validation_data=([val.userId,val.movieId],val.rating))",
"Train on 79983 samples, validate on 20021 samples\nEpoch 1/6\n79983/79983 [==============================] - 2s - loss: 2.3947 - val_loss: 2.6446\nEpoch 2/6\n79983/79983 [==============================] - 2s - loss: 2.2220 - val_loss: 2.5943\nEpoch 3/6\n79983/79983 [==============================] - 2s - loss: 2.1538 - val_loss: 2.5857\nEpoch 4/6\n79983/79983 [==============================] - 2s - loss: 2.1194 - val_loss: 2.5840\nEpoch 5/6\n79983/79983 [==============================] - 2s - loss: 2.0936 - val_loss: 2.5918\nEpoch 6/6\n79983/79983 [==============================] - 2s - loss: 2.0742 - val_loss: 2.5981\n"
]
],
[
[
"# Bias",
"_____no_output_____"
],
[
"We need a single bias for each user / each movie representing how positive/negative each user is, and how good each movie is.\nTo get this, we'll create an embedding w one output for each movie and each user, and add it to our output.",
"_____no_output_____"
]
],
[
[
"def embedding_input(name, n_in, n_out, reg):\n inp = Input(shape=(1,),dtype='int64',name=name)\n return inp, Embedding(n_in,n_out,input_length=1, W_regularizer=l2(reg))(inp)",
"_____no_output_____"
],
[
"user_in, u = embedding_input('user_in', n_users, n_factors, 1e-4)\nmovie_in, m = embedding_input('movie_in', n_movies, n_factors, 1e-4)",
"_____no_output_____"
],
[
"def create_bias(inp, n_in):\n x = Embedding(n_in, 1, input_length=1)(inp) #n_in is the input to embedding layer, output is 1 number\n return Flatten()(x)",
"_____no_output_____"
],
[
"ub = create_bias(user_in,n_users)\nmb = create_bias(movie_in,n_movies)",
"_____no_output_____"
],
[
"x = merge([u,m],mode='dot')\nx = Flatten()(x)\nx = merge([x,ub],mode='sum') #add user bias to our dot product\nx = merge([x,mb],mode='sum') #add movie bias to dot product\nmodel = Model([user_in,movie_in],x)\nmodel.compile(Adam(0.001),loss='mse')",
"_____no_output_____"
],
[
"model.fit([trn.userId,trn.movieId],trn.rating, batch_size=64, nb_epoch=1,\n validation_data=([val.userId,val.movieId],val.rating))",
"Train on 79983 samples, validate on 20021 samples\nEpoch 1/1\n79983/79983 [==============================] - 3s - loss: 8.7959 - val_loss: 3.5443\n"
],
[
"model.optimizer.lr=0.01",
"_____no_output_____"
],
[
"model.fit([trn.userId,trn.movieId],trn.rating,batch_size=64,nb_epoch=6,\n validation_data=([val.userId,val.movieId],val.rating))",
"Train on 79983 samples, validate on 20021 samples\nEpoch 1/6\n79983/79983 [==============================] - 3s - loss: 2.5850 - val_loss: 2.3491\nEpoch 2/6\n79983/79983 [==============================] - 3s - loss: 2.0054 - val_loss: 2.1441\nEpoch 3/6\n79983/79983 [==============================] - 3s - loss: 1.8466 - val_loss: 2.0458\nEpoch 4/6\n79983/79983 [==============================] - 3s - loss: 1.7494 - val_loss: 1.9684\nEpoch 5/6\n79983/79983 [==============================] - 3s - loss: 1.6680 - val_loss: 1.9001\nEpoch 6/6\n79983/79983 [==============================] - 3s - loss: 1.5922 - val_loss: 1.8331\n"
],
[
"model.optimizer.lr=0.001",
"_____no_output_____"
],
[
"model.fit([trn.userId,trn.movieId],trn.rating,batch_size=64,nb_epoch=10,\n validation_data=([val.userId,val.movieId],val.rating))",
"Train on 79983 samples, validate on 20021 samples\nEpoch 1/10\n79983/79983 [==============================] - 3s - loss: 1.4489 - val_loss: 1.6987\nEpoch 2/10\n79983/79983 [==============================] - 3s - loss: 1.3800 - val_loss: 1.6432\nEpoch 3/10\n79983/79983 [==============================] - 3s - loss: 1.3125 - val_loss: 1.5846\nEpoch 4/10\n79983/79983 [==============================] - 3s - loss: 1.2480 - val_loss: 1.5366\nEpoch 5/10\n79983/79983 [==============================] - 3s - loss: 1.1877 - val_loss: 1.4838\nEpoch 6/10\n79983/79983 [==============================] - 3s - loss: 1.1289 - val_loss: 1.4371\nEpoch 7/10\n79983/79983 [==============================] - 3s - loss: 1.0739 - val_loss: 1.3922\nEpoch 8/10\n79983/79983 [==============================] - 3s - loss: 1.0210 - val_loss: 1.3485\nEpoch 9/10\n79983/79983 [==============================] - 3s - loss: 0.9713 - val_loss: 1.3127\nEpoch 10/10\n79983/79983 [==============================] - 3s - loss: 0.9246 - val_loss: 1.2769\n"
],
[
"model.fit([trn.userId,trn.movieId],trn.rating,batch_size=64,nb_epoch=5,\n validation_data=([val.userId,val.movieId],val.rating))",
"Train on 79983 samples, validate on 20021 samples\nEpoch 1/5\n79983/79983 [==============================] - 3s - loss: 0.8814 - val_loss: 1.2434\nEpoch 2/5\n79983/79983 [==============================] - 3s - loss: 0.8409 - val_loss: 1.2137\nEpoch 3/5\n79983/79983 [==============================] - 3s - loss: 0.8031 - val_loss: 1.1846\nEpoch 4/5\n79983/79983 [==============================] - 3s - loss: 0.7694 - val_loss: 1.1618\nEpoch 5/5\n79983/79983 [==============================] - 3s - loss: 0.7378 - val_loss: 1.1381\n"
],
[
"model.save_weights(model_path+'bias.h5')\nmodel.load_weights(model_path+'bias.h5')",
"_____no_output_____"
],
[
"model.predict([np.array([3]), np.array([6])])",
"_____no_output_____"
]
],
[
[
"# Results",
"_____no_output_____"
]
],
[
[
"g=ratings.groupby('movieId')['rating'].count()\ntopMovies=g.sort_values(ascending=False)[:2000]\ntopMovies=np.array(topMovies.index)",
"_____no_output_____"
]
],
[
[
"We'll look at the movie bias term--create a model (inputs associated w outputs) using fxnl api. Input is movie id and output is bias",
"_____no_output_____"
]
],
[
[
"get_movie_bias = Model(movie_in,mb)\nmovie_bias = get_movie_bias.predict(topMovies)\nmovie_ratings = [(b[0], movie_names[movies[i]]) for i,b in zip(topMovies,movie_bias)]\n\nsorted(movie_ratings,key=itemgetter(0))[:15]",
"_____no_output_____"
],
[
"sorted(movie_ratings,key=itemgetter(0),reverse=True)[:15] #bottom rated movies",
"_____no_output_____"
],
[
"get_movie_emb = Model(movie_in,m)\nmovie_emb = np.squeeze(get_movie_emb.predict([topMovies]))\nmovie_emb.shape",
"_____no_output_____"
],
[
"from sklearn.decomposition import PCA\npca = PCA(n_components=3) #reduce to 3 vectors/embeddings\nmovie_pca = pca.fit(movie_emb.T).components_",
"_____no_output_____"
],
[
"fac0 = movie_pca[0]",
"_____no_output_____"
],
[
"movie_comp = [(f,movie_names[movies[i]]) for f,i in zip(fac0,topMovies)]",
"_____no_output_____"
]
],
[
[
"First comopnent of the 3 vectors:",
"_____no_output_____"
]
],
[
[
"sorted(movie_comp,key=itemgetter(0),reverse=True)[:10]",
"_____no_output_____"
],
[
"sorted(movie_comp,key=itemgetter(0))[:10]",
"_____no_output_____"
],
[
"fac1= movie_pca[1]",
"_____no_output_____"
],
[
"movie_comp = [(f,movie_names[movies[i]]) for f,i in zip(fac1,topMovies)]",
"_____no_output_____"
]
],
[
[
"Second reduced component",
"_____no_output_____"
]
],
[
[
"sorted(movie_comp,key=itemgetter(0),reverse=True)[:10]",
"_____no_output_____"
],
[
"sorted(movie_comp,key=itemgetter(0))[:10]",
"_____no_output_____"
],
[
"fac2 = movie_pca[2]",
"_____no_output_____"
],
[
"movie_comp = [(f,movie_names[movies[i]]) for f,i in zip(fac2,topMovies)]",
"_____no_output_____"
],
[
"sorted(movie_comp,key=itemgetter(0),reverse=True)[:10]",
"_____no_output_____"
],
[
"sorted(movie_comp,key=itemgetter(0))[:10]",
"_____no_output_____"
],
[
"import sys\nstdout, stderror = sys.stdout, sys.stderr #svae stdout, stderr\nreload(sys)\nsys.setdefaultencoding('utf-8')\nsys.stdout,sys.stderr = stdout,stderror",
"_____no_output_____"
],
[
"start=50;end=100\nX = fac0[start:end]\nY = fac2[start:end]\nplt.figure(figsize=(15,15))\nplt.scatter(X,Y)\nfor i,x,y in zip(topMovies[start:end],X,Y):\n plt.text(x,y,movie_names[movies[i]],color=np.random.rand(3)*0.7, fontsize=14)\nplt.show()",
"_____no_output_____"
]
],
[
[
"# Neural Net",
"_____no_output_____"
]
],
[
[
"user_in, u = embedding_input('user_in', n_users, n_factors, 1e-4)\nmovie_in, m = embedding_input('movie_in', n_movies, n_factors, 1e-4)",
"_____no_output_____"
],
[
"x = merge([u,m], mode='concat') #we concatenate user/movie embeddings into a vector to feed into the NN\nx = Flatten()(x)\nx = Dropout(0.3)(x)\nx = Dense(70,activation='relu')(x)\nx = Dropout(0.75)(x)\nx = Dense(1)(x)\nnn = Model([user_in,movie_in],x)\nnn.compile(Adam(0.001),loss='mse')",
"_____no_output_____"
],
[
"nn.fit([trn.userId, trn.movieId], trn.rating, batch_size=64, nb_epoch=8, \n validation_data=([val.userId, val.movieId], val.rating))",
"_____no_output_____"
]
],
[
[
"Looks like the neural net is a good way to go!",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
]
] |
4a1fc9b14593296e371fe7385f9ecac132771811
| 58,645 |
ipynb
|
Jupyter Notebook
|
GNN/Sol_SDF_GNN.ipynb
|
kojima-r/MolPredictionTutorial
|
342c7a18ac78ccdc511176bb60a851b503cc02cc
|
[
"MIT"
] | null | null | null |
GNN/Sol_SDF_GNN.ipynb
|
kojima-r/MolPredictionTutorial
|
342c7a18ac78ccdc511176bb60a851b503cc02cc
|
[
"MIT"
] | null | null | null |
GNN/Sol_SDF_GNN.ipynb
|
kojima-r/MolPredictionTutorial
|
342c7a18ac78ccdc511176bb60a851b503cc02cc
|
[
"MIT"
] | 1 |
2022-03-08T07:56:32.000Z
|
2022-03-08T07:56:32.000Z
| 86.116006 | 20,928 | 0.734879 |
[
[
[
"### Pytorch geometry (グラフニューラルネットワークライブラリ) のインストール(only first time)\n!pip install -q torch-scatter -f https://pytorch-geometric.com/whl/torch-1.9.0+cu102.html\n!pip install -q torch-sparse -f https://pytorch-geometric.com/whl/torch-1.9.0+cu102.html\n!pip install -q git+https://github.com/rusty1s/pytorch_geometric.git",
"_____no_output_____"
],
[
"# ライブラリ確認\nimport numpy as np\nimport pandas as pd\nimport rdkit\nfrom rdkit import Chem\nimport torch\nfrom torch_geometric.data import Data as TorchGeometricData\n\nprint(Chem.__doc__)",
" A module for molecules and stuff\n\n see Chem/index.html in the doc tree for documentation\n\n\n"
],
[
"#SDFファイルの読み込み\nimport glob\nimport re\n\nsuppl = Chem.SDMolSupplier(\"../../ForMolPredict/SDF_files/SOL/SOL_AllMOL.sdf\",removeHs=False) \nmol_list = [x for x in suppl]\nmol_num = len(mol_list)\nprint(\"there are {} molecules\".format(mol_num))",
"there are 1214 molecules\n"
],
[
"#detaの分割\nfrom sklearn.model_selection import train_test_split\ntrain_val, test = train_test_split(mol_list, random_state=0)\ntrain, val = train_test_split(train_val)",
"_____no_output_____"
],
[
"## pandasのデータフレームからRDkitのmolオブジェクトXとラベルYのペアに変換\n\nXwf={dataset_keyword:[] for dataset_keyword in [\"train\",\"valid\",\"test\"]}\nYwf={dataset_keyword:[] for dataset_keyword in [\"train\",\"valid\",\"test\"]}\n#(A) lowを0それ以外を1とする\nfor mol in train: \n Xwf[\"train\"].append(mol)\n if mol.GetProp('SOL_class')=='(A) low':\n Ywf[\"train\"].append(0.0)\n else:\n Ywf[\"train\"].append(1.0)\nfor mol in val: \n Xwf[\"valid\"].append(mol)\n if mol.GetProp('SOL_class')=='(A) low':\n Ywf[\"valid\"].append(0.0)\n else:\n Ywf[\"valid\"].append(1.0)\nfor mol in test:\n Xwf[\"test\"].append(mol)\n if mol.GetProp('SOL_class')=='(A) low':\n Ywf[\"test\"].append(0.0)\n else:\n Ywf[\"test\"].append(1.0)",
"_____no_output_____"
],
[
"\ndef one_of_k_encoding(x, allowable_set):\n if x not in allowable_set:\n raise Exception(\n \"input {0} not in allowable set{1}:\".format(x, allowable_set))\n return list(map(lambda s: x == s, allowable_set))\n\n\ndef one_of_k_encoding_unk(x, allowable_set):\n \"\"\"Maps inputs not in the allowable set to the last element.\"\"\"\n if x not in allowable_set:\n x = allowable_set[-1]\n return list(map(lambda s: x == s, allowable_set))\n\ndef get_atom_features(atom, en_list=None, explicit_H=False, use_sybyl=False, use_electronegativity=False,\n use_gasteiger=False, degree_dim=17):\n if use_sybyl:\n atom_type = ordkit._sybyl_atom_type(atom)\n atom_list = ['C.ar', 'C.cat', 'C.1', 'C.2', 'C.3', 'N.ar', 'N.am', 'N.pl3', 'N.1', 'N.2', 'N.3', 'N.4', 'O.co2',\n 'O.2', 'O.3', 'S.O', 'S.o2', 'S.2', 'S.3', 'F', 'Si', 'P', 'P3', 'Cl', 'Br', 'Mg', 'Na', 'Ca',\n 'Fe', 'As', 'Al', 'I', 'B', 'V', 'K', 'Tl', 'Yb', 'Sb', 'Sn', 'Ag', 'Pd', 'Co', 'Se', 'Ti', 'Zn',\n 'H', 'Li', 'Ge', 'Cu', 'Au', 'Ni', 'Cd', 'In', 'Mn', 'Zr', 'Cr', 'Pt', 'Hg', 'Pb', 'Unknown']\n else:\n atom_type = atom.GetSymbol()\n atom_list = ['C', 'N', 'O', 'S', 'F', 'Si', 'P', 'Cl', 'Br', 'Mg', 'Na', 'Ca', 'Fe', 'As', 'Al', 'I', 'B', 'V',\n 'K', 'Tl', 'Yb', 'Sb', 'Sn', 'Ag', 'Pd', 'Co', 'Se', 'Ti', 'Zn', 'H', 'Li', 'Ge', 'Cu', 'Au', 'Ni',\n 'Cd', 'In', 'Mn', 'Zr', 'Cr', 'Pt', 'Hg', 'Pb', 'Unknown']\n results = one_of_k_encoding_unk(atom_type, atom_list) + \\\n one_of_k_encoding(atom.GetDegree(), list(range(degree_dim))) + \\\n one_of_k_encoding_unk(atom.GetImplicitValence(), [0, 1, 2, 3, 4, 5, 6]) + \\\n [atom.GetFormalCharge(), atom.GetNumRadicalElectrons()] + \\\n one_of_k_encoding_unk(atom.GetHybridization(),\n [Chem.rdchem.HybridizationType.SP, Chem.rdchem.HybridizationType.SP2,\n Chem.rdchem.HybridizationType.SP3, Chem.rdchem.HybridizationType.SP3D,\n Chem.rdchem.HybridizationType.SP3D2]) + \\\n [atom.GetIsAromatic()]\n\n if use_electronegativity:\n results = results + [en_list[atom.GetAtomicNum() - 1]]\n if 
use_gasteiger:\n gasteiger = atom.GetDoubleProp('_GasteigerCharge')\n if np.isnan(gasteiger) or np.isinf(gasteiger):\n gasteiger = 0 # because the mean is 0\n results = results + [gasteiger]\n\n # In case of explicit hydrogen(QM8, QM9), avoid calling `GetTotalNumHs`\n if not explicit_H:\n results = results + one_of_k_encoding_unk(atom.GetTotalNumHs(), [0, 1, 2, 3, 4])\n return np.array(results, dtype=np.float32)\n\ndef get_bond_features(bond):\n results=one_of_k_encoding_unk(bond.GetBondType(),[Chem.rdchem.BondType.SINGLE,\n Chem.rdchem.BondType.DOUBLE,\n Chem.rdchem.BondType.TRIPLE,\n Chem.rdchem.BondType.AROMATIC])\n return np.array(results, dtype=np.float32)",
"_____no_output_____"
],
[
"import torch\nfrom rdkit import Chem\nfrom torch_geometric.data import Data as TorchGeometricData\nfrom torch_geometric.data import DataLoader\n\ndef get_edge_features(mol):\n edge_list= []\n num_bond_features=0\n for bond in mol.GetBonds():\n i = bond.GetBeginAtomIdx()\n j = bond.GetEndAtomIdx()\n bond_features = get_bond_features(bond)\n num_bond_features=len(bond_features)\n edge_list += [([i, j],bond_features), ([j, i],bond_features)]\n return edge_list, num_bond_features\n\n#modified mol2geodata\n#規格化関数\ndef rescaling(features):\n norm_features = []\n max_value = max(features)\n min_value = min(features)\n for feature in features:\n norm_feature = (feature - min_value)/(max_value - min_value)\n norm_features.append(norm_feature)\n \n return norm_features\n\ndef get_WF_results(mol):\n mol_props = ['Volume', 'Energy', 'HOMO', 'LUMO', 'HLgap', 'Mcharge_ave', 'Mcharge_var', 'Lcharge_ave', 'Lcharge_var', 'dipole', 'Atom_num', 'Mass', 'Density']\n atom_props = ['Mcharges', 'Lcharges', 'Mass', 'X_dem', 'Y_dem', 'Z_dem']\n mol_datalist = []\n WF_results = []\n for mol_prop in mol_props:\n mol_datalist.append(mol.GetDoubleProp(mol_prop))\n for atom in mol.GetAtoms():\n atom_data = []\n for atom_prop in atom_props:\n atom_data.append(atom.GetDoubleProp(atom_prop))\n molatom_data = mol_datalist + atom_data\n WF_results.append(molatom_data)\n return np.array(WF_results, dtype=np.float32)\n\ndef mol2geodataWF(mol,y):\n smile = Chem.MolToSmiles(mol)\n atom_features =[get_atom_features(atom) for atom in mol.GetAtoms()]\n WF_results = get_WF_results(mol)\n atom_features = np.append(atom_features, WF_results, axis=1)\n num_atom_features=len(atom_features[0])\n atom_features = torch.FloatTensor(atom_features).view(-1, len(atom_features[0]))\n\n edge_list,num_bond_features = get_edge_features(mol)\n edge_list=sorted(edge_list)\n \n edge_indices=[e for e,v in edge_list]\n edge_attributes=[v for e,v in edge_list]\n edge_indices = torch.tensor(edge_indices)\n edge_indices = 
edge_indices.t().to(torch.long).view(2, -1)\n edge_attributes = torch.FloatTensor(edge_attributes)\n #print(num_atom_features,num_bond_features)\n return TorchGeometricData(x=atom_features, edge_index=edge_indices, edge_attr=edge_attributes, num_atom_features=num_atom_features,num_bond_features=num_bond_features,smiles=smile, y=y) ",
"_____no_output_____"
],
[
"train_data_list = [mol2geodataWF(mol,y) for mol,y in zip(Xwf[\"train\"],Ywf[\"train\"])]\ntrain_loader = DataLoader(train_data_list, batch_size=128,shuffle=True)\n\nvalid_data_list = [mol2geodataWF(mol,y) for mol,y in zip(Xwf[\"valid\"],Ywf[\"valid\"])]\nvalid_loader = DataLoader(valid_data_list, batch_size=128,shuffle=True)\n\ntest_data_list = [mol2geodataWF(mol,y) for mol,y in zip(Xwf[\"test\"],Ywf[\"test\"])]\ntest_loader = DataLoader(test_data_list, batch_size=128,shuffle=True)",
"/home/kuma/anaconda3/envs/chem/lib/python3.7/site-packages/torch_geometric/deprecation.py:13: UserWarning: 'data.DataLoader' is deprecated, use 'loader.DataLoader' instead\n warnings.warn(out)\n"
],
[
"num_atom_features = train_data_list[0].num_atom_features\nnum_bond_features = train_data_list[0].num_bond_features\n\nprint(\"num_atom_features =\",num_atom_features)\nprint(\"num_bond_features =\",num_bond_features)",
"num_atom_features = 100\nnum_bond_features = 4\n"
],
[
"# ニューラルネットワークの構造の定義\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.nn import Sequential, Linear, ReLU, GRU\nimport torch_geometric.transforms as T\nfrom torch_geometric.nn import NNConv, Set2Set\n\ndim = 64 # 中間層の次元\n\nclass Net(torch.nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.lin0 = torch.nn.Linear(num_atom_features, dim)\n\n nn = Sequential(Linear(num_bond_features, 128), ReLU(), Linear(128, dim * dim))\n self.conv = NNConv(dim, dim, nn, aggr='mean')\n self.gru = GRU(dim, dim)\n\n self.set2set = Set2Set(dim, processing_steps=3)\n self.lin1 = torch.nn.Linear(2 * dim, dim)\n self.lin2 = torch.nn.Linear(dim, 1)\n\n def forward(self, data):\n out = F.relu(self.lin0(data.x))\n h = out.unsqueeze(0)\n\n for i in range(3):\n m = F.relu(self.conv(out, data.edge_index, data.edge_attr))\n out, h = self.gru(m.unsqueeze(0), h)\n out = out.squeeze(0)\n\n out = self.set2set(out, data.batch)\n out = F.relu(self.lin1(out))\n out = self.lin2(out)\n return out.view(-1)",
"_____no_output_____"
],
[
"# ニューラルネットワークの学習パラメータの定義\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nmodel = Net().to(device)\noptimizer = torch.optim.Adam(model.parameters(), lr=0.001)\nscheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',factor=0.7, patience=5,min_lr=0.00001)\nloss_function = torch.nn.BCEWithLogitsLoss()\n\ndef train_step(epoch):\n model.train()\n loss_all = 0\n\n for data in train_loader:\n data = data.to(device)\n optimizer.zero_grad()\n loss = loss_function(model(data), data.y)\n loss.backward()\n loss_all += loss.item() * data.num_graphs\n optimizer.step()\n return loss_all / len(train_loader.dataset)\n\ndef test_step(loader):\n model.eval()\n loss_all = 0\n for data in loader:\n data = data.to(device)\n loss = loss_function(model(data), data.y)\n loss_all += loss.item() * data.num_graphs\n return loss_all / len(loader.dataset)",
"_____no_output_____"
],
[
"# 学習開始\nEpoch_list = []\nLoss_list = []\nVal_list = []\nTest_list =[]\nbest_valid_loss = None\nfor epoch in range(200):\n lr = scheduler.optimizer.param_groups[0]['lr']\n loss = train_step(epoch)\n valid_loss = test_step(valid_loader)\n scheduler.step(valid_loss)\n\n if best_valid_loss is None or valid_loss <= best_valid_loss:\n test_loss = test_step(test_loader)\n best_valid_loss = valid_loss\n Epoch_list.append(epoch)\n Loss_list.append(loss)\n Val_list.append(valid_loss)\n Test_list.append(test_loss)\n print('Epoch: {:03d}, LR: {:7f}, Loss: {:.7f}, Validation loss: {:.7f}, '\n 'Test loss: {:.7f}'.format(epoch, lr, loss, valid_loss, test_loss))",
"Epoch: 000, LR: 0.001000, Loss: 1.1022893, Validation loss: 0.7813503, Test loss: 0.7936083\nEpoch: 001, LR: 0.001000, Loss: 0.7509977, Validation loss: 0.6890599, Test loss: 0.6853815\nEpoch: 002, LR: 0.001000, Loss: 0.7763703, Validation loss: 0.6077581, Test loss: 0.6216355\nEpoch: 003, LR: 0.001000, Loss: 0.5851493, Validation loss: 0.5758690, Test loss: 0.5758689\nEpoch: 004, LR: 0.001000, Loss: 0.6102653, Validation loss: 0.5740793, Test loss: 0.5740344\nEpoch: 005, LR: 0.001000, Loss: 0.5767765, Validation loss: 0.5125458, Test loss: 0.5281407\nEpoch: 006, LR: 0.001000, Loss: 0.5219769, Validation loss: 0.5413732, Test loss: 0.5281407\nEpoch: 007, LR: 0.001000, Loss: 0.5152275, Validation loss: 0.5674338, Test loss: 0.5281407\nEpoch: 008, LR: 0.001000, Loss: 0.5497352, Validation loss: 0.5466520, Test loss: 0.5281407\nEpoch: 009, LR: 0.001000, Loss: 0.5349560, Validation loss: 0.4862863, Test loss: 0.4902830\nEpoch: 010, LR: 0.001000, Loss: 0.5306335, Validation loss: 0.5513079, Test loss: 0.4902830\nEpoch: 011, LR: 0.001000, Loss: 0.5965828, Validation loss: 0.5120872, Test loss: 0.4902830\nEpoch: 012, LR: 0.001000, Loss: 0.5537541, Validation loss: 0.5727759, Test loss: 0.4902830\nEpoch: 013, LR: 0.001000, Loss: 0.5333004, Validation loss: 0.4653822, Test loss: 0.4782499\nEpoch: 014, LR: 0.001000, Loss: 0.4856902, Validation loss: 0.4649394, Test loss: 0.4722159\nEpoch: 015, LR: 0.001000, Loss: 0.4880463, Validation loss: 0.4503746, Test loss: 0.4872122\nEpoch: 016, LR: 0.001000, Loss: 0.4875964, Validation loss: 0.5182745, Test loss: 0.4872122\nEpoch: 017, LR: 0.001000, Loss: 0.5035200, Validation loss: 0.4464649, Test loss: 0.4749307\nEpoch: 018, LR: 0.001000, Loss: 0.4746122, Validation loss: 0.4328343, Test loss: 0.4715272\nEpoch: 019, LR: 0.001000, Loss: 0.4886920, Validation loss: 0.5126211, Test loss: 0.4715272\nEpoch: 020, LR: 0.001000, Loss: 0.4769054, Validation loss: 0.4398091, Test loss: 0.4715272\nEpoch: 021, LR: 0.001000, Loss: 0.4317808, 
Validation loss: 0.4502278, Test loss: 0.4715272\nEpoch: 022, LR: 0.001000, Loss: 0.4354163, Validation loss: 0.4159976, Test loss: 0.4404687\nEpoch: 023, LR: 0.001000, Loss: 0.4179990, Validation loss: 0.4050319, Test loss: 0.4504351\nEpoch: 024, LR: 0.001000, Loss: 0.4156325, Validation loss: 0.4397740, Test loss: 0.4504351\nEpoch: 025, LR: 0.001000, Loss: 0.4157171, Validation loss: 0.4087756, Test loss: 0.4504351\nEpoch: 026, LR: 0.001000, Loss: 0.3990990, Validation loss: 0.3758971, Test loss: 0.4072757\nEpoch: 027, LR: 0.001000, Loss: 0.3781215, Validation loss: 0.3746893, Test loss: 0.4057205\nEpoch: 028, LR: 0.001000, Loss: 0.3772288, Validation loss: 0.3876987, Test loss: 0.4057205\nEpoch: 029, LR: 0.001000, Loss: 0.3804100, Validation loss: 0.3647759, Test loss: 0.3981682\nEpoch: 030, LR: 0.001000, Loss: 0.3781648, Validation loss: 0.3499898, Test loss: 0.3788369\nEpoch: 031, LR: 0.001000, Loss: 0.3698474, Validation loss: 0.3640135, Test loss: 0.3788369\nEpoch: 032, LR: 0.001000, Loss: 0.3606850, Validation loss: 0.3590283, Test loss: 0.3788369\nEpoch: 033, LR: 0.001000, Loss: 0.4165170, Validation loss: 0.4326548, Test loss: 0.3788369\nEpoch: 034, LR: 0.001000, Loss: 0.4698488, Validation loss: 0.4845140, Test loss: 0.3788369\nEpoch: 035, LR: 0.001000, Loss: 0.4295208, Validation loss: 0.3818158, Test loss: 0.3788369\nEpoch: 036, LR: 0.001000, Loss: 0.4035889, Validation loss: 0.3996918, Test loss: 0.3788369\nEpoch: 037, LR: 0.000700, Loss: 0.3779145, Validation loss: 0.3854420, Test loss: 0.3788369\nEpoch: 038, LR: 0.000700, Loss: 0.3570196, Validation loss: 0.3385121, Test loss: 0.3763388\nEpoch: 039, LR: 0.000700, Loss: 0.3629736, Validation loss: 0.3533426, Test loss: 0.3763388\nEpoch: 040, LR: 0.000700, Loss: 0.3536679, Validation loss: 0.3509159, Test loss: 0.3763388\nEpoch: 041, LR: 0.000700, Loss: 0.3273053, Validation loss: 0.3583085, Test loss: 0.3763388\nEpoch: 042, LR: 0.000700, Loss: 0.3381919, Validation loss: 0.3294661, Test loss: 
0.3579082\nEpoch: 043, LR: 0.000700, Loss: 0.3364375, Validation loss: 0.3356038, Test loss: 0.3579082\nEpoch: 044, LR: 0.000700, Loss: 0.3292605, Validation loss: 0.3315363, Test loss: 0.3579082\nEpoch: 045, LR: 0.000700, Loss: 0.3149937, Validation loss: 0.3240954, Test loss: 0.3550546\nEpoch: 046, LR: 0.000700, Loss: 0.3201732, Validation loss: 0.3417162, Test loss: 0.3550546\nEpoch: 047, LR: 0.000700, Loss: 0.3191291, Validation loss: 0.3287192, Test loss: 0.3550546\nEpoch: 048, LR: 0.000700, Loss: 0.3202225, Validation loss: 0.3357893, Test loss: 0.3550546\nEpoch: 049, LR: 0.000700, Loss: 0.3279483, Validation loss: 0.3213335, Test loss: 0.3564931\nEpoch: 050, LR: 0.000700, Loss: 0.3278978, Validation loss: 0.3231611, Test loss: 0.3564931\nEpoch: 051, LR: 0.000700, Loss: 0.3217608, Validation loss: 0.3640405, Test loss: 0.3564931\nEpoch: 052, LR: 0.000700, Loss: 0.3480103, Validation loss: 0.3479157, Test loss: 0.3564931\nEpoch: 053, LR: 0.000700, Loss: 0.3216743, Validation loss: 0.3539492, Test loss: 0.3564931\nEpoch: 054, LR: 0.000700, Loss: 0.3056179, Validation loss: 0.3367120, Test loss: 0.3564931\nEpoch: 055, LR: 0.000700, Loss: 0.3132952, Validation loss: 0.3186079, Test loss: 0.3538866\nEpoch: 056, LR: 0.000700, Loss: 0.3084799, Validation loss: 0.3293298, Test loss: 0.3538866\nEpoch: 057, LR: 0.000700, Loss: 0.3157354, Validation loss: 0.3279295, Test loss: 0.3538866\nEpoch: 058, LR: 0.000700, Loss: 0.3019371, Validation loss: 0.3150683, Test loss: 0.3547535\nEpoch: 059, LR: 0.000700, Loss: 0.2969490, Validation loss: 0.3247770, Test loss: 0.3547535\nEpoch: 060, LR: 0.000700, Loss: 0.3093222, Validation loss: 0.3157025, Test loss: 0.3547535\nEpoch: 061, LR: 0.000700, Loss: 0.3187556, Validation loss: 0.3159379, Test loss: 0.3547535\nEpoch: 062, LR: 0.000700, Loss: 0.3126511, Validation loss: 0.3370150, Test loss: 0.3547535\nEpoch: 063, LR: 0.000700, Loss: 0.3307188, Validation loss: 0.3317004, Test loss: 0.3547535\nEpoch: 064, LR: 0.000700, Loss: 
0.3203799, Validation loss: 0.3592719, Test loss: 0.3547535\nEpoch: 065, LR: 0.000490, Loss: 0.3159533, Validation loss: 0.3353305, Test loss: 0.3547535\nEpoch: 066, LR: 0.000490, Loss: 0.3037580, Validation loss: 0.3439440, Test loss: 0.3547535\nEpoch: 067, LR: 0.000490, Loss: 0.3035647, Validation loss: 0.3328323, Test loss: 0.3547535\nEpoch: 068, LR: 0.000490, Loss: 0.2904192, Validation loss: 0.3342495, Test loss: 0.3547535\nEpoch: 069, LR: 0.000490, Loss: 0.2993240, Validation loss: 0.3191056, Test loss: 0.3547535\nEpoch: 070, LR: 0.000490, Loss: 0.2881199, Validation loss: 0.3223298, Test loss: 0.3547535\nEpoch: 071, LR: 0.000343, Loss: 0.2906183, Validation loss: 0.3150512, Test loss: 0.3551732\nEpoch: 072, LR: 0.000343, Loss: 0.2865221, Validation loss: 0.3403949, Test loss: 0.3551732\nEpoch: 073, LR: 0.000343, Loss: 0.2937562, Validation loss: 0.3198384, Test loss: 0.3551732\nEpoch: 074, LR: 0.000343, Loss: 0.3013451, Validation loss: 0.3133860, Test loss: 0.3412171\nEpoch: 075, LR: 0.000343, Loss: 0.3039692, Validation loss: 0.3086854, Test loss: 0.3511409\nEpoch: 076, LR: 0.000343, Loss: 0.3157706, Validation loss: 0.3026500, Test loss: 0.3382869\nEpoch: 077, LR: 0.000343, Loss: 0.3121213, Validation loss: 0.3233996, Test loss: 0.3382869\nEpoch: 078, LR: 0.000343, Loss: 0.3139203, Validation loss: 0.3152246, Test loss: 0.3382869\nEpoch: 079, LR: 0.000343, Loss: 0.2950204, Validation loss: 0.3633991, Test loss: 0.3382869\nEpoch: 080, LR: 0.000343, Loss: 0.3022848, Validation loss: 0.3275949, Test loss: 0.3382869\nEpoch: 081, LR: 0.000343, Loss: 0.2958733, Validation loss: 0.3041683, Test loss: 0.3382869\nEpoch: 082, LR: 0.000343, Loss: 0.2855295, Validation loss: 0.3222078, Test loss: 0.3382869\nEpoch: 083, LR: 0.000240, Loss: 0.2783342, Validation loss: 0.3174345, Test loss: 0.3382869\nEpoch: 084, LR: 0.000240, Loss: 0.2764900, Validation loss: 0.3185702, Test loss: 0.3382869\nEpoch: 085, LR: 0.000240, Loss: 0.2785434, Validation loss: 0.3191316, Test 
loss: 0.3382869\nEpoch: 086, LR: 0.000240, Loss: 0.2802823, Validation loss: 0.3146253, Test loss: 0.3382869\nEpoch: 087, LR: 0.000240, Loss: 0.2781254, Validation loss: 0.3234781, Test loss: 0.3382869\nEpoch: 088, LR: 0.000240, Loss: 0.2802150, Validation loss: 0.3205344, Test loss: 0.3382869\nEpoch: 089, LR: 0.000168, Loss: 0.2817008, Validation loss: 0.3094477, Test loss: 0.3382869\nEpoch: 090, LR: 0.000168, Loss: 0.2719039, Validation loss: 0.3025544, Test loss: 0.3430774\nEpoch: 091, LR: 0.000168, Loss: 0.2767632, Validation loss: 0.3057239, Test loss: 0.3430774\nEpoch: 092, LR: 0.000168, Loss: 0.2837609, Validation loss: 0.3303587, Test loss: 0.3430774\nEpoch: 093, LR: 0.000168, Loss: 0.2744859, Validation loss: 0.3051655, Test loss: 0.3430774\nEpoch: 094, LR: 0.000168, Loss: 0.2757913, Validation loss: 0.3051868, Test loss: 0.3430774\nEpoch: 095, LR: 0.000168, Loss: 0.2724852, Validation loss: 0.3128392, Test loss: 0.3430774\nEpoch: 096, LR: 0.000168, Loss: 0.2751624, Validation loss: 0.3094722, Test loss: 0.3430774\nEpoch: 097, LR: 0.000118, Loss: 0.2701912, Validation loss: 0.3062750, Test loss: 0.3430774\nEpoch: 098, LR: 0.000118, Loss: 0.2725306, Validation loss: 0.3026144, Test loss: 0.3430774\nEpoch: 099, LR: 0.000118, Loss: 0.2696800, Validation loss: 0.3070062, Test loss: 0.3430774\nEpoch: 100, LR: 0.000118, Loss: 0.2700669, Validation loss: 0.3096811, Test loss: 0.3430774\nEpoch: 101, LR: 0.000118, Loss: 0.2716623, Validation loss: 0.3120107, Test loss: 0.3430774\nEpoch: 102, LR: 0.000118, Loss: 0.2704542, Validation loss: 0.3087305, Test loss: 0.3430774\nEpoch: 103, LR: 0.000082, Loss: 0.2696460, Validation loss: 0.3132561, Test loss: 0.3430774\nEpoch: 104, LR: 0.000082, Loss: 0.2692551, Validation loss: 0.3133789, Test loss: 0.3430774\nEpoch: 105, LR: 0.000082, Loss: 0.2691385, Validation loss: 0.3101038, Test loss: 0.3430774\nEpoch: 106, LR: 0.000082, Loss: 0.2686936, Validation loss: 0.3047580, Test loss: 0.3430774\nEpoch: 107, LR: 0.000082, 
Loss: 0.2682578, Validation loss: 0.3047236, Test loss: 0.3430774\nEpoch: 108, LR: 0.000082, Loss: 0.2682005, Validation loss: 0.3037546, Test loss: 0.3430774\nEpoch: 109, LR: 0.000058, Loss: 0.2675677, Validation loss: 0.3037738, Test loss: 0.3430774\nEpoch: 110, LR: 0.000058, Loss: 0.2672077, Validation loss: 0.3048255, Test loss: 0.3430774\nEpoch: 111, LR: 0.000058, Loss: 0.2683663, Validation loss: 0.3088440, Test loss: 0.3430774\nEpoch: 112, LR: 0.000058, Loss: 0.2675101, Validation loss: 0.3062618, Test loss: 0.3430774\nEpoch: 113, LR: 0.000058, Loss: 0.2680358, Validation loss: 0.3050621, Test loss: 0.3430774\nEpoch: 114, LR: 0.000058, Loss: 0.2673548, Validation loss: 0.3069571, Test loss: 0.3430774\nEpoch: 115, LR: 0.000040, Loss: 0.2669780, Validation loss: 0.3063557, Test loss: 0.3430774\nEpoch: 116, LR: 0.000040, Loss: 0.2664638, Validation loss: 0.3091599, Test loss: 0.3430774\nEpoch: 117, LR: 0.000040, Loss: 0.2667936, Validation loss: 0.3082406, Test loss: 0.3430774\nEpoch: 118, LR: 0.000040, Loss: 0.2664978, Validation loss: 0.3044713, Test loss: 0.3430774\nEpoch: 119, LR: 0.000040, Loss: 0.2667568, Validation loss: 0.3031685, Test loss: 0.3430774\nEpoch: 120, LR: 0.000040, Loss: 0.2670614, Validation loss: 0.3038983, Test loss: 0.3430774\nEpoch: 121, LR: 0.000028, Loss: 0.2660326, Validation loss: 0.3067031, Test loss: 0.3430774\nEpoch: 122, LR: 0.000028, Loss: 0.2661030, Validation loss: 0.3081476, Test loss: 0.3430774\nEpoch: 123, LR: 0.000028, Loss: 0.2665847, Validation loss: 0.3088308, Test loss: 0.3430774\nEpoch: 124, LR: 0.000028, Loss: 0.2660490, Validation loss: 0.3069697, Test loss: 0.3430774\nEpoch: 125, LR: 0.000028, Loss: 0.2661320, Validation loss: 0.3058078, Test loss: 0.3430774\nEpoch: 126, LR: 0.000028, Loss: 0.2661548, Validation loss: 0.3058918, Test loss: 0.3430774\nEpoch: 127, LR: 0.000020, Loss: 0.2659233, Validation loss: 0.3060609, Test loss: 0.3430774\nEpoch: 128, LR: 0.000020, Loss: 0.2657497, Validation loss: 0.3059674, 
Test loss: 0.3430774\nEpoch: 129, LR: 0.000020, Loss: 0.2657343, Validation loss: 0.3061775, Test loss: 0.3430774\nEpoch: 130, LR: 0.000020, Loss: 0.2656359, Validation loss: 0.3055702, Test loss: 0.3430774\nEpoch: 131, LR: 0.000020, Loss: 0.2658054, Validation loss: 0.3048231, Test loss: 0.3430774\nEpoch: 132, LR: 0.000020, Loss: 0.2657088, Validation loss: 0.3052955, Test loss: 0.3430774\nEpoch: 133, LR: 0.000014, Loss: 0.2654007, Validation loss: 0.3064600, Test loss: 0.3430774\nEpoch: 134, LR: 0.000014, Loss: 0.2655058, Validation loss: 0.3065407, Test loss: 0.3430774\nEpoch: 135, LR: 0.000014, Loss: 0.2653850, Validation loss: 0.3053500, Test loss: 0.3430774\nEpoch: 136, LR: 0.000014, Loss: 0.2655606, Validation loss: 0.3041760, Test loss: 0.3430774\nEpoch: 137, LR: 0.000014, Loss: 0.2653821, Validation loss: 0.3043425, Test loss: 0.3430774\nEpoch: 138, LR: 0.000014, Loss: 0.2654751, Validation loss: 0.3043806, Test loss: 0.3430774\nEpoch: 139, LR: 0.000010, Loss: 0.2655248, Validation loss: 0.3048354, Test loss: 0.3430774\nEpoch: 140, LR: 0.000010, Loss: 0.2653794, Validation loss: 0.3041636, Test loss: 0.3430774\nEpoch: 141, LR: 0.000010, Loss: 0.2653657, Validation loss: 0.3038349, Test loss: 0.3430774\nEpoch: 142, LR: 0.000010, Loss: 0.2654139, Validation loss: 0.3035292, Test loss: 0.3430774\nEpoch: 143, LR: 0.000010, Loss: 0.2653212, Validation loss: 0.3037802, Test loss: 0.3430774\nEpoch: 144, LR: 0.000010, Loss: 0.2652999, Validation loss: 0.3045578, Test loss: 0.3430774\nEpoch: 145, LR: 0.000010, Loss: 0.2652095, Validation loss: 0.3052632, Test loss: 0.3430774\nEpoch: 146, LR: 0.000010, Loss: 0.2651844, Validation loss: 0.3057883, Test loss: 0.3430774\nEpoch: 147, LR: 0.000010, Loss: 0.2651977, Validation loss: 0.3058470, Test loss: 0.3430774\nEpoch: 148, LR: 0.000010, Loss: 0.2652154, Validation loss: 0.3048500, Test loss: 0.3430774\nEpoch: 149, LR: 0.000010, Loss: 0.2651127, Validation loss: 0.3049462, Test loss: 0.3430774\nEpoch: 150, LR: 
0.000010, Loss: 0.2651736, Validation loss: 0.3051907, Test loss: 0.3430774\nEpoch: 151, LR: 0.000010, Loss: 0.2650929, Validation loss: 0.3051945, Test loss: 0.3430774\nEpoch: 152, LR: 0.000010, Loss: 0.2650752, Validation loss: 0.3057942, Test loss: 0.3430774\nEpoch: 153, LR: 0.000010, Loss: 0.2650371, Validation loss: 0.3057987, Test loss: 0.3430774\nEpoch: 154, LR: 0.000010, Loss: 0.2650192, Validation loss: 0.3050964, Test loss: 0.3430774\nEpoch: 155, LR: 0.000010, Loss: 0.2650294, Validation loss: 0.3047560, Test loss: 0.3430774\nEpoch: 156, LR: 0.000010, Loss: 0.2650241, Validation loss: 0.3042805, Test loss: 0.3430774\nEpoch: 157, LR: 0.000010, Loss: 0.2651238, Validation loss: 0.3036898, Test loss: 0.3430774\nEpoch: 158, LR: 0.000010, Loss: 0.2650731, Validation loss: 0.3036155, Test loss: 0.3430774\nEpoch: 159, LR: 0.000010, Loss: 0.2649975, Validation loss: 0.3040957, Test loss: 0.3430774\nEpoch: 160, LR: 0.000010, Loss: 0.2649814, Validation loss: 0.3045808, Test loss: 0.3430774\nEpoch: 161, LR: 0.000010, Loss: 0.2648665, Validation loss: 0.3046505, Test loss: 0.3430774\nEpoch: 162, LR: 0.000010, Loss: 0.2648524, Validation loss: 0.3045637, Test loss: 0.3430774\nEpoch: 163, LR: 0.000010, Loss: 0.2649050, Validation loss: 0.3045060, Test loss: 0.3430774\nEpoch: 164, LR: 0.000010, Loss: 0.2648914, Validation loss: 0.3048037, Test loss: 0.3430774\nEpoch: 165, LR: 0.000010, Loss: 0.2652397, Validation loss: 0.3054456, Test loss: 0.3430774\nEpoch: 166, LR: 0.000010, Loss: 0.2650829, Validation loss: 0.3046892, Test loss: 0.3430774\nEpoch: 167, LR: 0.000010, Loss: 0.2649127, Validation loss: 0.3044707, Test loss: 0.3430774\nEpoch: 168, LR: 0.000010, Loss: 0.2647948, Validation loss: 0.3047272, Test loss: 0.3430774\nEpoch: 169, LR: 0.000010, Loss: 0.2647652, Validation loss: 0.3054370, Test loss: 0.3430774\nEpoch: 170, LR: 0.000010, Loss: 0.2647970, Validation loss: 0.3067233, Test loss: 0.3430774\nEpoch: 171, LR: 0.000010, Loss: 0.2648303, Validation loss: 
0.3075734, Test loss: 0.3430774\nEpoch: 172, LR: 0.000010, Loss: 0.2650716, Validation loss: 0.3075425, Test loss: 0.3430774\nEpoch: 173, LR: 0.000010, Loss: 0.2648916, Validation loss: 0.3069674, Test loss: 0.3430774\nEpoch: 174, LR: 0.000010, Loss: 0.2648852, Validation loss: 0.3064851, Test loss: 0.3430774\nEpoch: 175, LR: 0.000010, Loss: 0.2647483, Validation loss: 0.3061078, Test loss: 0.3430774\nEpoch: 176, LR: 0.000010, Loss: 0.2646999, Validation loss: 0.3063928, Test loss: 0.3430774\nEpoch: 177, LR: 0.000010, Loss: 0.2647037, Validation loss: 0.3061413, Test loss: 0.3430774\nEpoch: 178, LR: 0.000010, Loss: 0.2648043, Validation loss: 0.3058159, Test loss: 0.3430774\nEpoch: 179, LR: 0.000010, Loss: 0.2647680, Validation loss: 0.3057467, Test loss: 0.3430774\nEpoch: 180, LR: 0.000010, Loss: 0.2647270, Validation loss: 0.3058582, Test loss: 0.3430774\nEpoch: 181, LR: 0.000010, Loss: 0.2646206, Validation loss: 0.3056031, Test loss: 0.3430774\nEpoch: 182, LR: 0.000010, Loss: 0.2645297, Validation loss: 0.3049453, Test loss: 0.3430774\nEpoch: 183, LR: 0.000010, Loss: 0.2646042, Validation loss: 0.3041892, Test loss: 0.3430774\nEpoch: 184, LR: 0.000010, Loss: 0.2645920, Validation loss: 0.3041769, Test loss: 0.3430774\nEpoch: 185, LR: 0.000010, Loss: 0.2645620, Validation loss: 0.3041274, Test loss: 0.3430774\nEpoch: 186, LR: 0.000010, Loss: 0.2645098, Validation loss: 0.3044351, Test loss: 0.3430774\nEpoch: 187, LR: 0.000010, Loss: 0.2644311, Validation loss: 0.3048283, Test loss: 0.3430774\nEpoch: 188, LR: 0.000010, Loss: 0.2643759, Validation loss: 0.3046521, Test loss: 0.3430774\nEpoch: 189, LR: 0.000010, Loss: 0.2643343, Validation loss: 0.3044620, Test loss: 0.3430774\nEpoch: 190, LR: 0.000010, Loss: 0.2643566, Validation loss: 0.3042132, Test loss: 0.3430774\nEpoch: 191, LR: 0.000010, Loss: 0.2645731, Validation loss: 0.3040435, Test loss: 0.3430774\nEpoch: 192, LR: 0.000010, Loss: 0.2644878, Validation loss: 0.3042894, Test loss: 0.3430774\nEpoch: 193, 
LR: 0.000010, Loss: 0.2646412, Validation loss: 0.3043197, Test loss: 0.3430774\nEpoch: 194, LR: 0.000010, Loss: 0.2643866, Validation loss: 0.3043077, Test loss: 0.3430774\nEpoch: 195, LR: 0.000010, Loss: 0.2643713, Validation loss: 0.3040810, Test loss: 0.3430774\nEpoch: 196, LR: 0.000010, Loss: 0.2643674, Validation loss: 0.3033549, Test loss: 0.3430774\nEpoch: 197, LR: 0.000010, Loss: 0.2642897, Validation loss: 0.3036678, Test loss: 0.3430774\nEpoch: 198, LR: 0.000010, Loss: 0.2642007, Validation loss: 0.3033203, Test loss: 0.3430774\nEpoch: 199, LR: 0.000010, Loss: 0.2642181, Validation loss: 0.3030943, Test loss: 0.3430774\n"
],
[
"#グラフ化\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\nplt.plot(Epoch_list, Loss_list, label = 'Loss')\nplt.plot(Epoch_list, Val_list, label = 'Validation loss')\nplt.plot(Epoch_list, Test_list, label = 'Test loss')\n\n# 凡例を表示\nplt.legend(loc=5)\n\n# 軸ラベル\nplt.xlabel('Epoch')\nplt.ylabel('Loss')\n\nplt.show()",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a1fcb30235e7f9cf3542da45428c668b0dcbf7d
| 382,696 |
ipynb
|
Jupyter Notebook
|
refs/elements-of-statistical-learning/eosl14_unsupervised_learning.ipynb
|
obs145628/ml-notebooks
|
08a64962e106ec569039ab204a7ae4c900783b6b
|
[
"MIT"
] | 1 |
2020-10-29T11:26:00.000Z
|
2020-10-29T11:26:00.000Z
|
refs/elements-of-statistical-learning/eosl14_unsupervised_learning.ipynb
|
obs145628/ml-notebooks
|
08a64962e106ec569039ab204a7ae4c900783b6b
|
[
"MIT"
] | 5 |
2021-03-18T21:33:45.000Z
|
2022-03-11T23:34:50.000Z
|
refs/elements-of-statistical-learning/eosl14_unsupervised_learning.ipynb
|
obs145628/ml-notebooks
|
08a64962e106ec569039ab204a7ae4c900783b6b
|
[
"MIT"
] | 1 |
2019-12-23T21:50:02.000Z
|
2019-12-23T21:50:02.000Z
| 162.022015 | 56,540 | 0.858718 |
[
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nimport sys\nsys.path.append('../../pyutils')\nimport metrics\nimport utils",
"_____no_output_____"
]
],
[
[
"# Introduction",
"_____no_output_____"
],
[
"In unsupervised learing, one has a set of $N$ observations $x_i \\in \\mathbb{R}^p$, having joint density $P(X)$. \nThe goal is to infer properties of this density. \nAt very low dimension ($p \\leq 3$), several methods can directly estimate $P(X)$ for any $X$. But these methods fail in high-dimensions. \n\nIt can be used to:\n- Identify low-dimensional manifolds with high data density.\n- Cluster analysis finds multiple convex regions that contains modes of $P(X)$\n- Mixture modeling try to estimate $P(X)$ with a mixture of density functions.\n- Association rules: construct simple rules that describe regions of high density. \n\nIn unsupervised learning, there is no measure of success, it's difficult to prove the conclusions of the model.",
"_____no_output_____"
],
[
"# Association Rules",
"_____no_output_____"
],
[
"The general goal of association rules is to find values $v_1, \\text{...}, v_L$ such that the probability density $P(X=v_l)$ is relatively large. This problem is also called mode finding or bump hunting. For problems with a large number of values, the number of observations such that $X=v_l$ is usually too small to be reliable. \n\nOne solution is to seek regions of the $X$-space. Let $s_{j}$ a subset of values taken by feature v$X_j$. \nThe goal is to find $s_{1},\\text{...},s_p$ such that the folowing value is large:\n$$P \\left( \\bigcap_{j=1}^p (X_j \\in s_j) \\right)$$",
"_____no_output_____"
],
[
"## Market Basket Analysis\n\nThis problem is usually not feasible for $p$ and $N$ large. \n\nMarket Basket Analysis is a special case where all predictions are binary: $X_j \\in \\{ 0, 1 \\}$. \nThe goal is to find a subset of integers $\\mathcal{K} \\subset \\{ 1, \\text{...}, p \\}$ such that the following value is large:\n$$P \\left( \\bigcap_{k \\in \\mathcal{K}} (X_k = 1) \\right) = \\prod_{k \\in \\mathcal{J}} P(X_k = 1)$$\n\n$\\mathcal{K}$ is called an item set. This value is called the support or prevalente $T(\\mathcal{K})$. It can be estimated from the dataset:\n$$T(\\mathcal{K}) = \\frac{1}{N} \\sum_{i=1}^N \\prod_{k \\in \\mathcal{K}} x_{ik}$$ \n\nThe goal of the algorithm is to fing, given a lower bound $t$ for the support, all item sets with support greater than $t$:\n$$\\{ \\mathcal{K}_l | T(\\mathcal{K}_l) > t \\}$$\n\nThere are $2^J$ possible item sets, fortunately they are algorithms that allow to find the item sets without looking at all the possibilities.",
"_____no_output_____"
],
[
"## The Apriori Algorithm\n\nThis algorithm can handle very large $N$ and $p$ as long as the number of itemset with support greather than $t$ is small enough. \nIt uses the following property:\n$$\\mathcal{L} \\subseteq \\mathcal{K} \\implies T(\\mathcal{L}) \\geq T(\\mathcal{K})$$\n\nIt works by doing only a few passes through the training set. \nThe first pass over the data compute the support of all single-item sets, and discards all with support lower than $t$. \nThe following passes combine the remaining itemsets with the ones remaining after the first pass, and discard all with support lower than $t$. \nThe process stops when all itemsets are discarded. \n\nEach obtained itemset $\\mathcal{K}$ into two subsets such that: $A \\cup B = \\mathcal{K}$, written $A \\implies B$. \n\nThe support of the rule is written $T(A \\implies B) \\approx P(A \\cap B)$, it is the same as $T(\\mathcal{K})$. \nThe support is the proportion of observations having $A \\cap B$.\n\nThe confidence is the proportion of obversations having $B$ among all those having $A$. It is written $C(A \\implies B)$.\n$$C(A \\implies B) = \\frac{T(A \\implies B)}{T(A)} \\approx P(B|A)$$\n\nThe lift is how likely it is to have $A$ and $B$ relative to $B$. It is written $L(A \\implies B)$.\n$$L(A \\implies B) = \\frac{C(A \\implies B)}{T(B)} \\approx \\frac{P(B|A)}{P(B)}$$",
"_____no_output_____"
],
[
"## Example\n\nGiven a dataset of 7500 transactions from a french retail store, find associative rules from it\n\nDataset: [Link](https://drive.google.com/file/d/1y5DYn0dGoSbC22xowBq2d4po6h1JxcTQ/view)",
"_____no_output_____"
]
],
[
[
"import os\nfrom apyori import apriori\nfrom google_drive_downloader import GoogleDriveDownloader as gdd\n\nFILE_ID ='1y5DYn0dGoSbC22xowBq2d4po6h1JxcTQ'\nFILE_PATH = '/tmp/store_data.csv'\n\nif not os.path.isfile(FILE_PATH):\n gdd.download_file_from_google_drive(file_id=FILE_ID,\n dest_path=FILE_PATH)\n\n\ndata = pd.read_csv(FILE_PATH, header=None)\ndata.head()\n\nrecords = []\nfor i in range(data.shape[0]):\n records.append([str(data.values[i,j]) for j in range(data.shape[1]) if str(data.values[i,j]) != 'nan'])",
"Downloading 1y5DYn0dGoSbC22xowBq2d4po6h1JxcTQ into /tmp/store_data.csv... Done.\n"
],
[
"rules = apriori(records, min_support=0.0235, \n min_confidence=0.1, \n min_lift=1.5, min_length=2) \n\nres = list(rules)\nprint(len(res))",
"11\n"
],
[
"for item in res:\n s = ''\n stats = item.ordered_statistics[0]\n for x in stats.items_base:\n s += str(x) + '; '\n s += '=> '\n for x in stats.items_add:\n s += str(x) + '; '\n s += ('S = {:.4f}, C = {:.4f}, L = {:.4f}'.format(item.support,\n stats.confidence,\n stats.lift))\n print(s)",
"burgers; => eggs; S = 0.0288, C = 0.3303, L = 1.8378\nchocolate; => milk; S = 0.0321, C = 0.1961, L = 1.5133\nfrozen vegetables; => milk; S = 0.0236, C = 0.2476, L = 1.9104\nfrozen vegetables; => mineral water; S = 0.0357, C = 0.3748, L = 1.5725\nfrozen vegetables; => spaghetti; S = 0.0279, C = 0.2923, L = 1.6789\nground beef; => mineral water; S = 0.0409, C = 0.4166, L = 1.7475\nground beef; => spaghetti; S = 0.0392, C = 0.3989, L = 2.2912\nmilk; => mineral water; S = 0.0480, C = 0.3704, L = 1.5538\nmilk; => spaghetti; S = 0.0355, C = 0.2737, L = 1.5718\nmineral water; => olive oil; S = 0.0276, C = 0.1158, L = 1.7579\npancakes; => spaghetti; S = 0.0252, C = 0.2651, L = 1.5225\n"
],
[
"\nclass AprioriRule:\n \n def __init__(self, a, b, support, confidence, lift):\n self.a = a\n self.b = b\n self.support = support\n self.confidence = confidence\n self.lift = lift\n\n\nclass Apriori:\n \n \n def __init__(self, min_support, min_confidence,\n min_lift, min_length):\n self.min_support = min_support\n self.min_confidence = min_confidence\n self.min_lift = min_lift\n self.min_length = min_length\n \n def fit(self, data):\n \n #1) build dict of words\n self.lwords = []\n self.dwords = dict()\n for entry in data:\n for w in entry:\n if not w in self.dwords:\n self.dwords[w] = len(self.lwords)\n self.lwords.append(w)\n \n #2) build data matrix\n self.X = np.zeros((len(data), len(self.lwords)))\n for i in range(len(data)):\n for w in data[i]:\n self.X[i, self.dwords[w]] = 1\n \n\n \n #3) first pass through dataset\n rules = []\n res = []\n for j in range(self.X.shape[1]):\n items = [j]\n s = self.get_support(items)\n if s >= self.min_support:\n res.append(items)\n rules.append(items)\n \n res1 = list(res)\n \n \n # 4) other passes through dataset until no itemset found\n while len(res) > 0:\n \n res_next = []\n for items in res:\n for other in res1:\n if other[0] > items[-1]:\n items_ext = items + other\n s = self.get_support(items_ext)\n if s >= self.min_support:\n res_next.append(items_ext)\n rules.append(items_ext)\n \n res = res_next\n \n # 5) remove lists too short\n rules = [x for x in rules if len(x) >= self.min_length]\n \n # 6) divide rules into A => B rules\n rules_ex = []\n for r in rules:\n rules_ex += self.split_rule(r)\n rules = rules_ex\n \n # 7) compute all rules stats\n rules = [self.build_rule(r) for r in rules]\n \n # 8) filter rules\n rules = [r for r in rules if r.confidence > self.min_confidence\n and r.lift > self.min_lift]\n\n self.rules = rules\n \n \n def get_support(self, items):\n n = 0\n for x in self.X:\n val = 1\n for it in items:\n if x[it] == 0:\n val = 0\n break\n n += val\n \n return n / len(self.X)\n \n def 
split_rule(self, r):\n res = []\n for i in range(len(r) - 1):\n p1 = r[:i+1]\n p2 = r[i+1:]\n res.append((p1, p2))\n return res\n \n def build_rule(self, r):\n sab = self.get_support(r[0] + r[1])\n sa = self.get_support(r[0])\n sb = self.get_support(r[1])\n support = sab\n confidence = support / sa\n lift = confidence / sb\n \n wa = [self.lwords[x] for x in r[0]]\n wb = [self.lwords[x] for x in r[1]]\n return AprioriRule(wa, wb, support, confidence, lift)\n \nmod = Apriori(min_support=0.0235, \n min_confidence=0.1, \n min_lift=1.5, min_length=2)\n\nmod.fit(records)\n\nprint(len(mod.rules))",
"11\n"
],
[
"for item in mod.rules:\n s = ''\n for x in item.a:\n s += str(x) + '; '\n s += '=> '\n for x in item.b:\n s += str(x) + '; '\n s += ('S = {:.4f}, C = {:.4f}, L = {:.4f}'.format(item.support,\n item.confidence,\n item.lift))\n print(s)",
"mineral water; => olive oil; S = 0.0276, C = 0.1158, L = 1.7579\nmineral water; => milk; S = 0.0480, C = 0.2013, L = 1.5538\nmineral water; => frozen vegetables; S = 0.0357, C = 0.1499, L = 1.5725\nmineral water; => ground beef; S = 0.0409, C = 0.1717, L = 1.7475\nburgers; => eggs; S = 0.0288, C = 0.3303, L = 1.8378\nmilk; => frozen vegetables; S = 0.0236, C = 0.1821, L = 1.9104\nmilk; => spaghetti; S = 0.0355, C = 0.2737, L = 1.5718\nmilk; => chocolate; S = 0.0321, C = 0.2479, L = 1.5133\nfrozen vegetables; => spaghetti; S = 0.0279, C = 0.2923, L = 1.6789\nspaghetti; => pancakes; S = 0.0252, C = 0.1447, L = 1.5225\nspaghetti; => ground beef; S = 0.0392, C = 0.2251, L = 2.2912\n"
]
],
[
[
"## Unsupervised as Supersived learning\n\nWe are trying to estimate the probability density $g(x)$. \nWe onyl have access to a reference probability density $g_0(x)$. It could be for example the uniform density over the range of the variables. We can easily sample $N_0$ observations from $g_0(x)$. \nWe also have the dataset $x_1,\\text{...},x_N$, an i.i.d. random sample drawn from $g(x)$. \n\nLet's pool this two datasets together and assign mass $w = \\frac{N_0}{N+N_0}$ to those drawn from $g(x)$, and $w_0 = \\frac{N}{N+N_0}$ to those drawn from $g_0(x)$. We get a mixture density $\\frac{g(x) + g_0(x)}{2}$. \n\nIf we assign $Y=1$ to sample draw from $g(x)$ and $Y=0$ to those draw from $g_0(x)$, we get:\n$$\\mu(x) = E(Y|x) = \\frac{g(x)}{g(x) + g_0(x)}$$ \n\n$\\mu_x$ can be estimated by supervised learning by combining the $N$ samples from $g(x)$ with $Y=1$, and the $N_0$ samples from $g_0(x)$ with $Y=0$. \nThen, we can get an estimate for $g(x)$:\n$$\\hat{g}(x) = g_0(x) \\frac{\\hat{\\mu}(x)}{1 - \\hat{\\mu}(x)}$$ \n\nThe accuracy of $\\hat{g}(x)$ greatly depends on the choice of $g_0(x)$.",
"_____no_output_____"
],
[
"## Generalized Association rules\n\nThe goal is to find a subset of integers $\\mathcal{J} \\subset \\{ 1, 2, \\text{...}, p \\}$ and the corresponding value subjects $s_j$ so that the following value is large:\n$$P \\left( \\bigcap_{j \\in \\mathcal{J}} (X_j \\in s_j) \\right)$$ \nThis can be estimated by:\n$$\\frac{1}{N} \\sum_{i=1}^N I \\left( \\bigcap_{j \\in \\mathcal{J}} (x_{ij} \\in s_j) \\right)$$ \n\nThis favors the discovery of itemsets whose marginal constituents $(X_j \\in s_j)$ are frequent, that is the following value is large:\n$$\\frac{1}{N} \\sum_{i=1}^N I(x_{ij} \\in s_j)$$ \n\nA good reference distribution is the product of the marginal distributions:\n\n$$g_0(x) = \\prod_{j=1}^J g_j(x_j)$$\n\nA sample from $g_0(x)$ is easily generated from the original dataset by appliying different random permutation to the data values of each of the variables.\n\nAfter drawing samples from $g_0(x)$, we get a training dataset for supervised learning, with $Y \\in \\{ 0, 1 \\}$. \nThe goal is to use this data to find regrions:\n$$R = \\bigcap{j \\in \\mathcal{J}} (X_j \\in s_j)$$\n\nfor which $\\mu(x) = E(Y|x)$ is large. \nOne might also require that the support os these regions is big enough:\n$$T(R) = \\int_{x \\in R} g(x)dx$$ \n\nDecision trees are such a model, each leaf $t$ represent a region $R$:\n$$\\bar{y}_t = \\text{ave}(y_i|x_i \\in t)$$\nThe actual data support is given by:\n$$T(R) = \\bar{y}_t \\frac{N_t}{N + N_0}$$\nwith $R_t$ the number of observations in the leaf $t$.",
"_____no_output_____"
]
],
[
[
"from sklearn.tree import DecisionTreeRegressor\n\nclass GeneralizedAssosRules:\n \n \n def __init__(self, t):\n self.t = t\n \n def fit(self, X):\n N, p = X.shape\n N0 = 2*N\n \n X0 = X[np.random.choice(N, size=N0, replace=True)]\n for j in range(p):\n X0[:, j] = X0[np.random.permutation(N0), j]\n \n Xe = np.vstack((X, X0))\n ye = np.vstack((np.ones((N, 1)), np.zeros((N0, 1))))\n \n self.tree = DecisionTreeRegressor()\n self.tree.fit(Xe, ye)\n \n \n print(Xe.shape)\n print(ye.shape)\n \n \nX = np.random.randn(124, 7)\nmod = GeneralizedAssosRules(0.2)\n\nmod.fit(X)",
"(372, 7)\n(372, 1)\n"
]
],
[
[
"# Cluster Analysis\n\nIt consists of grouping a collection of obects into subsets (called clusters) such that those within each cluster are more closely related to one another than objects from other clusters. \nTo form clusters, an important notion is the degree of similarity or dissimmalirity between two objects. \nIt can be a distance, like the euclidian distance. It is used for example by K-Means clustering which use a top-down procedure to build clusters. \nOther approches are mostly bottom-up.",
"_____no_output_____"
],
[
"## Proximity Matrices\n\n$Let D \\in \\mathbb{R}^{N*N}$ the matrix of dissimilarities, with $N$ the number of objects. \n$D_{ij}$ represents the proximity between object $i$ and object $j$. Usually it's symmetric matrix with nonnegative entries, and zeroes on the diagonal. \n\nWe usually have $x_ij$ with $N$ observations and $p$ features. We need to compute the dissimilarity between 2 observations in order to build $D$. One solution is to use a dissimilarity $d_j(x_{ij}, x_{i'j})$ for each feature:\n$$D(x_i, x_{i'}) = \\sum_{j=1}^p d_j(x_{ij}, x_{i'j})$$\n\nFor quantitative variables, we define an error: $d(x_i, x_{i'}) = l(|x_i - x_{i'}|)$. Usually it's the squared error loss, or the absolute error. \nOr in cal also be based on correlation:\n$$p(x_i, x_{i'}) = \\frac{\\sum_j (x_{ij} - \\bar{x}_i)(x_{i'j} - \\bar{x}_{i'})}{\\sqrt{\\sum_j (x_{ij} - \\bar{x}_i)^2 \\sum_j (x_{i'j} - \\bar{x}_{i'})^2}}$$\n\nwith $\\bar{x}_i = \\sum_{j} x_{ij}/p$. \n\nFor ordinal variables, we usually replace their $M$ original values by:\n$$\\frac{i - 1/2}{M}$$\nwith $i$ the original order of the variable. \nThen they are treated as quantitative variables. \n\nFor categorical variables, the dissemilarity must be defined explicitly, by using a $M*M$ matrix for examples. \n\nThey are several ways to combine all $d_j(x_{ij}, x_{i'j})$. It can be with a weighted average:\n$$D(x_i, x_{i'}) = \\sum_{j=1}^p w_j d_j(x_{ij}, x_{i'j})$$\n$$\\text{with } \\sum_{j=1}^p w_j = 1$$\n\nSetting $w_j = 1/j$ does not give all attribute equal influence. To get equal influence, you should set $w_j = 1/\\bar{d}_j$ with:\n$$\\bar{d}_j = \\frac{1}{N^2} \\sum_{i=1}^N \\sum_{i'=1}^N d_j(x_{ij}, x_{i'j})$$ \nThis seems a reasonable idea, but may be counterproductive. To cluser data, you may not want all attributes to contribute equally.",
"_____no_output_____"
],
[
"## Clustering Algorithms\n\nThe goal of clustering os to partition data into groups so that the dissimilarities between those assigned to the same cluster are smaller than those in different clusters. \nThey fall into three types:\n- combinatorial algorithms\n- mixture modelling\n- mode seeking",
"_____no_output_____"
],
[
"## Combinatorial algorithms\n\nThese algorithms assign each observation to a cluster without any probability model. Each observation $x_i$ is assigned to a cluster $k \\in \\{1, \\text{...}, K \\}$. \nThese assignments can be characterized by an encoder: $k = C(i)$. \nThe models looks for $C^*(i)$ that achieves a particular goal. It is adjusted to minimize a loss function that charactherize the clustering goal. \n\nOne possible loss is the within-cluster point scatter. It make observations in the same cluster as close as possible:\n$$W(C) = \\frac{1}{2} \\sum_{k=1}^K \\sum_{C(i)=k} \\sum_{C(i')=k} d(x_i, x_{i'})$$\n\nAnother loss is the between-cluster point scatter. It makes observations in different cluster as far as possible:\n$$B(C) = \\frac{1}{2} \\sum_{k=1}^K \\sum_{C(i)=k} \\sum_{C(i')\\neq k} d(x_i, x_{i'})$$\n\nMinimize $W(C)$ is equivalent to maximize $B(C)$. \nTh total point scatter $T$ is a constant given the data, independant of cluster assignment.\n$$T = W(C) + B(C)$$ \n\nMinimize this loss function by testing all assignments is intractable. For only $N=19$ and $K=4$, they are around $10^{10}$ possible assignments. \nAlgorithms are often based on iterative greedy descent. It starts with initial assignments, that are changed in each step, in a way to reduce the loss function. The algorithm terminates when there is no possible improvment. But the result is a local optima, which may be highly suboptimal compared to the global optimum.",
"_____no_output_____"
],
[
"## K-Means",
"_____no_output_____"
],
[
"K-Means is a combinatorial algorithm that uses the squared Euclidian distance:\n$$d(x_i, x_{i'}) = ||x_i - x_{i'}||^2$$\n\nWe are minimizing the within-cluster distance:\n$$W(C) = \\sum_{k=1}^K N_k \\sum_{C(i) = k} ||x_i - \\bar{x}_k||^2$$\nwith $\\bar{x}_k$ the mean of all observations in cluster $k$, and $N_k$ the number of observations in cluster $K$\n\nWe are trying to solve:\n$$C^* = \\min_C \\sum_{k=1}^K N_k \\sum_{C(i) = k} ||x_i - \\bar{x}_k||^2$$\n\nThe K-Means algorithm is reaally simple:\n1. Initialize the $R$ clusters randomly (from training set)\n2. Repeat until convergence:\n - Assign each training point to the closest centroid\n - The center of each cluster becomes the mean of all its assigned points\n \nEach step reduce the loss function, but it converges only to a local mininum. \nOne should start the algorithm with many different random inititialization, and choose the one with the lowest loss.",
"_____no_output_____"
]
],
[
[
"from sklearn.datasets import load_iris\nfrom sklearn.decomposition import PCA\n\n\nclass KMeansClustering:\n \n def __init__(self, R, nstarts = 100):\n self.R = R\n self.nstarts = nstarts\n \n def fit(self, X):\n \n best_loss = float('inf')\n best_means = None\n \n for _ in range(self.nstarts):\n self.train(X)\n loss = self.get_loss(X)\n if loss < best_loss:\n best_loss = loss\n best_means = self.means\n \n self.means = best_means\n \n \n def train(self, X):\n \n N, p = X.shape \n self.means = X[np.random.choice(N, self.R)]\n \n while True:\n old_means = self.means.copy()\n \n #assign each point to the closest cluster\n ctrs = [list() for _ in range(self.R)]\n for x in X:\n ctrs[self.get_closest_ctr_idx(x)].append(x)\n \n \n # compute the new center position of every cluster\n for i in range(self.R):\n if len(ctrs[i]) != 0:\n self.means[i] = np.mean(np.vstack(ctrs[i]), axis=0)\n \n \n if np.linalg.norm(old_means - self.means) < 1e-6:\n break\n \n def get_loss(self, X):\n \n #assign each point to the closest cluster\n ctrs = [list() for _ in range(self.R)]\n for x in X:\n ctrs[self.get_closest_ctr_idx(x)].append(x)\n \n #compute distance between each point and the cluster center\n loss = 0\n for k in range(self.R):\n for x in ctrs[k]:\n loss += len(ctrs[k])*(x-self.means[k]) @ (x-self.means[k])\n return loss\n \n \n def get_closest_ctr_idx(self, x):\n min_idx = None\n min_dist = float('inf')\n for i in range(self.R):\n dist = (x - self.means[i]) @ (x - self.means[i])\n if dist < min_dist:\n min_idx = i\n min_dist = dist\n\n return min_idx\n \n def predict(self, X):\n y = np.empty(len(X)).astype(np.int)\n for i in range(len(X)):\n y[i] = self.get_closest_ctr_idx(X[i])\n return y\n \n \nX, y = load_iris().data, load_iris().target\nX = X[np.random.permutation(len(X))]\npca = PCA(n_components=2)\nX = pca.fit_transform(X)\nX = X - np.mean(X, axis=0)\nX = X / np.std(X, axis=0)\nprint(X.shape)\nprint(y.shape)\nmod = KMeansClustering(3)\nmod.fit(X)\ncolors = [ ['red', 
'blue', 'green'][x] for x in mod.predict(X)]\nprint('loss:', mod.get_loss(X))\n\nplt.scatter(X[:,0], X[:,1], c=colors)\nplt.show()",
"(150, 2)\n(150,)\nloss: 4613.551120505142\n"
]
],
[
[
"## Gaussian Mixtures as Soft K-Means\n\nK-Means is closely related to estimating a Gaussian mixture with the EM algorithm. \n- The E-step assign weight to each data point based on it's relative density under each mixture component (closeness)\n- The M-step recompute the component density based on current weights (mean / covariance)\n\nIf every Gaussian have covariance matrix $\\sigma^2 I$, the relative density under each mixture is a monote function of the euclidian distance between the data point and the mixture center. Hence EM as a soft K-Means, making probabalistic (rather than deterministic) assigment of points to cluster centers. \nAs $\\sigma^2 \\to 0$, the probabilities become $0$ and $1$, and the two methods coincide.",
"_____no_output_____"
],
[
"## Vector Quantization\n\nVector Quantization is a compression technique in image / signal processing, using K-Means. \n\nThe prodecure is:\n- Break the image into small blocks, for example for a $1024*1024$ image break into $2*2$ blocks, we get $512*512$ vectors in $\\mathbb{R}^4$\n\n- A K-Means is run on the blocks. As $K$ increases, the quality of the image and the compressed size decrease. Each block is approximated by it's closest centroid.\n\n- We just need to store the $K$ centroids vectors, and the index of the closest centroid of all the blocks.\n\n- To reconstruct the image, each block become it's closest centroid, and the blocks are converted to an image \n\nThis works because with typical images many blocks look the same. It only require only one block each to represent them. \nWe can go further by applying a hierarchical K-Means, or using a variable coding length.",
"_____no_output_____"
]
],
[
[
"from PIL import Image\nfrom sklearn.cluster import KMeans\n\nclass VectorQuantization:\n \n \n def __init__(self, K, bsize = 4):\n self.K = K\n self.bsize = 2\n \n def img2block(self, X):\n s = X.shape[0]\n res = np.empty((s//self.bsize, s//self.bsize,\n self.bsize*self.bsize)).astype(np.int)\n \n for i in range(res.shape[0]):\n for j in range(res.shape[1]):\n res[i, j] = np.array([\n X[2*i,2*j],X[2*i+1,2*j],X[2*i,2*j+1],X[2*i+1,2*j+1]\n ])\n \n return res.reshape(-1, self.bsize*self.bsize)\n \n def block2img(self, b):\n s2 = int(np.sqrt(b.shape[0]))\n b = b.reshape(s2, s2, self.bsize*self.bsize)\n \n X = np.empty((s2*self.bsize, s2*self.bsize)).astype(np.int)\n for i in range(s2):\n for j in range(s2):\n X[2*i,2*j] = b[i,j,0]\n X[2*i+1,2*j] = b[i,j,1]\n X[2*i,2*j+1] = b[i,j,2]\n X[2*i+1,2*j+1] = b[i,j,3]\n \n return X\n \n \n def encode(self, img):\n b = self.img2block(img)\n \n clf = KMeans(n_clusters=self.K, n_init=1)\n clf.fit(b)\n \n code = clf.labels_\n centers = clf.cluster_centers_\n return code, centers\n \n def decode(self, code, centers):\n b = np.empty((len(code), self.bsize*self.bsize)).astype(np.int)\n for i in range(len(b)):\n b[i] = centers[code[i]]\n \n return self.block2img(b)\n \nIMG_URL = 'https://i.ytimg.com/vi/J4Q86j9HOao/hqdefault.jpg'\nIMG_PATH = '/tmp/img.jpg'\nutils.dl_file(IMG_URL, IMG_PATH)\nX = Image.open(IMG_PATH)\nX = X.resize((256,256), Image.ANTIALIAS)\nX = X.convert('L')\nX = np.asarray(X.getdata(),dtype=np.int).reshape((X.size[1],X.size[0]))\n\nvq200 = VectorQuantization(K=200)\ncode, centers = vq200.encode(X)\nX2 = vq200.decode(code, centers)\n\nvq4 = VectorQuantization(K=4)\ncode, centers = vq4.encode(X)\nX3 = vq4.decode(code, centers)\n\nprint(metrics.tdist(X, X2))\nprint(metrics.tdist(X, X3))\n\nplt.imshow(X, cmap='gray')\nplt.show()\nplt.imshow(X2, cmap='gray') \nplt.show()\nplt.imshow(X3, cmap='gray') \nplt.show()",
"1206.3606425940793\n4790.491728413692\n"
]
],
[
[
"## K-medoids\n\nK-Means is appropriate when the dissimilarity measure $D(x_i, x_{i'})$ is the Euclidean distance. This requires all variables to be of quantitative type, and the procedure lacks robustness on outliers. \nThe algorithm can be generalized to any $D(x_i, x_{i'})$. We don't need the inputs $x$, only the distances. \nIt's far more expensive to compute than K-Means.\n\nK-medoids algorithm:\n1. Start with a particular initialization $C(i)$\n2. Repeat until the cluster assignments $C(i)$ don't change:\n    - For each cluster $k$, find the cluster center $m_k$:\n    $$m_k = \\arg \\min_{ \\{i:C(i)=k \\} } \\sum_{C(i')=k} D(x_i, x_{i'})$$\n    \n    - Minimize the total error by assigning each observation to the closest cluster:\n    $$C(i) = \\arg \\min_k D(x_i, m_k)$$\n    ",
"_____no_output_____"
]
],
[
[
"from sklearn.datasets import load_iris\nfrom sklearn.decomposition import PCA\n\nclass KMedoidsClustering:\n \n \n def __init__(self, K):\n self.K = K\n \n def fit(self, X):\n N, p = X.shape\n self.centers = [None] * self.K\n \n # build distance matrix\n D = np.empty((N, N))\n for i in range(N):\n for j in range(N):\n D[i,j] = (X[i] - X[j]) @ (X[i] - X[j])\n X = None #X is useless, we only need D\n \n # initialization\n #assign each point ro a random cluster\n ctrs = [list() for _ in range(self.K)]\n for i in range(N):\n ctrs[np.random.randint(0, self.K)].append(i)\n \n \n while True:\n \n #estimate cluster centers\n for k in range(self.K):\n best_i = None\n best_dist = float('inf')\n ck = ctrs[k]\n for i in ck:\n dist = 0\n for i2 in ck:\n dist += D[i, i2]\n if dist < best_dist:\n best_dist = dist\n best_i = i\n \n self.centers[k] = best_i\n \n ##\n old_ctrs = ctrs\n ctrs = [list() for _ in range(self.K)]\n \n # assign each point to the closest cluster center\n for i in range(N):\n best_k = None\n best_dist = float('inf')\n for k in range(self.K):\n dist = D[i, self.centers[k]]\n if dist < best_dist:\n best_dist = dist\n best_k = k\n ctrs[best_k].append(i)\n \n \n #stop only if the assigments didn't changed\n changed = False\n for k in range(self.K):\n if ctrs[k] != old_ctrs[k]:\n changed = True\n break\n if not changed:\n break\n \n \n #build labels vectors\n self.labels = np.empty(N).astype(np.int)\n for k in range(self.K):\n for i in ctrs[k]:\n self.labels[i] = k\n \n \n \n \n \nX, y = load_iris().data, load_iris().target\nX = X[np.random.permutation(len(X))]\npca = PCA(n_components=2)\nX = pca.fit_transform(X)\nX = X - np.mean(X, axis=0)\nX = X / np.std(X, axis=0)\nprint(X.shape)\nprint(y.shape)\nmod = KMedoidsClustering(3)\nmod.fit(X)\ncolors = [ ['red', 'blue', 'green'][x] for x in mod.labels]\n\nplt.scatter(X[:,0], X[:,1], c=colors)\nplt.show()",
"(150, 2)\n(150,)\n"
]
],
[
[
"## Initialization\n\nIt can be defined by specifying an initial set of centers $\\{ m_1, \\text{...}, m_K \\}$ or an initial encoder $C(i)$. Specifying the centers is usually more convenient. \nA strategy based on forward stepwise assignment is derived, called K-Means++. \n\nK-Means++ algorithm: \n\n1. Initialize the first center $m_1$ uniformly at random from all observations.\n2. For $k=2 \\to K$:\n\n    - Compute for every observation the distance with the closest of the already chosen centroids:\n    $$D(i) = \\min_{c = \\{ m_1, \\text{...}, m_{k-1} \\} } D(x_i, c)$$\n    \n    - Choose the center $m_k$ from a weighted probability distribution of $X$, with weights $D(i)^2$ ",
"_____no_output_____"
]
],
[
[
"from sklearn.datasets import load_iris\nfrom sklearn.decomposition import PCA\n\n\nclass KMeansClustering:\n \n def __init__(self, R, nstarts = 100):\n self.R = R\n self.nstarts = nstarts\n \n def fit(self, X):\n \n best_loss = float('inf')\n best_means = None\n \n for _ in range(self.nstarts):\n self.train(X)\n loss = self.get_loss(X)\n if loss < best_loss:\n best_loss = loss\n best_means = self.means\n \n self.means = best_means\n \n \n def train(self, X):\n \n N, p = X.shape\n \n # K-means++ Initialization\n self.means = np.empty((self.R, p))\n self.means[0] = X[np.random.choice(N)]\n \n for k in range(1, self.R):\n d = np.empty(N)\n for i in range(N):\n d[i] = min([(X[i]-self.means[c])@(X[i]-self.means[c])\n for c in range(k)])\n d /= np.sum(d)\n self.means[k] = X[np.random.choice(N, p=d)]\n \n \n while True:\n old_means = self.means.copy()\n \n #assign each point to the closest cluster\n ctrs = [list() for _ in range(self.R)]\n for x in X:\n ctrs[self.get_closest_ctr_idx(x)].append(x)\n \n \n # compute the new center position of every cluster\n for i in range(self.R):\n if len(ctrs[i]) != 0:\n self.means[i] = np.mean(np.vstack(ctrs[i]), axis=0)\n \n \n if np.linalg.norm(old_means - self.means) < 1e-6:\n break\n \n def get_loss(self, X):\n \n #assign each point to the closest cluster\n ctrs = [list() for _ in range(self.R)]\n for x in X:\n ctrs[self.get_closest_ctr_idx(x)].append(x)\n \n #compute distance between each point and the cluster center\n loss = 0\n for k in range(self.R):\n for x in ctrs[k]:\n loss += len(ctrs[k])*(x-self.means[k]) @ (x-self.means[k])\n return loss\n \n \n def get_closest_ctr_idx(self, x):\n min_idx = None\n min_dist = float('inf')\n for i in range(self.R):\n dist = (x - self.means[i]) @ (x - self.means[i])\n if dist < min_dist:\n min_idx = i\n min_dist = dist\n\n return min_idx\n \n def predict(self, X):\n y = np.empty(len(X)).astype(np.int)\n for i in range(len(X)):\n y[i] = self.get_closest_ctr_idx(X[i])\n return y\n \n \nX, y = 
load_iris().data, load_iris().target\nX = X[np.random.permutation(len(X))]\npca = PCA(n_components=2)\nX = pca.fit_transform(X)\nX = X - np.mean(X, axis=0)\nX = X / np.std(X, axis=0)\nprint(X.shape)\nprint(y.shape)\nmod = KMeansClustering(3)\nmod.fit(X)\ncolors = [ ['red', 'blue', 'green'][x] for x in mod.predict(X)]\nprint('loss:', mod.get_loss(X))\n\nplt.scatter(X[:,0], X[:,1], c=colors)\nplt.show()",
"(150, 2)\n(150,)\nloss: 4613.551120505142\n"
]
],
[
[
"## Choice of K\n\nOne technique is to use a loss function, such as the within-cluster dissimilarity $W_K$, and compute it for several values of $K$. But this loss decreases as $K$ increases, even when used on a validation set with Cross-Validation. \n\nThe value of $W_K$ starts decreasing exponentially, then at a point the difference between successive values of $K$ abruptly decreases. \nHeuristically, setting $K^*=K$ for this particular $K$ where the difference becomes less important gives good results. \nThe $K^*$ can be found simply by plotting $W_K$ for different values of $K$. The plot looks like an elbow at $K^*$. This method is also called the elbow method.",
"_____no_output_____"
]
],
[
[
"losses = []\nfor k in range(1, 10):\n mod = KMeansClustering(k)\n mod.fit(X)\n losses.append(mod.get_loss(X))\n \nplt.plot(np.arange(1, 10), losses)\nplt.show()\n\nbest_k = 2 #by looking at plot\n\nmod = KMeansClustering(best_k)\nmod.fit(X)\ncolors = [ ['red', 'blue', 'green'][x] for x in mod.predict(X)]\nplt.scatter(X[:,0], X[:,1], c=colors)\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Hierarchical Clustering\n\nHierarchical clustering uses only a measure of dissimilarity between 2 groups of observations. \nIt produces hierarchical representations in which the clusters at each level are created by merging clusters at the next lower level. At the lowest level there are $N$ clusters of size $1$, and at the highest $1$ cluster of size $N$. \n\nThere exist two strategies:\n- Agglomerative (bottom-up): Start at the bottom and recursively merge a pair of clusters into one\n- Divisive (top-down): Start at the top and recursively split one cluster into two. \n\nEach level represents a different grouping of the data. It's up to the user to decide which level represents a natural clustering. \nMost methods possess a monotonicity property: The dissimilarity between clusters is monotone increasing with the level. \n\nThe model can be plotted as a binary tree, where the height of each node is proportional to the value of the intergroup dissimilarity between its two children. This is called a dendrogram. \nThe results are valid only if the data really possesses a hierarchical structure.",
"_____no_output_____"
],
[
"## Agglomerative Clustering\n\nIt starts with every observation in a different cluster. \nAt each step, the closest 2 clusters are merged. \nAfter $N-1$ steps, the algorithm stops with only one cluster left. \nA measure of dissimilarity between 2 groups, $d(G,H)$ is needed. There are several possibilities:\n\n- Single Linkage is the least dissimilar of all pairs:\n$$d_\\text{SL}(G,H) = \\min_{i \\in G, i' \\in H} d_{ii'}$$\n\n- Complete Linkage is the most dissimilar of all pairs:\n$$d_\\text{CL}(G,H) = \\max_{i \\in G, i' \\in H} d_{ii'}$$\n\n- Group Average is the mean dissimilarity between all pairs:\n$$d_\\text{GA}(G,H) = \\frac{1}{N_G N_H} \\sum_{i \\in G} \\sum_{i' \\in H} d_{ii'}$$\n\nIf the data is compact (small dissimilarities within clusters, clusters well separated from each other), all methods produce similar results. \n\nSingle Linkage only requires a single pair between two groups to be close to combine them. It has a tendency to combine, at relatively low thresholds, observations linked by a series of close intermediates. This phenomenon, called chaining, is a defect of the method. \n\nComplete Linkage is the other extreme, two groups are similar if all their observations are close. It tends to produce compact clusters, however it may produce clusters with observations much closer to members of other clusters than to members of their own cluster, breaking the closeness property. \n\nGroup average is a compromise between the two.",
"_____no_output_____"
],
[
"## Divisive Clustering\n\nIt begins with the whole dataset in one cluster, then recursively divides one existing cluster in two. \nAfter $N-1$ steps, there are $N$ clusters of size $1$. \nThis approach is less used than agglomerative clustering. \n\nPlace all observations in a single cluster $G$. \nChoose the observation whose average dissimilarity from all other observations is the largest. It is the first member of a new cluster H. \nAt each step, the observation in $G$ whose average dissimilarity from those in H, minus the remaining observations in G, is transferred to H. \nIt continues until the largest value becomes negative. The original cluster is then split in two, G and H. \nAt each step a new cluster is chosen and split in two. The cluster chosen can be the one with the largest diameter, or the largest average dissimilarity between its members.",
"_____no_output_____"
],
[
"# Self-Organizing Maps\n\nSelf-organization of a massive document collection - Kohonen, T., Kaski, S., Lagus, K., Salojärvi, J., Paatero, A. and Saarela, A. (2000) - [PDF](http://lig-membres.imag.fr/bisson/cours/M2INFO-AIW-ML/papers/Kohonen00.pdf)\n\nThis method can be viewed as a constrained version of K-Means, where the prototypes are encouraged to lie in a one- or two-dimensional manifold in the feature space. \nWe consider a SOM as a rectangular grid of $q_1*q_2=K$ prototypes $m_j \\in \\mathbb{R}^p$. \nOnce the model is fit, the observations can be mapped into the rectangular grid. \n\nAlgorithm:\nFor each observation $x_i$:\n- Find the cluster $m_j$ closest to $x_i$\n- Find all clusters $m_k$ such that the distance in the grid between $l_j$ and $l_k$ is lower than $r$.\n- Move all $m_k$ closer to $x_i$:\n$$m_k \\leftarrow m_k + \\alpha (x_i - m_k)$$\n\nThousands of iterations are made over the dataset. At each iteration, $\\alpha$ and $r$ are decreased. \nThe updates both move the prototypes closer to the data and maintain a smooth 2D spatial relationship between the prototypes.",
"_____no_output_____"
]
],
[
[
"def d2_dist(a, b):\n return (a[0]-b[0])**2 + (a[1]-b[1])**2\n\nclass SOM:\n \n def __init__(self, Q, niters = 1000):\n self.Q = Q\n self.niters = niters\n \n self.alpha_beg = 1\n self.alpha_end = 0\n self.dalpha = (self.alpha_end - self.alpha_beg) / self.niters\n \n self.r_beg = 10\n self.r_end = 1\n self.dr = (self.r_end - self.r_beg) / self.niters\n \n def get_closest_centroid(self, x):\n best_dist = float('inf')\n best_pos = None\n \n for i in range(self.Q):\n for j in range(self.Q):\n dist = (self.clusters[i,j]-x) @ (self.clusters[i,j]-x)\n if dist < best_dist:\n best_dist = dist\n best_pos = (i,j)\n \n return best_pos\n\n \n def fit(self, X):\n N, p = X.shape\n \n self.clusters = np.random.randn(self.Q, self.Q, p)\n alpha = self.alpha_beg\n r = self.r_beg\n \n for it in range(self.niters):\n \n for x in X:\n \n i0, j0 = self.get_closest_centroid(x)\n \n for i in range(self.Q):\n for j in range(self.Q):\n if d2_dist((i,j), (i0,j0)) < r:\n d = x - self.clusters[i,j]\n self.clusters[i,j] += alpha * d\n \n if it % 50 == 0:\n print('iteration:', it)\n \n alpha += self.dalpha\n r += self.dr\n \n def reconstruct(self, X):\n X2 = np.empty(X.shape)\n for i in range(len(X)):\n pos = self.get_closest_centroid(X[i])\n X2[i] = self.clusters[pos]\n return X2\n \n \nX, y = load_iris().data, load_iris().target\nX = X[np.random.permutation(len(X))]\npca = PCA(n_components=2)\nX = pca.fit_transform(X)\nX = X - np.mean(X, axis=0)\nX = X / np.std(X, axis=0)\nmod = SOM(Q=5, niters=250)\nmod.fit(X)\nXr = mod.reconstruct(X)\n\nprint('recons error:', np.linalg.norm(X - Xr))\nplt.scatter(X[:,0], X[:,1], c='blue')\nplt.scatter(Xr[:,0], Xr[:,1], c='red')\nplt.show()",
"iteration: 0\niteration: 50\niteration: 100\niteration: 150\niteration: 200\nrecons error: 4.41780419397064\n"
]
],
[
[
"# Principal Components, Curves and Surfaces\n\nPrincipal Components provides a sequence of best linear approximations of the data, of all ranks $q \\leq p$. \nThe parametric representation of an affine hyperplane is:\n$$f(\\lambda) = \\mu + V_q \\lambda$$\nwith $\\mu \\in \\mathbb{R}^p$ a location vector, $V_q \\in \\mathbb{R}^{p*q}$ a matrix with unit orthogonal columns vectors, and $\\lambda \\in \\mathbb{R}^q$ a vector of parameters. \n\nWe can fit this model by minimizing the reconstruction error:\n$$\\min_{\\mu, \\lambda_i, V_q} \\sum_{i=1}^N ||x_i - \\mu - V_q\\lambda_i||^2$$\nWhen we partially optimize of $\\mu$ and $\\lambda_i$ we get:\n$$\\hat{\\mu} = \\bar{x}$$\n$$\\hat{\\lambda}_i = V^T_q(x_i - \\bar{x})$$\n\nThe problem becomes:\n$$\\min_{V_q} ||(x_i - \\bar{x}) - V_qV^T_q(x_i - \\bar{x})||^2$$\n\nWe assume $\\bar{x} = 0$. The reconstruction matrix $H_q \\in \\mathbb{R}^{p*p}$ is a projection matrix such that $H_q = V_qV_q^T$ \n\nThe solution can be found with the singular value decomposition of $X$ centered:\n$$X = UDV^T$$\n\nFor each rank $q$, the solution $V_q$ are the first $q$ columns of V.",
"_____no_output_____"
]
],
[
[
"from sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.decomposition import PCA\n\nclass MyPCA:\n \n def __init__(self, q):\n self.q = q\n \n def fit(self, X):\n Xc = np.mean(X, axis=0, keepdims=True)\n X = X - Xc\n \n U, d, VT = np.linalg.svd(X)\n Vq = VT[:self.q].T\n \n self.Xc = Xc\n self.Vq = Vq\n \n \n def transform(self, X):\n return (X - self.Xc) @ self.Vq \n \n def inverse_transform(self, Xq):\n return (Xq @ self.Vq.T) + self.Xc\n \n \nX, y = load_iris().data, load_iris().target\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,\n random_state=15)\n\n\np1 = MyPCA(q=2)\np1.fit(X_train)\np2 = PCA(n_components=2)\np2.fit(X_train)\n\nXtrq1 = p1.transform(X_train)\nXtrq2 = p2.transform(X_train)\nprint(Xtrq1.shape)\nprint(metrics.tdist(Xtrq1, Xtrq2))\n\nXteq1 = p1.transform(X_test)\nXteq2 = p2.transform(X_test)\nprint(Xteq1.shape)\nprint(metrics.tdist(Xteq1, Xteq2))\n\nXtrr1 = p1.inverse_transform(Xtrq1)\nXtrr2 = p2.inverse_transform(Xtrq2)\nprint(Xtrr1.shape)\nprint(metrics.tdist(Xtrr1, Xtrr2))\n\nXter1 = p1.inverse_transform(Xteq1)\nXter2 = p2.inverse_transform(Xteq2)\nprint(Xter1.shape)\nprint(metrics.tdist(Xter1, Xter2))",
"(120, 2)\n6.633321496558339e-15\n(30, 2)\n3.045051981330325e-15\n(120, 4)\n1.050730930714469e-14\n(30, 4)\n5.0193869307412495e-15\n"
]
],
[
[
"The columns of $UD$ are called the principal components. The $N$ optimal $\\hat{\\lambda}_i$ are given by the first $q$ principal components.",
"_____no_output_____"
],
[
"## Principal Curves and Surfaces\n\nPrincipal curves generalize the principal component line. A principal curve provides a smooth one-dimensional curved approximation of a set of data points. \nA principal surface is more general, providing a curved manifold approximation of dimension 2 or more.",
"_____no_output_____"
],
[
"## Spectral Clustering\n\nTraditional clustering methods use spherical or elliptical metrics, and won't work well if the clusters are non-convex. Spectral clustering is a generalization designed for these situations. \n\nLet's define a matrix of similarities $S \\in \\mathbb{R}^{N*N}$, with $s_{ii'} \\geq 0$ the similarity between $x_i$ and $x_{i'}$. \nLet $G = <V, E>$ an undirected similarity graph with vertices $v_i$ for each observation, and edges weighted by $s_{ii'}$ only if the similarity reaches a specific threshold, otherwise there is no edge. \n\nClustering is now a graph problem, we wish to partition the graph such that edges between different groups have low weight, and within a group have high weight. \n\nLet $d_{ii'}$ the Euclidean distance between $x_i$ and $x_{i'}$. One similarity measure is the radial-kernel gram matrix: $s_{ii'} = \\exp (-d^2_{ii'}/c)$, with $c > 0$ a scale parameter. \n\nOne way to define a similarity graph is the mutual K-nearest neighbor graph. Define $\\mathcal{N}_k$ the symmetric set of nearby pairs of points. A pair $(i,i')$ is in $\\mathcal{N}_k$ if $x_i$ is among the K-nearest neighbors of $x_{i'}$, or vice versa. \nWe connect all pairs in $\\mathcal{N}_k$ with weight $w_{ii'} = s_{ii'}$, otherwise the weight is 0. \nAnother way is to include all edges to get a fully connected graph, with weights $w_{ii'}=s_{ii'}$. \n\nThe matrix of edge weights $W \\in \\mathbb{R}^{N*N}$ is called the adjacency matrix. \nThe degree of vertex $i$ is $g_i = \\sum_{i'} w_{ii'}$. Let $G \\in \\mathbb{R}^{N*N}$ a diagonal matrix with diagonal elements $g_i$. \nThe graph Laplacian is defined by $L = G - W$. \n\nSpectral clustering finds the $m$ eigenvectors corresponding to the $m$ smallest eigenvalues of $L$. It gives us the matrix $Z \\in \\mathbb{R}^{N*m}$. \nUsing a standard method like K-Means, we cluster the rows of $Z$ to yield a clustering of the original points.",
"_____no_output_____"
]
],
[
[
"from sklearn.datasets import load_iris\nfrom sklearn.decomposition import PCA\n\nclass SpectralClustering:\n \n def __init__(self, K, c, m):\n self.K = K\n self.c = c\n self.m = m\n \n def fit(self, X):\n N, p = X.shape\n \n S = np.array([\n [np.exp(-(X[i]-X[j])@(X[i]-X[j])/self.c) for i in range(N)]\n for j in range(N)\n ])\n \n W = S\n G = np.diag(np.sum(W, axis=0))\n L = G - W\n \n w, V = np.linalg.eigh(L)\n Z = V[:, 1:self.m+1]\n \n km = KMeansClustering(self.K)\n km.fit(Z)\n \n self.Z = Z\n self.km = km\n self.labels = km.predict(Z)\n \n\n\nX, y = load_iris().data, load_iris().target\nX = X[np.random.permutation(len(X))]\npca = PCA(n_components=2)\nX = pca.fit_transform(X)\nX = X - np.mean(X, axis=0)\nX = X / np.std(X, axis=0)\nmod = SpectralClustering(K=4, c=1, m=2)\nmod.fit(X)\ncolors = [ ['red', 'blue', 'green', 'yellow'][x] for x in mod.labels]\n\nplt.scatter(X[:,0], X[:,1], c=colors)\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Kernel Principal Components\n\nKernel principal component analysis - Bernhard Schoelkopf, Alexander J. Smola, and Klaus-Robert Mueller. (1999) - [PDF](http://pca.narod.ru/scholkopf_kernel.pdf)\n\nIn PCA, we diagonalize an estimate of the covariance matrix:\n$$C = \\frac{1}{p} \\sum_{j=1}^p x_{:,j} x_{:,j}^T$$\n\nKernel PCA follows the same principle, but first maps the data non-linearly into another feature space using the transformation $\\Phi$. As for kernel SVM methods, we don't need to compute $\\Phi(x)$, only the dot product $\\Phi(x_i)^T\\Phi(x_j)$\n\nThe covariance matrix becomes:\n\n$$\\bar{C} = \\frac{1}{p} \\sum_{j=1}^p \\Phi(x_{:,j}) \\Phi(x_{:,j})^T$$ \nWe need to find eigenvalues $\\lambda$ and eigenvectors $V$ satisfying $\\lambda V = \\bar{C} V$.\n\nLet's define the kernel matrix $K \\in \\mathbb{R}^{N*N}$ such that:\n$$K_{ij} = \\Phi(x_i)^T \\Phi(x_j)$$\n\nWe now solve the eigenvalue problem:\n$$\\lambda \\alpha = K \\alpha$$\n\nWe get the projected data on $q$ components with:\n$$Z_q = \\alpha * \\sqrt{\\lambda}$$\n\nThe kernel matrix is computed with data not centered. We need to center it first, using the following trick:\n$$K_\\text{center} = K - 1_NK - K1_N + 1_N K 1_N = (I - 1_N)K(I-1_N)$$\n\nwith $1_N \\in \\mathbb{R}^{N*N}$ a matrix with all elements equal to $1/N$.",
"_____no_output_____"
]
],
[
[
"from sklearn.datasets import load_iris\nfrom sklearn.decomposition import KernelPCA \n\nX, y = load_iris().data, load_iris().target\npca = KernelPCA(n_components=2, kernel='rbf', gamma=0.5)\nX = pca.fit_transform(X)\n\nplt.scatter(X[:,0], X[:,1])\nplt.show()\n\nprint(pca.lambdas_.shape, pca.alphas_.shape)\nprint(pca.lambdas_)\nprint(X[:10])",
"_____no_output_____"
],
[
"from sklearn.datasets import load_iris\n\nfrom sklearn.preprocessing import KernelCenterer\n\ndef center_kernel(K):\n N = len(K)\n IM1 = np.eye(N) - (np.ones((N,N)) / N)\n return IM1 @ K @ IM1\n\ndef kernel_linear():\n return lambda a, b: a @ b\n\ndef kernel_rbf(gamma):\n return lambda x, y: np.exp(-gamma * (x - y)@(x - y))\n\nclass MyKernelPCA:\n \n def __init__(self, q, kernel):\n self.q = q\n self.kernel = kernel\n \n def fit_transform(self, X):\n N, p = X.shape \n \n K = np.empty((N, N))\n for i in range(N):\n for j in range(N):\n K[i,j] = self.kernel(X[i], X[j])\n K = center_kernel(K)\n \n w, V = np.linalg.eigh(K)\n w, V = np.flip(w), np.flip(V, axis=1)\n wq, Vq = w[:self.q], V[:, :self.q]\n\n self.X = X\n self.lambdas = wq\n self.alphas = Vq\n \n return self.alphas * np.sqrt(self.lambdas)\n \n \n\nX, y = load_iris().data, load_iris().target\npca = MyKernelPCA(q=2, kernel=kernel_rbf(0.5))\nX = pca.fit_transform(X)\n\nplt.scatter(X[:,0], X[:,1])\nplt.show()\n\nprint(pca.lambdas.shape, pca.alphas.shape)\nprint(pca.lambdas)\nprint(X[:10])",
"_____no_output_____"
]
],
[
[
"## Sparse Principal Components\n\nSparse principal component analysis - Zou, H., Hastie, T. and Tibshirani, R. (2006) - [PDF](https://web.stanford.edu/~hastie/Papers/spc_jcgs.pdf)\n\nPrincipal components can be interpreted by examining the $v_j$, called loadings. The interpretation may be easier if they are sparse. Methods are usually based on a kind of Lasso (L1) penalty. \n\nOne approach is to solve the following problem:\n$$\\max_v v^T(X^TX)v$$\n$$\\text{s.t. } \\sum_{j=1}^p |v_j| \\leq t, \\space v^Tv=1$$ \n\nAnother strategy uses the reconstruction error with some penalty. For a single component, the criterion is:\n$$\\min_{\\theta, v} \\sum_{i=1}^N ||x_i - \\theta v^T x_i||_2^2 + \\lambda ||v||_2^2 + \\lambda_1 ||v||_1$$\n$$\\text{s.t. } ||\\theta||_2 = 1$$\n\nIf $\\lambda=\\lambda_1=0$, then $v=\\theta$ is the largest principal component direction. \nThe second penalty on $v$ encourages sparseness of the loadings. \n\nFor $K$ components, the problem becomes:\n\n$$\\min_{\\theta, v} \\sum_{i=1}^N ||x_i - \\Theta V^T x_i||_2^2 + \\lambda \\sum_{k=1}^K ||v_k||_2^2 + \\sum_{k=1}^K \\lambda_{1k} ||v||_1$$\n$$\\text{s.t. } \\Theta^T\\Theta= I$$\n\nThe criterion is not jointly convex in $\\Theta$ and $V$, but is convex in each parameter with the other fixed. \nMinimization over $V$ is equivalent to $K$ elastic net problems. \nMinimization over $\\Theta$ is solved by SVD. \nAlternating the two steps converges to the solution.",
"_____no_output_____"
],
[
"# Non-Negative Matrix Factorization\n\nLearning the parts of objects by non-negative matrix factorization - Lee, D. and Seung, H. (1999) - [PDF](http://www.columbia.edu/~jwp2128/Teaching/E4903/papers/nmf_nature.pdf) \nAlgorithms for non-negative matrix factorization - Lee, D. and Seung, H. (2001)- [PDF](https://papers.nips.cc/paper/1861-algorithms-for-non-negative-matrix-factorization.pdf)\n\nNon-negative matrix factorization is an alternative approach to PCA, where data and components are assumed to be non-negative. \nThe data matrix $X$ is approximated by:\n$$X \\approx WH$$\nwith $X \\in \\mathbb{R}^{N*p}$, $W \\in \\mathbb{R}^{N*r}$, $H \\in \\mathbb{R}^{r*p}$, and $r \\leq \\max(N,p)$. We also assume that $x_{ij}, w_{ik}, h_{kj} \\geq 0$. \n\n$W$ and $H$ are found by maximizing the log-likelihood of the data following a Poisson distribution:\n$$L(W,H) = \\sum_{i=1}^N \\sum_{j=1}^p \\left( x_{ij} \\log(WH)_{ij} - (WH)_{ij} \\right)$$ \n\nBy iteratively applying the following updates, we converges to a local maximum:\n$$w_{ik} \\leftarrow w_{ik} \\frac{\\sum_{j=1}^p h_{kj}x_{ij}/(WH)_{ij}}{\\sum_{j=1}^p h_{kj}}$$\n$$h_{kj} \\leftarrow h_{kj} \\frac{\\sum_{i=1}^N w_{ik}x_{ij}/(WH)_{ij}}{\\sum_{i=1}^N w_{ik}}$$",
"_____no_output_____"
]
],
[
[
"from sklearn.decomposition import NMF\nfrom sklearn.datasets import load_iris\n\nX, y = load_iris().data, load_iris().target\nmod = NMF(3)\nW = mod.fit_transform(X)\nH = mod.components_\n\nprint(H)\nprint(metrics.tdist(W @ H, X))\nprint(np.sum(W>=0) == W.size)\nprint(np.sum(H>=0) == H.size)",
"[[6.45917865 1.47563158 4.51670771 0.66771724]\n [3.42634388 2.86934556 0.35195317 0. ]\n [0. 4.92253048 7.40243384 7.18382697]]\n1.9140723652638665\nTrue\nTrue\n"
],
[
"from sklearn.datasets import load_iris\n\nclass MyNMF:\n \n def __init__(self, r):\n self.r = r\n \n def fit(self, X):\n N, p = X.shape\n W = np.abs(np.random.randn(N, self.r))\n H = np.abs(np.random.randn(self.r, p))\n \n for it in range(1000):\n \n WH = W @ H\n \n for i in range(N):\n for k in range(self.r):\n W[i,k] *= np.sum(H[k]*X[i] / WH[i]) / np.sum(H[k])\n \n WH = W @ H\n \n \n for k in range(self.r):\n for j in range(p):\n H[k,j] *=np.sum(W[:,k]*X[:,j]/WH[:,j])/np.sum(W[:,k]) \n \n \n return W, H\n \n\n \nX, y = load_iris().data, load_iris().target\nmod = MyNMF(3)\nW, H = mod.fit(X)\n\nprint(H)\nprint(metrics.tdist(W @ H, X))\nprint(np.sum(W>=0) == W.size)\nprint(np.sum(H>=0) == H.size)",
"[[9.11162364e-01 1.88621995e-01 1.57064676e+00 7.46003135e-01]\n [1.84031763e+00 1.48089946e+00 2.21888784e-01 1.57375484e-03]\n [2.20057220e+00 9.06710552e-01 1.22898456e+00 1.31254850e-01]]\n2.34613178170397\nTrue\nTrue\n"
]
],
[
[
"## Archetypal Analysis\n\nArchetypal analysis - Cutler, A. and Breiman, L. (1994) - [PDF](http://digitalassets.lib.berkeley.edu/sdtr/ucb/text/379.pdf)\n\nThis method is a prototypes method, similar to K-Means. It approximates each data point by a convex combination of a collection of prototypes.\n$$X \\approx WH$$\nwith $X \\in \\mathbb{R}^{N*p}$, $W \\in \\mathbb{R}^{N*r}$, $H \\in \\mathbb{R}^{r*p}$. \nWe assume $w_{ik} \\geq 0$ and $\\sum_{k=1}^r w_{ik} = 1$. The $N$ rows of $X$ are represented by convex combinations of the $r$ archetypes (rows of H). \n\nThe archetypes themselves are convex combinations of the observations:\n$$H = BX$$\nwith $B \\in \\mathbb{R}^{r*N}$, $b_{ki} \\geq 0$, and $\\sum_{i=1}^N b_{ki}=1$. \n\nWe minimize the following criterion:\n$$J(W,B) = ||X - WBX||^2$$ \n\nMinimizing J with respect to one variable, with the other fixed, is convex for both of them. \nWe iteratively minimize $J$ with respect to $W$ then $B$ until convergence. \nBut the overall problem is not convex, and it converges to a local minimum.",
"_____no_output_____"
],
[
"# Independent Component Analysis\n\nMultivariate data are often viewed as multiple indirect measurements arising from underlying sources, that cannot be directly measured. \nFactor analysis is a classical technique to identify these latent sources. It is usually based on Gaussian distributions. \nIndependent Component Analysis is another approach, that relies on the non-Gaussian nature of the underlying sources.",
"_____no_output_____"
],
[
"## Latent Variables and Factor Analysis\n\nLet's define the reduced singular value decomposition of $X \\in \\mathbb{R}^{N*p}$:\n$$X = UDV^T$$\nLet's define $S = \\sqrt{N} U$ and $A^T = DV^T / \\sqrt{N}$. We get a latent variable representation:\n$$X = SA^T$$\nEach column of $X$ is a linear combination of the columns of $S$. The columns of $S$ have zero mean, unit variance, and are uncorrelated.\n\n$$X_j = a_{j1}S_1 + a_{j2}S_2 + \\text{...} + a_{jp}S_p$$\n\nWe can rewrite it as $X = AS$. But for any orthogonal matrix $R \\in \\mathbb{R}^{p*p}$ we have:\n$$X = AS = AR^TRS = A^*S^*$$\nwith $\\text{cov}(S^*) = I$. Hence there are many such decompositions, and it is therefore impossible to identify any particular latent variables as unique underlying sources.\n\nThe classic factor analysis model has the form:\n$$X_j = a_{j1}S_1 + a_{j2}S_2 + \\text{...} + a_{jq}S_q + \\sigma_j$$\n$$X = AS + \\sigma$$\n\nwith $S$ a vector of $q$ underlying latent variables or factors, $A \\in \\mathbb{R}^{p*q}$ a matrix of factor loadings, and $\\sigma_j$ uncorrelated 0-mean disturbances. \nTypically the $S_l$ and $\\sigma_j$ are modeled as Gaussians, and the model is fit by maximum likelihood. ",
"_____no_output_____"
],
[
"## Independent Component Analysis\n\nLet $X \\in \\mathbb{R}^{p*N}$, where each column of $X$ represents an observation. The goal is to find the decomposition:\n$$X = AS$$\nwith $A \\in \\mathbb{R}^{p*p}$ an orthogonal matrix and $S \\in \\mathbb{R}^{p*N}$, such that the columns of $S$ are statistically independent. \nWe suppose that $X$ has already been whitened ($Cov(X) = I$). \nWe are trying to find an orthogonal matrix $A$ such that the components of $S=A^TX$ are independent (and non-Gaussian). \n\nSeveral ICA approaches are based on entropy. The differential entropy $H$ of a random variable with density $g(Y)$ is:\n$$H(Y) = - \\int g(y) \\log g(y) dy$$\n\nA natural measure of dependence is the mutual information $I(Y)$ between the components of the random vector $Y$:\n$$I(Y) = \\sum_{j=1}^p H(Y_j) - H(Y)$$\n\nLet $Y=A^TX$ with $A$ orthogonal and $\\text{cov}(X)=I$. It can be shown that:\n$$I(Y) = \\sum_{j=1}^p H(Y_j) - H(X)$$\nFinding $A$ that minimizes $I(Y) = I(A^TX)$ looks for the orthogonal transformation that leads to the most independence between its components. \n\nInstead of using the entropy $H(Y_j)$, we can use the negentropy measure:\n$$J(Y_j) = H(Z_j) - H(Y_j)$$\nwith $Z_j$ a Gaussian random variable with the same variance as $Y_j$. \nWe can use an approximation that can be computed and optimized on the data:\n$$J(Y_j) \\approx (E G(Y_j) - E G(Z_j))^2$$\n$$\\text{where } G(u) = \\frac{1}{a} \\log \\cosh (au), \\space \\forall 1 \\leq a \\leq 2$$",
"_____no_output_____"
],
[
"## Exploratory Projection Pursuit\n\nA projection pursuit algorithm for exploratory data analysis - Friedman, J. and Tukey, J. (1974) - [PDF](http://www.slac.stanford.edu/pubs/slacpubs/1250/slac-pub-1312.pdf)\n\nThis is a graphical exploration technique for visualizing high-dimensional data.",
"_____no_output_____"
],
[
"## A Direct Approach to ICA\n\nIndependent components analysis through product density estimation - Hastie, T. and Tibshirani, R. (2003) - [PDF](https://papers.nips.cc/paper/2155-independent-components-analysis-through-product-density-estimation.pdf)\n\nWe observe a random vector $X \\in \\mathbb{R}^p$, assumed to arise from a linear mixing of a latent source random vector $S \\in \\mathbb{R}^P$:\n$$X = AS$$. \nThe components $S_j$ are assumed to be independently distributed. We assume $E(S) = 0$, $Cov(S) = I$, $Cov(X) = I$, and $A$ ortohogonal. \n\nBecause the $S_j$ are independant, the joint density of $S$ is given by:\n$$f_S(s) = \\prod_{j=1}^p f_j(s_j)$$.\n\nAnd since $A$ is orthogonal, the joint density of $X$ is:\n$$f_X(x) = \\prod_{j=1}^p f_j(a_j^Tx)$$.\n\nthe model $f_X$ is fit using semi-parametric maximum likelihood. Each $f_j$ is represented by an exponentially tilted Gaussian density:\n$$f_j(s_j) = \\phi (s_j) \\exp (g_j(s_j))$$\n\nWhith $\\phi$ the standard Gaussian and $j_g$ a cubic smoothing pline restricted such that $f_j$ integrates to $1$ \n\n### Fitting the Model\n\nWe got the data $x_1, \\text{...}, x_N$. We first center and whiten it. \nThen we fit the model using penalized maximum log-likelihood:\n$$\\min_{A, \\{ g_j \\}_1^p} \\sum_{j=1}^p \\left[ \\frac{1}{N} \\sum_{i=1}^N (\\log \\phi(a_j^Tx_i) + g_j(a_j^Tx_i)) - \\lambda_j \\int g_j''^2(t)dt \\right]$$\n$$\\text{s.t. } a_j^Ta_k = \\delta_{jk} \\space \\forall j,k$$\n$$\\text{s.t. 
} \\int \\phi(s) \\exp(g_j(s)) ds = 1 \\space \\forall j$$\n\nProDen ICA algorithm: \n- Initialize A from a random gaussian, then orgonalize it\n- Repeat until convergence:\n - Given fixed $A$, optimize seperately each for each $g_j$ using the penalized density estimation algorithm.\n - Given fixed $g_j$, optimize for A using one step of the fixed point algorithm.\n\n### Penalized density estimation\n\nWhen $p=1$, the problem simplifies to:\n$$\\min_g \\frac{1}{N} \\sum_{i=1}^N (\\log \\phi(s_i) + g(s_i)) - \\lambda \\int g''^2(t)dt$$\n$$\\text{s.t. } \\int \\phi(s) \\exp(g(s)) ds = 1$$\n\nThe constraint can be integraded with the modified criterion:\n$$\\min_g \\frac{1}{N} \\sum_{i=1}^N (\\log \\phi(s_i) + g(s_i)) - \\int \\phi(s) \\exp(g(s)) ds - \\lambda \\int g''^2(t)dt$$\n\nWe approximate the integral using a grid of $L$ values $s_l^*$ separated by $\\Delta$, covering the observed values $s_i$:\n$$y_l^* = \\frac{\\# s_i \\in (s_l^* - \\Delta/2, s_l^* + \\Delta/2)}{N}$$\n\nThe final criterion is:\n$$\\min_g \\sum_{l=1}^L \\left[ y_l^*(\\log \\phi(s_l^*) + g(s_l^*)) - \\Delta \\phi(s_l^*) \\exp(g(s_l^*)) \\right] - \\lambda \\int g''^2(t)dt$$\n\nThis is a generalized additive model, that can be fit using a newton algorithm, turned into an iteratively reweighted penalized least square regression problem. This is done using a weighted cubic smoothing spline.\n\n### Fixed-point method\n\nThe penalty term does not depend on $A$, and because all colums of $A$ are othogonal, the Gaussian component $\\log \\phi(a_j^Tx_i)$ does not depend of A either. What remains to be optimized is:\n$$C(A) = \\frac{1}{N} \\sum_{i=1}^N \\sum_{j=1}^p g_j(a_j^Tx_i)$$",
"_____no_output_____"
],
[
"# Multidimensional Scaling",
"_____no_output_____"
],
[
"Multidimensional Scaling tries to learn a lower-dimensional manifold like PCA. \nIt only works with distances $d_{ij}$, distance between obervation $i$ and $j$. \nThe goal is to find a lower-dimensional representation of the data that preserves the distance as well as possible. \n\nKrukaskal-Shephard scaling (least squares) minimizes the following stress function:\n$$S_M(Z) = \\sum_{i \\neq i'} (d_{ii'} - ||z_i - z_{i'}||)^2$$\n\nThe criterion is minimized using gradient descent. \n\nAnother criterion is the Sammon mapping:\n$$S_{Sm}(Z) = \\sum_{i \\neq i'} \\frac{(d_{ii'} - ||z_i - z_{i'}||)^2}{d_{ii'}}$$ \n\nIn classical scaling, we use similarities $s_{ii'}$. One example is the center inner product $s_{ii'} = \\langle x_i - \\bar{x}, x_{i'} - \\bar{x} \\rangle$. The criterion is:\n$$S_C(Z) = \\sum_{i,i'} (s_{ii'} - \\langle z_i - \\bar{z}, z_{i'} - \\bar{z} \\rangle)^2$$\n\nIf the similarities are the center inner product, this is equivalent to PCA. \n\nAnother approach is nonmetric scaling, this minimizes the following criterion:\n$$S_{NM}(Z) = \\sum_{i \\neq i'} \\frac{(||z_i - z_{i'}|| - \\theta(d_{ii'}))^2}{\\sum_{i \\neq i'} ||z_i - z_{i'}||^2}$$\n\nwith $\\theta$ an arbitrary increasing function. \nWe fit the model by iteratively optimizing for $Z$ with gradient descent and $\\theta$ with isotonic regression until convergence. \nIsotonic regression is a regression technique trying to minimize the squared error, but the approximator is any form of monotone function.",
"_____no_output_____"
],
[
"# Nonlinear Dimension Reduction\n\nSeveral methods exist to find a low-dimensional nonlinear manifold of the data\n\n## Isometric feature mapping\n\nA global geometric framework for nonlinear dimensionality reduction - Tenenbaum, J. B., de Silva, V. and Langford, J. C. (2000) - [PDF](https://web.mit.edu/cocosci/Papers/sci_reprint.pdf)\n\nWe build a graph of the dataset, We find the neighbors of each of the points, and build edges with its neighbors. We approximate the geodesic distance between 2 points by the shortest path between these 2 points on the graph. \nClassical scaling is applied to the graph distances.\n\n## Local linear embedding\n\nNonlinear dimensionality reduction by locally linear embedding - Roweis, S. T. and Saul, L. K. (2000) - [PDF](http://www.robots.ox.ac.uk/~az/lectures/ml/lle.pdf)\n\nThe point are approximated locally, and a lower dimensional representation is built from these approximations. \n\n1. For each data point $x_i$m we find its K-nearest neighbors $\\mathcal{N}(i)$\n2. We approximate each point by an affine mixture of its neighbors:\n $$\\min_{W_{ik}} ||x_i - \\sum_{k \\in \\mathcal{N}(i)} w_{ik}x_k||^2$$\n\n over weights $w_{ik}$ satysfying $\\sum_k w_{ik}=1$.\n \n3. We find points $y_i$ in a lower-dimensional space that minimizes:\n $$\\sum_{i=1}^N ||y_i - \\sum_{k=1}^N w_{ik} y_k||^2$$\n \n## Local Multidimension Scaling\n\nLocal multidimensional scaling for nonlineardimension reduction, graph drawing and proximity analysis - Chen, L. and Buja, A. 
(2008) - [PDF](https://pdfs.semanticscholar.org/183f/fb91f924ae7b938e4bfd1f5b2c3f8ef3b35c.pdf)\n\nLet $\\mathcal{N}$ the set of nearby pairs, suchat that $(i,i') \\in \\mathcal{N}$ if $i$ is among the K-nearest neighbors of $i'$ or vice-versa.\n\nThe goal if to find the point representations $z_i$ that minimize the stress function:\n$$S_L(Z) = \\sum_{(i,i') \\in \\mathcal{N}} (d_{ii'} - ||z_i - z_{i'}||)^2 - \\tau \\sum_{(i,i') \\notin \\mathcal{N}} ||z_i - z_{i'}||$$\n\nwith tuning parameters $\\tau$ and $K$. \nThe first term tries to preserve local structure in the data, while the second encourage representations of points that are non-neighbors to be farther appart. \nThe model is trained with gradient descent",
"_____no_output_____"
],
[
"# The Google PageRank Algorithm\n\nThe pagerank citation ranking: bringing order to the web - Page, L., Brin, S., Motwani, R. and Winograd, T. (1998) - [PDF](http://ilpubs.stanford.edu:8090/422/1/1999-66.pdf)\n\nWe have $N$ webpages, and want to rank them in term of importance. A webpage is important if many webpages point to it. It also takes into account the importance of the linkin pages and the number of outgoing pages they have. \nLet $L$ a binary matrix, $L_{ij} = 1$ if page $j$ points to page $i$, $0$ otherwhise. \nLet $c_j = \\sum_{i=1}^N L_{ij}$ the number of pages pointed to by page $j$. \n\nThen the google PageRanks $p_i$ are defined recursively as:\n$$p_i = (1 - d) + d \\sum_{j=1}^N \\frac{L_{ij}}{c_j} p_j$$\nwith $d$ a positive constant that ensures that each page get a PageRank of at least $1-d$. \nWe can write it in matrix notiation:\n$$p = (1 - d)e + d LD_c^{-1}p$$\nwith $e$ a vector of $N$ ones and $D_c = \\text{diag}(c)$. If we had the constraint that the average PageRank is 1 ($e^Tp=N$), the equation can be rewritten as:\n$$p= \\left[ (1-d)ee^T/N + dLD_c^{-1} \\right] p$$\n$$p=Ap$$\n\nIt can be shown that this problem is the same as a random walk expressed by a Markov Chain, and so the largest eigenvalue of $A$ is $1$. \nThis means we can find $p$ with the power method. \n\nAlgorithm:\n- Start with some random $p_0$\n- Iterative until convergence:\n $$p_k \\leftarrow A p_{k-1}$$\n $$p_k \\leftarrow N \\frac{p_k}{e^Tp_k}$$",
"_____no_output_____"
]
],
[
[
"def page_rank(L, d=0.85, tol=1e-12):\n N = L.shape[0]\n c = np.sum(L, axis=0)\n e = np.ones(N)\n \n A = (1-d)/N + d * L * (1/c).reshape(1, N)\n \n pk = np.random.rand(N)\n \n its = 0\n while True:\n its += 1\n pk1 = A @ pk\n pk1 /= np.mean(pk1)\n \n if (pk - pk1) @ (pk - pk1) < tol:\n break\n pk = pk1\n \n print(metrics.tdist(pk1, A @ pk1))\n print('Niters:', its)\n\n \n return pk1\n \n\nL = np.array([\n [0, 0, 1, 0],\n [1, 0, 0, 0],\n [1, 1, 0, 1],\n [0, 0, 0, 0]\n])\n\np = page_rank(L)\nprint(p)",
"7.799005841601055e-07\nNiters: 30\n[1.49010703 0.78329573 1.57659724 0.15 ]\n"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
]
] |
4a1fd25e4689c10aa5c0aa78affc0f3f95131f6d
| 10,029 |
ipynb
|
Jupyter Notebook
|
lectures/python/Funktionen.ipynb
|
rcmlz/edu
|
e3874e0367eafef7cad94dbaf900655ba5fa78b5
|
[
"MIT"
] | 1 |
2021-06-14T08:10:19.000Z
|
2021-06-14T08:10:19.000Z
|
lectures/python/Funktionen.ipynb
|
rcmlz/edu
|
e3874e0367eafef7cad94dbaf900655ba5fa78b5
|
[
"MIT"
] | null | null | null |
lectures/python/Funktionen.ipynb
|
rcmlz/edu
|
e3874e0367eafef7cad94dbaf900655ba5fa78b5
|
[
"MIT"
] | null | null | null | 25.325758 | 246 | 0.545019 |
[
[
[
"# Wie Sie dieses Notebook nutzen:\n- Führen Sie diesen Code Zelle für Zelle aus.\n- Um die Variableninhalte zu beobachten, nutzen Sie in Jupyter-Classic den \"Variable Inspektor\". Falls Sie dieses Notebook in Jupyter-Lab verwenden, nutzen Sie hierfür den eingebauten Debugger.\n- Wenn Sie \"Code Tutor\" zur Visualisierung des Ablaufes einzelner Zellen nutzen möchten, führen Sie einmalig die nachfolgende Zelle aus. Anschliessend schreiben Sie %%tutor in die erste Zeile jeder Zelle, die Sie visualisieren möchten.\n- Die Dokumentation von range(), len() und allen anderen eingebauten Funktionen finden Sie hier: https://docs.python.org/3/library/functions.html\n",
"_____no_output_____"
]
],
[
[
"# Für Code Tutor Visualisierungen\nfrom metakernel import register_ipython_magics\nregister_ipython_magics()",
"_____no_output_____"
]
],
[
[
"## Funktionen\n\n- Funktionen definiert man mit __def__ \n- Die korrekte Einrückung des Anweisungsblocks ist zu beachten.\n- Funktionen haben optional __Parameter__ und einen __Rückgabewert__. Letzterer wird mit \"return\" zurückgegeben. \n- Funktionen haben eine __Dokumentation__, die im Docstring hinterlegt ist.\n- Funktionen haben __Testfälle__, die automatisch ausgeführt werden können und die Funktion dokumentieren und die Verwendung demonstrieren.\n- Funktionen können Tesfälle im Docstring haben, aber auch auf viele andere Arten getestet werden, etwa mittels __assert__-Statements oder fortgeschritten mit [unittest](https://docs.python.org/3/library/unittest.html#module-unittest).\n\n### Definition\n\n```python\ndef name_der_funktion(parameter1, parameter2):\n \"\"\"\n Hier steht in einem Satz, was diese Funktion macht. \n \n Tests:\n >>> print(name_der_funktion(\"Rot\", \"Grün\"))\n Gelb\n \n >>> print(name_der_funktion(\"Rot\", \"Blau\"))\n Cyan\n \n Hier können weitere relevante Hinweise zur Nutzung gegeben werden.\n \"\"\"\n berechung 1\n berechung 2\n berechung ...\n ergebnis = berechung n\n\n return ergebnis\n\n```",
"_____no_output_____"
],
[
"### Anwendung\n\nFunktionen lassen sich sehr gut wiederverwenden, etwa in Schleifen. Dazu muss man die Funktion selbst nicht verstehen, wie sie intern funktioniert, sondern nur das Ergebnis.",
"_____no_output_____"
]
],
[
[
"def hash13(s):\n \"\"\"\n Erzeugt einen Hashwert des Stings s zwischen 0 und 13\n \n Tests:\n \n ToDo\n \n \"\"\"\n summe = 0\n i=0\n while i < len(s): \n j = ord(s[i])\n# print(\"Buchstabe: {} Code: {}\".format(s[i], j))\n summe = summe + j\n i+=1\n return summe % 13",
"_____no_output_____"
],
[
"passwoerter = [\"Hallo\", \"3re4kl4\", \"abcde\", \"rambo\"]\n\nfor p in passwoerter:\n h = hash13(p)\n print(\"{} - {}\".format(p, h))",
"_____no_output_____"
]
],
[
[
"### Gültigkeitsbereich (Scope)",
"_____no_output_____"
],
[
"Die an die Funktions-Parameter übergebenen Werte sind nur innerhalb des aktuellen Funktionsaufrufs gültig.",
"_____no_output_____"
]
],
[
[
"def funktions_name(parameter1, parameter2):\n ergebnis = parameter1 * parameter2\n return ergebnis",
"_____no_output_____"
],
[
"rueckgabewert = funktions_name(7,2)",
"_____no_output_____"
],
[
"print(rueckgabewert)",
"_____no_output_____"
]
],
[
[
"Ausserhalb einer Funktion sind die Parameter-Variablen nicht definiert",
"_____no_output_____"
]
],
[
[
"print(parameter1)",
"_____no_output_____"
]
],
[
[
"### Tests\n\nUm die eingebetteten Tests laufen zu lassen, muss die Funktion \"run_docstring_examples\" aus dem Packet \"doctest\" importiert werden.\n\n```python\nfrom doctest import run_docstring_examples\n```\n\nDann können durch ff. Aufruf die Tests, die im Docstring stehen, ausgeführt werden.\n\n```python\nrun_docstring_examples(name_der_funktion, locals())\n```",
"_____no_output_____"
]
],
[
[
"from doctest import run_docstring_examples",
"_____no_output_____"
],
[
"def mittelwert(zahlen):\n \"\"\"\n Berechnet das arithmetrische Mittel einer Liste von Zahlen.\n\n >>> print(mittelwert([20, 30, 70]))\n 40.0\n >>> print(mittelwert([0, 0, 0]))\n 0.0\n \n \"\"\"\n ergebnis = sum(zahlen) / len(zahlen)\n return ergebnis",
"_____no_output_____"
],
[
"run_docstring_examples(mittelwert, locals())",
"_____no_output_____"
],
[
"assert mittelwert([20, 30, 70]) == 40.0\nassert mittelwert([0, 0, 0]) == 0.0",
"_____no_output_____"
]
],
[
[
"Es können auch alle Testfälle in den Docstrings aller Funktionen einer .py-Datei gleichzeitig getestet werden.",
"_____no_output_____"
]
],
[
[
"def average(values):\n \"\"\"\n Computes the arithmetic mean of a list of numbers.\n\n >>> print(average([20, 30, 70]))\n 40.0\n >>> print(average([0, 0, 0]))\n 0.0\n \"\"\"\n return sum(values) / len(values)\n\ndef second_best(values):\n \"\"\"\n Computes the second highest value of a list of numbers.\n\n >>> print(second_best([20, 30, 70]))\n 30\n >>> print(second_best([0, 0, 0]))\n 0\n \"\"\"\n pass",
"_____no_output_____"
],
[
"import doctest\ndoctest.testmod() # automatically validate the embedded tests of all functions",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
4a1fda3ab1d97d9a5fda07d558f20de23bed7fa6
| 66,529 |
ipynb
|
Jupyter Notebook
|
docs/assemble_vignette.ipynb
|
jormacmoo/ah
|
dde2f179d1ad70ededa491ba0472b38ca9ef758e
|
[
"RSA-MD"
] | null | null | null |
docs/assemble_vignette.ipynb
|
jormacmoo/ah
|
dde2f179d1ad70ededa491ba0472b38ca9ef758e
|
[
"RSA-MD"
] | 3 |
2019-10-03T18:34:59.000Z
|
2019-11-12T15:14:04.000Z
|
docs/assemble_vignette.ipynb
|
jormacmoo/ah
|
dde2f179d1ad70ededa491ba0472b38ca9ef758e
|
[
"RSA-MD"
] | 3 |
2019-07-26T19:28:39.000Z
|
2019-11-20T00:24:39.000Z
| 179.808108 | 12,444 | 0.884682 |
[
[
[
"# The Assemble Module\n\nThe `assemble` module of the `repytah` package finds and forms essential structure components. These components are the smallest building blocks that form the basis for every repeat in the song. The functions in this module ensure that each time step of a song is contained in at most one of the song's essential structure components by making none of the repeats overlap in time. When repeats overlap, these repeats undergo a process where they are divided until there are only non-overlapping pieces left. \n\nThe following functions are exported from the `assemble` module:\n\n- `breakup_overlaps_by_intersect`: Extracts repeats in **input\\_pattern\\_obj** that has the starting indices of the repeats into the essential structure components using **bw\\_vec** that has the lengths of each repeat.\n \n- `check_overlaps`: Compares every pair of groups, determining if there are any repeats in any pairs of the groups that overlap. \n\n- `hierarchical_structure`: Distills the repeats encoded in **matrix\\_no\\_overlaps** (and **key\\_no\\_overlaps**) to the essential structure components and then builds the hierarchical representation. Also optionally outputs visualizations of the hierarchical representations.\n\nThis module uses `find_all_repeats` from the [`search`](https://github.com/smith-tinkerlab/repytah/blob/main/docs/search_vignette.ipynb) module and `reconstruct_full_block` from the [`utilities`](https://github.com/smith-tinkerlab/repytah/blob/main/docs/utilities_vignette.ipynb) module. \n\nFor more in-depth information on the function calls, an example function pipeline is shown below. Functions from the current module are shown in red.\n\n<img src=\"pictures/function_pipeline.jpg\" width=\"380\">\n",
"_____no_output_____"
],
[
"## Import Modules",
"_____no_output_____"
]
],
[
[
"# NumPy is used for mathematical calculations\nimport numpy as np\n\n# Import other modules\nfrom inspect import signature \n\n# Import assemble\nfrom repytah.assemble import *",
"_____no_output_____"
]
],
[
[
"## breakup_overlaps_by_intersect \n\nThe purpose of this function is to create the essential structure components matrix. Essential structure components contain the smallest building blocks that form every repeat in the song. This matrix is created using **input\\_pattern\\_obj** that has the starting indices of the repeats and a vector **bw\\_vec** that has the lengths of each repeat. \n \nThe inputs for this function are: \n- **input_pattern_obj** (np.ndarray): A binary matrix with 1's where repeats begin and 0's otherwise\n- **bw_vec** (np.ndarray): Lengths of the repeats encoded in **input\\_pattern\\_obj**\n- **thresh_bw** (int): The smallest allowable repeat length \n\nThe outputs for this function are: \n- **pattern_no_overlaps** (np.ndarray): A binary matrix with 1's where repeats of essential structure components begin \n- **pattern_no_overlaps_key** (np.ndarray): A vector containing the lengths of the repeats of essential structure components in **pattern\\_no\\_overlaps** ",
"_____no_output_____"
]
],
[
[
"input_pattern_obj = np.array([[1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],\n [1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]])\nbw_vec = np.array([[3],\n [5],\n [8],\n [8]])\nthresh_bw = 0\nprint(\"The input array is: \\n\", input_pattern_obj)\nprint(\"The lengths of the repeats in the input array is: \\n\", bw_vec)\nprint(\"The smallest allowable repeat length is: \", thresh_bw)",
"The input array is: \n [[1 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0]\n [0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0]\n [1 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0]\n [0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0]]\nThe lengths of the repeats in the input array is: \n [[3]\n [5]\n [8]\n [8]]\nThe smallest allowable repeat length is: 0\n"
],
[
"output = breakup_overlaps_by_intersect(input_pattern_obj, bw_vec, thresh_bw)\n\nprint(\"The output array is: \\n\", output[0])\nprint(\"The lengths of the repeats in the output array is: \\n\", output[1])",
"The output array is: \n [[1 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0]\n [0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0]]\nThe lengths of the repeats in the output array is: \n [[3]\n [5]]\n"
]
],
[
[
"## check_overlaps\n\nThis function compares every pair of groups and checks for overlaps between those pairs. To check every pair of groups, the function creates *compare\\_left* and *compare\\_right*. *compare\\_left* repeats each row the number of rows times, and *compare\\_right* repeats the whole input the number of rows times. By comparing each corresponding time step in *compare\\_left* and *compare\\_right*, it determines if there are any overlaps between groups.\n\nThe input for this function is: \n\n- **input_mat** (np.ndarray): An array waiting to be checked for overlaps\n\nThe output for this function is: \n- **overlaps\\_yn** (np.ndarray): A logical array where (i,j) = 1 if row i of input matrix and row j of input matrix overlap and (i, j) = 0 elsewhere",
"_____no_output_____"
]
],
[
[
"input_mat = np.array([[0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],\n [0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0],\n [1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1]])\n\nprint(\"The input array waiting to be checked for overlaps is: \\n\", input_mat)",
"The input array waiting to be checked for overlaps is: \n [[0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1]\n [1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0]\n [0 0 0 1 1 1 1 1 0 0 0 1 1 1 1 1 0 0 0]\n [1 1 1 0 0 0 0 0 1 1 1 0 0 0 0 0 1 1 1]]\n"
],
[
"output = check_overlaps(input_mat)\n\nprint(\"The output logical array is: \\n\", output)",
"The output logical array is: \n [[False True True True]\n [False False True True]\n [False False False False]\n [False False False False]]\n"
]
],
[
[
"## hierarchical\\_structure\n\nThis function distills the repeats encoded in **matrix\\_no\\_overlaps** (and **key\\_no\\_overlaps**), which are the outputs from the [`remove_overlaps`](https://github.com/smith-tinkerlab/repytah/blob/main/docs/transform_vignette.ipynb) function from the transform module, to the essential structure components and then builds the hierarchical representation. It optionally shows visualizations of the hierarchical structure via the **vis** argument.\n\nThe inputs for this function are: \n\n- **matrix\\_no\\_overlaps** (np.array\\[int]): A binary matrix with 1's where repeats begin and 0's otherwise\n- **key\\_no\\_overlaps** (np.array\\[int]): A vector containing the lengths of the repeats encoded in **matrix_no_overlaps**\n- **sn** (int): The song length, which is the number of audio shingles\n- **vis** (bool): Shows visualizations if True (default = False)\n \nThe outputs for this function are: \n\n- **full_visualization** (np.array\\[int]): A binary matrix representation for **full_matrix_no_overlaps** with blocks of 1's equal to the length's prescribed in **full_key**\n \n- **full_key** (np.array\\[int]): A vector containing the lengths of the hierarchical structure encoded in **full_matrix_no_overlaps**\n \n- **full_matrix_no_overlaps** (np.array\\[int]): A binary matrix with 1's where hierarchical structure begins and 0's otherwise\n \n- **full_anno_lst** (np.array\\[int]): A vector containing the annotation markers of the hierarchical structure encoded in each row of **full_matrix_no_overlaps**",
"_____no_output_____"
]
],
[
[
"matrix_no_overlaps = np.array([[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]])\nkey_no_overlaps = np.array([2])\nsn = 20\n\nprint(\"The matrix representation of the non-overlapping repeats is: \\n\", matrix_no_overlaps)\nprint(\"The lengths of the repeats in matrix_no_overlaps are: \\n\", key_no_overlaps)\nprint(\"The song length is: \\n\", sn)",
"The matrix representation of the non-overlapping repeats is: \n [[0 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0]]\nThe lengths of the repeats in matrix_no_overlaps are: \n [2]\nThe song length is: \n 20\n"
],
[
"output = hierarchical_structure(matrix_no_overlaps, key_no_overlaps, sn, vis=True)\n\nfull_visualization = output[0]\nfull_key = output[1]\nfull_matrix_no_overlaps = output[2]\nfull_anno_lst = output[3]\n\nprint(\"The binary matrix representation for the full_matrix_no_overlaps is: \\n\", full_visualization)\nprint(\"The vector containing the lengths of the hierarchical structure encoded in full_matrix_no_overlaps is: \\n\", full_key)\nprint(\"The binary matrix with 1's where hierarchical structure begins and 0's otherwise is: \\n\", full_matrix_no_overlaps)\nprint(\"The vector containing the annotation markers of the hierarchical structure encoded in each row \\n of full_matrix_no_overlaps is: \\n\", full_anno_lst)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
4a1fdb828800a65673c5bb3e3f927869a285e4dd
| 27,657 |
ipynb
|
Jupyter Notebook
|
archive/geopandas.ipynb
|
bethanysciences/jupyter
|
e9af86d4515c9f404b478e3953c16d591a5ad63c
|
[
"MIT"
] | null | null | null |
archive/geopandas.ipynb
|
bethanysciences/jupyter
|
e9af86d4515c9f404b478e3953c16d591a5ad63c
|
[
"MIT"
] | null | null | null |
archive/geopandas.ipynb
|
bethanysciences/jupyter
|
e9af86d4515c9f404b478e3953c16d591a5ad63c
|
[
"MIT"
] | null | null | null | 117.689362 | 20,848 | 0.83158 |
[
[
[
"import sys\nimport geopandas as gpd # !{sys.executable} -m pip install geopandas\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n!python3 --version",
"_____no_output_____"
],
[
"# geo_df = geopandas.read_file(\"data/maps/usgeojson/gz_2010_us_040_00_5m.json\")\n# geo_df.head()\ngeo_df = gpd.read_file(\"data/maps/states_21basic/states.shp\")\ngeo_df[\"STATE_FIPS\"] = geo_df[\"STATE_FIPS\"].astype(np.int64)\n# geo_df.head()\n# geo_df.dtypes\n# geo_df.plot()",
"_____no_output_____"
],
[
"df = pd.read_csv(\"data/uspop-nst-2018.csv\", header=0)\ndf = df[['STATE_FIPS','POP_2018']]\n# df.dtypes\n# df.head()",
"_____no_output_____"
],
[
"merged = geo_df.join(df.set_index(\"STATE_FIPS\"), on=\"STATE_FIPS\")\nmerged.head()",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(1, 1)\ndivider = make_axes_locatable(ax)\nmerged.plot(column='POP_2018',\n ax=ax,\n legend=True,\n legend_kwds={'label': \"Population by State (m)\",\n 'orientation': \"horizontal\"})",
"_____no_output_____"
],
[
"# merged.plot(column='POP_2018');\n# cax = divider.append_axes(\"right\", size=\"5%\", pad=0.1)\n# merged.plot(column='POP_2018', ax=ax, legend=True, cax=cax)\n\n# fig.savefig(\"leaddistribution.png\", dpi=300)",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a1fea0e482f5675356435aebec1530a95c299da
| 3,932 |
ipynb
|
Jupyter Notebook
|
alink/alink-demo.ipynb
|
cleuton/datascience
|
afbdc5a32bb3ce981b6616305e6a7da7a690bc7b
|
[
"Apache-2.0"
] | 29 |
2017-11-02T11:05:28.000Z
|
2022-03-01T13:52:48.000Z
|
alink/.ipynb_checkpoints/alink-demo-checkpoint.ipynb
|
cleuton/datascience
|
afbdc5a32bb3ce981b6616305e6a7da7a690bc7b
|
[
"Apache-2.0"
] | 2 |
2018-03-19T21:08:48.000Z
|
2020-05-22T09:40:35.000Z
|
alink/alink-demo.ipynb
|
cleuton/datascience
|
afbdc5a32bb3ce981b6616305e6a7da7a690bc7b
|
[
"Apache-2.0"
] | 18 |
2018-01-03T22:06:44.000Z
|
2022-02-25T17:47:47.000Z
| 25.044586 | 122 | 0.524161 |
[
[
[
"# Alink demonstration\nThis is a sample machine learning demo created with Alink from Alibaba. \n*Cleuton Sampaio*, [**Data Learning Hub**](http://datalearninghub.com)",
"_____no_output_____"
]
],
[
[
"#Imports\nfrom pyalink.alink import *",
"\nUse one of the following command to start using pyalink:\n使用以下一条命令来开始使用 pyalink:\n - useLocalEnv(parallelism, flinkHome=None, config=None)\n - useRemoteEnv(host, port, parallelism, flinkHome=None, localIp=\"localhost\", config=None)\nCall resetEnv() to reset environment and switch to another.\n使用 resetEnv() 来重置运行环境,并切换到另一个。\n\n"
],
[
"#Environment configuration\nuseLocalEnv(1, flinkHome=None, config=None)\n#parallism We will not use, but we could use a Flink cluster https://flink.apache.org/poweredby.html",
"JVM listening on 127.0.0.1:41249\n"
],
[
"#Preparing dataframe\n#we'll read a CSV dataset containing Weights and Heights of students. We'll try to predict Weight based on Height\nURL = \"./weight-height.csv\"\nSCHEMA_STR = \"weight double,height double\"\nmnist_data = CsvSourceBatchOp() \\\n .setFilePath(URL) \\\n .setSchemaStr(SCHEMA_STR)\\\n .setFieldDelimiter(\",\")\nspliter = SplitBatchOp().setFraction(0.8)\ntrain = spliter.linkFrom(mnist_data)\ntest = spliter.getSideOutput(0)",
"_____no_output_____"
],
[
"#Creating Linear Regression Model based on operator\nlr = LinearRegression().setFeatureCols([\"weight\"]).setLabelCol(\"height\").setPredictionCol(\"prediction\")",
"_____no_output_____"
],
[
"#Training and printing results\nmodel = lr.fit(train)\nmodel.transform(train).print()",
" weight height prediction\n0 61.0 1.62 1.610758\n1 61.0 1.63 1.610758\n2 68.0 1.68 1.681880\n3 73.0 1.75 1.732682\n4 67.0 1.68 1.671720\n.. ... ... ...\n234 66.0 1.67 1.661560\n235 50.0 1.51 1.498994\n236 70.0 1.70 1.702201\n237 58.0 1.59 1.580277\n238 73.0 1.75 1.732682\n\n[239 rows x 3 columns]\n"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
4a1ff60eebba6fc61a4334e5b1148877dccd3c76
| 22,127 |
ipynb
|
Jupyter Notebook
|
Course 1- NLP with Classification and Vector Space/Datasets/Week 3 Lab/NLP_C1_W3_lecture_nb_01.ipynb
|
ajitpande/NLP-Specialization
|
c66f37716cb63f7c9775f61b85b56d4d49ed736e
|
[
"Apache-2.0"
] | null | null | null |
Course 1- NLP with Classification and Vector Space/Datasets/Week 3 Lab/NLP_C1_W3_lecture_nb_01.ipynb
|
ajitpande/NLP-Specialization
|
c66f37716cb63f7c9775f61b85b56d4d49ed736e
|
[
"Apache-2.0"
] | null | null | null |
Course 1- NLP with Classification and Vector Space/Datasets/Week 3 Lab/NLP_C1_W3_lecture_nb_01.ipynb
|
ajitpande/NLP-Specialization
|
c66f37716cb63f7c9775f61b85b56d4d49ed736e
|
[
"Apache-2.0"
] | null | null | null | 26.659036 | 399 | 0.534731 |
[
[
[
"# Linear algebra in Python with NumPy\n",
"_____no_output_____"
],
[
"In this lab, you will have the opportunity to remember some basic concepts about linear algebra and how to use them in Python.\n\nNumpy is one of the most used libraries in Python for arrays manipulation. It adds to Python a set of functions that allows us to operate on large multidimensional arrays with just a few lines. So forget about writing nested loops for adding matrices! With NumPy, this is as simple as adding numbers.\n\nLet us import the `numpy` library and assign the alias `np` for it. We will follow this convention in almost every notebook in this course, and you'll see this in many resources outside this course as well.",
"_____no_output_____"
]
],
[
[
"import numpy as np # The swiss knife of the data scientist.",
"_____no_output_____"
]
],
[
[
"## Defining lists and numpy arrays",
"_____no_output_____"
]
],
[
[
"alist = [1, 2, 3, 4, 5] # Define a python list. It looks like an np array\nnarray = np.array([1, 2, 3, 4]) # Define a numpy array",
"_____no_output_____"
]
],
[
[
"Note the difference between a Python list and a NumPy array.",
"_____no_output_____"
]
],
[
[
"print(alist)\nprint(narray)\n\nprint(type(alist))\nprint(type(narray))",
"[1, 2, 3, 4, 5]\n[1 2 3 4]\n<class 'list'>\n<class 'numpy.ndarray'>\n"
]
],
[
[
"## Algebraic operators on NumPy arrays vs. Python lists\n\nOne of the common beginner mistakes is to mix up the concepts of NumPy arrays and Python lists. Just observe the next example, where we add two objects of the two mentioned types. Note that the '+' operator on NumPy arrays perform an element-wise addition, while the same operation on Python lists results in a list concatenation. Be careful while coding. Knowing this can save many headaches.",
"_____no_output_____"
]
],
[
[
"print(narray + narray)\nprint(alist + alist)",
"[2 4 6 8]\n[1, 2, 3, 4, 5, 1, 2, 3, 4, 5]\n"
]
],
[
[
"It is the same as with the product operator, `*`. In the first case, we scale the vector, while in the second case, we concatenate three times the same list.",
"_____no_output_____"
]
],
[
[
"print(narray * 3)\nprint(alist * 3)",
"[ 3 6 9 12]\n[1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5]\n"
]
],
[
[
"Be aware of the difference because, within the same function, both types of arrays can appear. \nNumpy arrays are designed for numerical and matrix operations, while lists are for more general purposes.",
"_____no_output_____"
],
[
"## Matrix or Array of Arrays\n\nIn linear algebra, a matrix is a structure composed of n rows by m columns. That means each row must have the same number of columns. With NumPy, we have two ways to create a matrix:\n* Creating an array of arrays using `np.array` (recommended). \n* Creating a matrix using `np.matrix` (still available but might be removed soon).\n\nNumPy arrays or lists can be used to initialize a matrix, but the resulting matrix will be composed of NumPy arrays only.",
"_____no_output_____"
]
],
[
[
"npmatrix1 = np.array([narray, narray, narray]) # Matrix initialized with NumPy arrays\nnpmatrix2 = np.array([alist, alist, alist]) # Matrix initialized with lists\nnpmatrix3 = np.array([narray, [1, 1, 1, 1], narray]) # Matrix initialized with both types\n\nprint(npmatrix1)\nprint(npmatrix2)\nprint(npmatrix3)",
"[[1 2 3 4]\n [1 2 3 4]\n [1 2 3 4]]\n[[1 2 3 4 5]\n [1 2 3 4 5]\n [1 2 3 4 5]]\n[[1 2 3 4]\n [1 1 1 1]\n [1 2 3 4]]\n"
]
],
[
[
"However, when defining a matrix, be sure that all the rows contain the same number of elements. Otherwise, the linear algebra operations could lead to unexpected results.\n\nAnalyze the following two examples:",
"_____no_output_____"
]
],
[
[
"# Example 1:\n\nokmatrix = np.array([[1, 2], [3, 4]]) # Define a 2x2 matrix\nprint(okmatrix) # Print okmatrix\nprint(okmatrix * 2) # Print a scaled version of okmatrix\nnp.dot(okmatrix,okmatrix)",
"[[1 2]\n [3 4]]\n[[2 4]\n [6 8]]\n"
],
[
"# Example 2:\n\nbadmatrix = np.array([[1, 2], [3, 4], [5,6, 7]]) # Define a matrix. Note the third row contains 3 elements\nprint(badmatrix) # Print the malformed matrix\nprint(badmatrix * 2) # It is supposed to scale the whole matrix",
"[list([1, 2]) list([3, 4]) list([5, 6, 7])]\n[list([1, 2, 1, 2]) list([3, 4, 3, 4]) list([5, 6, 7, 5, 6, 7])]\n"
]
],
[
[
"## Scaling and translating matrices\n\nNow that you know how to build correct NumPy arrays and matrices, let us see how easy it is to operate with them in Python using the regular algebraic operators like + and -. \n\nOperations can be performed between arrays and arrays or between arrays and scalars.",
"_____no_output_____"
]
],
[
[
"# Scale by 2 and translate 1 unit the matrix\nresult = okmatrix * 2 + 1 # For each element in the matrix, multiply by 2 and add 1\nprint(result)",
"[[3 5]\n [7 9]]\n"
],
[
"# Add two sum compatible matrices\nresult1 = okmatrix + okmatrix\nprint(result1)\n\n# Subtract two sum compatible matrices. This is called the difference vector\nresult2 = okmatrix - okmatrix\nprint(result2)",
"[[2 4]\n [6 8]]\n[[0 0]\n [0 0]]\n"
]
],
[
[
"The product operator `*` when used on arrays or matrices indicates element-wise multiplications.\nDo not confuse it with the dot product.",
"_____no_output_____"
]
],
[
[
"result = okmatrix * okmatrix # Multiply each element by itself\nprint(result)",
"[[ 1 4]\n [ 9 16]]\n"
]
],
[
[
"## Transpose a matrix\n\nIn linear algebra, the transpose of a matrix is an operator that flips a matrix over its diagonal, i.e., the transpose operator switches the row and column indices of the matrix producing another matrix. If the original matrix dimension is n by m, the resulting transposed matrix will be m by n.\n\n**T** denotes the transpose operations with NumPy matrices.",
"_____no_output_____"
]
],
[
[
"matrix3x2 = np.array([[1, 2], [3, 4], [5, 6]]) # Define a 3x2 matrix\nprint('Original matrix 3 x 2')\nprint(matrix3x2)\nprint('Transposed matrix 2 x 3')\nprint(matrix3x2.T)",
"Original matrix 3 x 2\n[[1 2]\n [3 4]\n [5 6]]\nTransposed matrix 2 x 3\n[[1 3 5]\n [2 4 6]]\n"
]
],
[
[
"However, note that the transpose operation does not affect 1D arrays.",
"_____no_output_____"
]
],
[
[
"nparray = np.array([1, 2, 3, 4]) # Define an array\nprint('Original array')\nprint(nparray)\nprint('Transposed array')\nprint(nparray.T)",
"Original array\n[1 2 3 4]\nTransposed array\n[1 2 3 4]\n"
]
],
[
[
"perhaps in this case you wanted to do:",
"_____no_output_____"
]
],
[
[
"nparray = np.array([[1, 2, 3, 4]]) # Define a 1 x 4 matrix. Note the 2 level of square brackets\nprint('Original array')\nprint(nparray)\nprint('Transposed array')\nprint(nparray.T)",
"Original array\n[[1 2 3 4]]\nTransposed array\n[[1]\n [2]\n [3]\n [4]]\n"
]
],
[
[
"## Get the norm of a nparray or matrix\n\nIn linear algebra, the norm of an n-dimensional vector $\\vec a$ is defined as:\n\n$$ norm(\\vec a) = ||\\vec a|| = \\sqrt {\\sum_{i=1}^{n} a_i ^ 2}$$\n\nCalculating the norm of vector or even of a matrix is a general operation when dealing with data. Numpy has a set of functions for linear algebra in the subpackage **linalg**, including the **norm** function. Let us see how to get the norm a given array or matrix:",
"_____no_output_____"
]
],
[
[
"nparray1 = np.array([1, 2, 3, 4]) # Define an array\nnorm1 = np.linalg.norm(nparray1)\n\nnparray2 = np.array([[1, 2], [3, 4]]) # Define a 2 x 2 matrix. Note the 2 level of square brackets\nnorm2 = np.linalg.norm(nparray2) \n\nprint(norm1)\nprint(norm2)",
"5.477225575051661\n5.477225575051661\n"
]
],
[
[
"Note that without any other parameter, the norm function treats the matrix as being just an array of numbers.\nHowever, it is possible to get the norm by rows or by columns. The **axis** parameter controls the form of the operation: \n* **axis=0** means get the norm of each column\n* **axis=1** means get the norm of each row. ",
"_____no_output_____"
]
],
[
[
"nparray2 = np.array([[1, 1], [2, 2], [3, 3]]) # Define a 3 x 2 matrix. \n\nnormByCols = np.linalg.norm(nparray2, axis=0) # Get the norm for each column. Returns 2 elements\nnormByRows = np.linalg.norm(nparray2, axis=1) # get the norm for each row. Returns 3 elements\n\nprint(normByCols)\nprint(normByRows)",
"[3.74165739 3.74165739]\n[1.41421356 2.82842712 4.24264069]\n"
]
],
[
[
"However, there are more ways to get the norm of a matrix in Python.\nFor that, let us see all the different ways of defining the dot product between 2 arrays.",
"_____no_output_____"
],
[
"## The dot product between arrays: All the flavors\n\nThe dot product or scalar product or inner product between two vectors $\\vec a$ and $\\vec a$ of the same size is defined as:\n$$\\vec a \\cdot \\vec b = \\sum_{i=1}^{n} a_i b_i$$\n\nThe dot product takes two vectors and returns a single number.",
"_____no_output_____"
]
],
[
[
"nparray1 = np.array([0, 1, 2, 3]) # Define an array\nnparray2 = np.array([4, 5, 6, 7]) # Define an array\n\nflavor1 = np.dot(nparray1, nparray2) # Recommended way\nprint(flavor1)\n\nflavor2 = np.sum(nparray1 * nparray2) # Ok way\nprint(flavor2)\n\nflavor3 = nparray1 @ nparray2 # Geeks way\nprint(flavor3)\n\n# As you never should do: # Noobs way\nflavor4 = 0\nfor a, b in zip(nparray1, nparray2):\n flavor4 += a * b\n \nprint(flavor4)",
"38\n38\n38\n38\n"
]
],
[
[
"**We strongly recommend using np.dot, since it is the only method that accepts arrays and lists without problems**",
"_____no_output_____"
]
],
[
[
"norm1 = np.dot(np.array([[1, 2],[3, 4]]), np.array([[1, 2],[3, 4]])) # Dot product on nparrays\nnorm2 = np.dot([1, 2], [3, 4]) # Dot product on python lists\n\nprint(norm1, '=', norm2 )",
"[[ 7 10]\n [15 22]] = 11\n"
]
],
[
[
"Finally, note that the norm is the square root of the dot product of the vector with itself. That gives many options to write that function:\n\n$$ norm(\\vec a) = ||\\vec a|| = \\sqrt {\\sum_{i=1}^{n} a_i ^ 2} = \\sqrt {a \\cdot a}$$\n",
"_____no_output_____"
],
[
"## Sums by rows or columns\n\nAnother general operation performed on matrices is the sum by rows or columns.\nJust as we did for the function norm, the **axis** parameter controls the form of the operation:\n* **axis=0** means to sum the elements of each column together. \n* **axis=1** means to sum the elements of each row together.",
"_____no_output_____"
]
],
[
[
"nparray2 = np.array([[1, -1], [2, -2], [3, -3]]) # Define a 3 x 2 matrix. \n\nsumByCols = np.sum(nparray2, axis=0) # Get the sum for each column. Returns 2 elements\nsumByRows = np.sum(nparray2, axis=1) # get the sum for each row. Returns 3 elements\n\nprint('Sum by columns: ')\nprint(sumByCols)\nprint('Sum by rows:')\nprint(sumByRows)",
"_____no_output_____"
]
],
[
[
"## Get the mean by rows or columns\n\nAs with the sums, one can get the **mean** by rows or columns using the **axis** parameter. Just remember that the mean is the sum of the elements divided by the length of the vector\n$$ mean(\\vec a) = \\frac {{\\sum_{i=1}^{n} a_i }}{n}$$",
"_____no_output_____"
]
],
[
[
"nparray2 = np.array([[1, -1], [2, -2], [3, -3]]) # Define a 3 x 2 matrix. Chosen to be a matrix with 0 mean\n\nmean = np.mean(nparray2) # Get the mean for the whole matrix\nmeanByCols = np.mean(nparray2, axis=0) # Get the mean for each column. Returns 2 elements\nmeanByRows = np.mean(nparray2, axis=1) # get the mean for each row. Returns 3 elements\n\nprint('Matrix mean: ')\nprint(mean)\nprint('Mean by columns: ')\nprint(meanByCols)\nprint('Mean by rows:')\nprint(meanByRows)",
"Matrix mean: \n0.0\nMean by columns: \n[ 2. -2.]\nMean by rows:\n[0. 0. 0.]\n"
]
],
[
[
"## Center the columns of a matrix\n\nCentering the attributes of a data matrix is another essential preprocessing step. Centering a matrix means to remove the column mean to each element inside the column. The sum by columns of a centered matrix is always 0.\n\nWith NumPy, this process is as simple as this:",
"_____no_output_____"
]
],
[
[
"nparray2 = np.array([[1, 1], [2, 2], [3, 3]]) # Define a 3 x 2 matrix. \n\nnparrayCentered = nparray2 - np.mean(nparray2, axis=0) # Remove the mean for each column\n\nprint('Original matrix')\nprint(nparray2)\nprint('Centered by columns matrix')\nprint(nparrayCentered)\n\nprint('New mean by column')\nprint(nparrayCentered.mean(axis=0))",
"Original matrix\n[[1 1]\n [2 2]\n [3 3]]\nCentered by columns matrix\n[[-1. -1.]\n [ 0. 0.]\n [ 1. 1.]]\nNew mean by column\n[0. 0.]\n"
]
],
[
[
"**Warning:** This process does not apply for row centering. In such cases, consider transposing the matrix, centering by columns, and then transpose back the result. \n\nSee the example below:",
"_____no_output_____"
]
],
[
[
"nparray2 = np.array([[1, 3], [2, 4], [3, 5]]) # Define a 3 x 2 matrix. \n\nnparrayCentered = nparray2.T - np.mean(nparray2, axis=1) # Remove the mean for each row\nnparrayCentered = nparrayCentered.T # Transpose back the result\n\nprint('Original matrix')\nprint(nparray2)\nprint('Centered by columns matrix')\nprint(nparrayCentered)\n\nprint('New mean by rows')\nprint(nparrayCentered.mean(axis=1))",
"Original matrix\n[[1 3]\n [2 4]\n [3 5]]\nCentered by columns matrix\n[[-1. 1.]\n [-1. 1.]\n [-1. 1.]]\nNew mean by rows\n[0. 0. 0.]\n"
]
],
[
[
"Note that some operations can be performed using static functions like `np.sum()` or `np.mean()`, or by using the inner functions of the array",
"_____no_output_____"
]
],
[
[
"nparray2 = np.array([[1, 3], [2, 4], [3, 5]]) # Define a 3 x 2 matrix. \n\nmean1 = np.mean(nparray2) # Static way\nmean2 = nparray2.mean(axis=1) # Dinamic way\n\nprint(mean1, ' == ', mean2)",
"3.0 == [2. 3. 4.]\n"
]
],
[
[
"Even if they are equivalent, we recommend the use of the static way always.\n\n**Congratulations! You have successfully reviewed vector and matrix operations with Numpy!**",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4a20042d26559ad7aaa0a6dd285d57e5702654f2
| 38,410 |
ipynb
|
Jupyter Notebook
|
Final Project - News Headline Generation and Validation/data/plots.ipynb
|
vighneshvnkt/deep-learning-assignments
|
61ef33fbcd228e06a0aa3059f6a849d33234a3ab
|
[
"MIT"
] | 29 |
2018-08-09T03:00:19.000Z
|
2021-11-08T09:31:03.000Z
|
Final Project - News Headline Generation and Validation/data/plots.ipynb
|
vighneshvnkt/deep-learning-assignments
|
61ef33fbcd228e06a0aa3059f6a849d33234a3ab
|
[
"MIT"
] | 2 |
2018-11-03T04:06:04.000Z
|
2018-11-30T19:32:51.000Z
|
Final Project - News Headline Generation and Validation/data/plots.ipynb
|
vighneshvnkt/deep-learning-assignments
|
61ef33fbcd228e06a0aa3059f6a849d33234a3ab
|
[
"MIT"
] | 7 |
2019-01-18T16:33:31.000Z
|
2021-09-11T13:27:14.000Z
| 100.026042 | 18,812 | 0.794793 |
[
[
[
"import csv\nimport os\nfrom collections import defaultdict\nimport pandas as pd\nfrom nltk.corpus import stopwords\nfrom textblob import TextBlob, Word\nfrom gensim.scripts.glove2word2vec import glove2word2vec\nfrom nltk.stem import PorterStemmer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom gensim.models import KeyedVectors # load the Stanford GloVe model\nimport ftfy\nimport string\nfrom nltk.tokenize import word_tokenize\nimport pickle",
"C:\\Users\\Naini\\Anaconda3\\lib\\site-packages\\gensim\\utils.py:1197: UserWarning: detected Windows; aliasing chunkize to chunkize_serial\n warnings.warn(\"detected Windows; aliasing chunkize to chunkize_serial\")\n"
],
[
"df2 = pd.read_pickle('tokenized_data.pickle')",
"_____no_output_____"
],
[
"heads = df2['heads']\nheads",
"_____no_output_____"
],
[
"descs = df2['descs']\ndescs",
"_____no_output_____"
],
[
"i=0\nheads[i]",
"_____no_output_____"
],
[
"descs[i]\n",
"_____no_output_____"
],
[
"len(heads),len(set(heads))",
"_____no_output_____"
],
[
"len(descs),len(set(descs))\n",
"_____no_output_____"
],
[
"from collections import Counter\nfrom itertools import chain\ndef get_vocab(lst):\n vocabcount = Counter(w for txt in lst for w in txt.split())\n vocab = map(lambda x: x[0], sorted(vocabcount.items(), key=lambda x: -x[1]))\n return vocab, vocabcount",
"_____no_output_____"
],
[
"vocab, vocabcount = get_vocab(heads+descs)",
"_____no_output_____"
]
],
[
[
"### Most popular tokens ",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\n%matplotlib inline\nplt.plot([vocabcount[w] for w in vocab]);\nplt.gca().set_xscale(\"log\", nonposx='clip')\nplt.gca().set_yscale(\"log\", nonposy='clip')\nplt.title('word distribution in headlines and discription')\nplt.xlabel('rank')\nplt.ylabel('total appearances');",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a2017ef47869e853baab0be700b5089e6fde528
| 4,161 |
ipynb
|
Jupyter Notebook
|
Machine-Learning-with-Python- From-LM-to-DL/Unit 1.Linear Classifiers and Generalizations/project01- Automatic Review Analyzer/sentiment_analysis/part_1/.ipynb_checkpoints/01_hinge_loss-checkpoint (1).ipynb
|
andresdelarosa1887/Public-Projects
|
db8d8e0c0f5f0f7326346462fcdfe21ce8142a12
|
[
"Unlicense"
] | 1 |
2020-09-29T17:29:34.000Z
|
2020-09-29T17:29:34.000Z
|
Machine-Learning-with-Python- From-LM-to-DL/Unit 1.Linear Classifiers and Generalizations/project01- Automatic Review Analyzer/sentiment_analysis/part_1/.ipynb_checkpoints/adr_project1-checkpoint.ipynb
|
andresdelarosa1887/Public-Projects
|
db8d8e0c0f5f0f7326346462fcdfe21ce8142a12
|
[
"Unlicense"
] | null | null | null |
Machine-Learning-with-Python- From-LM-to-DL/Unit 1.Linear Classifiers and Generalizations/project01- Automatic Review Analyzer/sentiment_analysis/part_1/.ipynb_checkpoints/adr_project1-checkpoint.ipynb
|
andresdelarosa1887/Public-Projects
|
db8d8e0c0f5f0f7326346462fcdfe21ce8142a12
|
[
"Unlicense"
] | null | null | null | 32.76378 | 360 | 0.596732 |
[
[
[
"# Hinge Loss\nIn this project you will be implementing linear classifiers beginning with the Perceptron algorithm. You will begin by writing your loss function, a hinge-loss function. For this function you are given the parameters of your model θ and θ0\n\nAdditionally, you are given a feature matrix in which the rows are feature vectors and the columns are individual features, and a vector of labels representing the actual sentiment of the corresponding feature vector.",
"_____no_output_____"
],
[
"1. First, implement the basic hinge loss calculation on a single data-point. Instead of the entire feature matrix, you are given one row, representing the feature vector of a single data sample, and its label of +1 or -1 representing the ground truth sentiment of the data sample\n\ndef hinge_loss_single(feature_vector, label, theta, theta_0):\n\n feature_vector - A numpy array describing the given data point.\n label - A real valued number, the correct classification of the data\n point.\n theta - A numpy array describing the linear classifier.\n theta_0 - A real valued number representing the offset parameter.\nReturns: A real number representing the hinge loss associated with the\n given data point and parameters.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nfeature_vector= np.array([1, 2])\nlabel= 1\ntheta= np.array([-1, 1])\ntheta_0= -0.2",
"_____no_output_____"
],
[
"def hinge_loss_single(feature_vector, label, theta, theta_0):\n if (label* np.dot(feature_vector, theta) + theta_0) >=1:\n loss= 0\n else: \n loss= 1 - (label* (np.dot(theta, feature_vector) + theta_0))\n return loss",
"_____no_output_____"
]
],
[
[
"# The Complete Hinge Loss\nNow it's time to implement the complete hinge loss for a full set of data. Your input will be a full feature matrix this time, and you will have a vector of corresponding labels. The kth row of the feature matrix corresponds to the kth element of the labels vector. This function should return the appropriate loss of the classifier on the given dataset.",
"_____no_output_____"
]
],
[
[
"def hinge_loss_full(feature_matrix, labels, theta, theta_0):\n total_loss=[]\n for i, x in enumerate(feature_matrix):\n if (labels[i]*(np.dot(theta, feature_matrix[i]) + theta_0)) >= 1:\n loss= 0\n else: \n loss= 1 - (labels[i]*(np.dot(theta, feature_matrix[i])+ theta_0))\n total_loss.append(loss)\n return sum(total_loss)/len(feature_matrix)",
"_____no_output_____"
],
[
"feature_matrix = np.array([[1, 2], [1, -1]])\nlabel, theta, theta_0 = np.array([1, 1]), np.array([-1, 1]), -0.2\nhinge_loss_full(feature_matrix, label, theta, theta_0)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
4a201a4db66965100dbfedb4d65ac87c0faead17
| 5,999 |
ipynb
|
Jupyter Notebook
|
notebooks/e_extra/pytorch_image_filtering_ml/Chapter 8 -- Feedforward.ipynb
|
primer-computational-mathematics/book
|
305941b4f1fc4f15d472fd11f2c6e90741fb8b64
|
[
"MIT"
] | 3 |
2020-08-02T07:32:14.000Z
|
2021-11-16T16:40:43.000Z
|
notebooks/e_extra/pytorch_image_filtering_ml/Chapter 8 -- Feedforward.ipynb
|
primer-computational-mathematics/book
|
305941b4f1fc4f15d472fd11f2c6e90741fb8b64
|
[
"MIT"
] | 5 |
2020-07-27T10:45:26.000Z
|
2020-08-12T15:09:14.000Z
|
notebooks/e_extra/pytorch_image_filtering_ml/Chapter 8 -- Feedforward.ipynb
|
primer-computational-mathematics/book
|
305941b4f1fc4f15d472fd11f2c6e90741fb8b64
|
[
"MIT"
] | 4 |
2020-08-05T13:57:32.000Z
|
2022-02-02T19:03:57.000Z
| 22.468165 | 151 | 0.471745 |
[
[
[
"(Feedforward)=\n# Chapter 8 -- Feedforward",
"_____no_output_____"
],
[
"Let's take a look at how feedforward is processed in a three layers neural net.",
"_____no_output_____"
],
[
"<img src=\"images/feedForward.PNG\" width=\"500\">\nFigure 8.1",
"_____no_output_____"
],
[
"From the figure 8.1 above, we know that the two input values for the first and the second neuron in the hidden layer are",
"_____no_output_____"
],
[
"$$\nh_1^{(1)} = w_{11}^{(1)}*x_1 + w_{21}^{(1)}*x_2 + w_{31}^{(1)}*x_3+ w_{41}^{(1)}*1\n$$ (eq8_1)",
"_____no_output_____"
],
[
"$$\nh_2^{(1)} = w_{12}^{(2)}*x_1 + w_{22}^{(2)}*x_2 + w_{32}^{(2)}*x_3+ w_{42}^{(2)}*1\n$$ (eq8_2)",
"_____no_output_____"
],
[
"where the $w^{(n)}_{4m}$ term is the bias term in the form of weight.",
"_____no_output_____"
],
[
"To simplify the two equations above, we can use matrix",
"_____no_output_____"
],
[
"$$\nH^{(1)} = [h_1^{(1)} \\;\\; h_2^{(1)}] = [x_1 \\;\\; x_2 \\;\\; x_3 \\;\\; 1]\n\\begin{bmatrix}\nw^{(1)}_{11} & w^{(1)}_{12} \\\\\nw^{(1)}_{21} & w^{(1)}_{22} \\\\\nw^{(1)}_{31} & w^{(1)}_{32} \\\\\nw^{(1)}_{41} & w^{(1)}_{4\n2}\n\\end{bmatrix}\n$$ (eq8_3)",
"_____no_output_____"
],
[
"Similarly, the two outputs from the input layer can be the inputs for the hidden layer",
"_____no_output_____"
],
[
"$$\n\\sigma(H^{(1)}) = [\\sigma(h_1^{(1)}) \\;\\; \\sigma( h_2^{(1)})]\n$$ (eq8_4)",
"_____no_output_____"
],
[
"This in turns can be the input values for the next layer (output layer)",
"_____no_output_____"
],
[
"$$\nh^{(2)} = w^{(2)}_{11}* \\sigma(h^{(1)}_1)+w^{(2)}_{21} *\\sigma(h^{(1)}_2)+w^{(2)}_{31}*1 \n$$ (eq8_5)",
"_____no_output_____"
],
[
"Again, we can simplify this equation by using matrix",
"_____no_output_____"
],
[
"$$\nH^{(2)} = [\\sigma(h_1^{(1)}) \\;\\;\\sigma(h_2^{(1)}) \\; \\; 1]\n\\begin{bmatrix}\nw^{(2)}_{11} \\\\\nw^{(2)}_{21} \\\\\nw^{(2)}_{31} \n\\end{bmatrix}\n$$ (eq8_6)",
"_____no_output_____"
],
[
"Then we send this value $h^{(2)}$ into the sigma function in the final output layer to obtain the prediction",
"_____no_output_____"
],
[
"$$\n \\hat{y} = \\sigma(h^{(2)})\n$$ (eq8_7)",
"_____no_output_____"
],
[
"To put all the equation of three layers together, we can have",
"_____no_output_____"
],
[
"$$\n\\hat{y} = \\sigma(\\sigma([x_1 \\;\\; x_2 \\;\\; x_3 \\;\\; 1]\n\\begin{bmatrix}\nw^{(1)}_{11} & w^{(1)}_{12} \\\\\nw^{(1)}_{21} & w^{(1)}_{22} \\\\\nw^{(1)}_{31} & w^{(1)}_{32} \\\\\nw^{(1)}_{41} & w^{(1)}_{42}\n\\end{bmatrix}) \n\\begin{bmatrix}\nw^{(2)}_{11} \\\\\nw^{(2)}_{21} \\\\\nw^{(2)}_{31} \n\\end{bmatrix})\n$$ (eq8_8)",
"_____no_output_____"
],
[
"Or we can simplify it to be",
"_____no_output_____"
],
[
"$$\n \\hat{y} = \\sigma(\\sigma(xW^{(1)})W^{(2)})\n$$ (eq8_9)",
"_____no_output_____"
],
[
"This is the feedforward process: based on the known weights $W$ and input $x$ to calculate the prediction $\\hat{y}$.",
"_____no_output_____"
],
[
"Finally, it's easy to write code computing the output from a Network instance. We begin by defining the sigmoid function:",
"_____no_output_____"
]
],
[
[
"def sigmoid(z):\n return 1.0/(1.0+np.exp(-z))",
"_____no_output_____"
]
],
[
[
"Note that when the input z is a vector or Numpy array, Numpy automatically applies the function sigmoid elementwise, that is, in vectorized form.",
"_____no_output_____"
],
[
"We then add a feedforward method to the Network class, which, given an input a for the network, returns the corresponding output:",
"_____no_output_____"
]
],
[
[
"def feedforward(self, a):\n \"\"\"Returning the output a, which is the input to the next layer\"\"\"\n for b, w in zip(self.biases, self.weights):\n a = sigmoid(np.dot(w, a)+b)\n return a",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
4a201d75f65daa11fd526936f2b8096d6a5f9e34
| 398 |
ipynb
|
Jupyter Notebook
|
Allfiles/00/artifacts/environment-setup/notebooks/Setup - Probe.ipynb
|
v-harsha/DP203
|
65b3dad97fb6fcf850f57520e5191a282eafdb21
|
[
"MIT"
] | 180 |
2021-07-27T01:50:32.000Z
|
2022-03-30T19:44:44.000Z
|
Allfiles/00/artifacts/environment-setup/notebooks/Setup - Probe.ipynb
|
v-harsha/DP203
|
65b3dad97fb6fcf850f57520e5191a282eafdb21
|
[
"MIT"
] | 116 |
2021-07-25T09:08:49.000Z
|
2022-03-30T15:13:51.000Z
|
Allfiles/00/artifacts/environment-setup/notebooks/Setup - Probe.ipynb
|
v-harsha/DP203
|
65b3dad97fb6fcf850f57520e5191a282eafdb21
|
[
"MIT"
] | 170 |
2021-07-27T07:30:04.000Z
|
2022-03-31T20:07:59.000Z
| 17.304348 | 52 | 0.464824 |
[
[
[
"import pip\ndisplay(pip.get_installed_distributions())",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code"
]
] |
4a20349f1f7a047a86da728340510a8c77383474
| 8,660 |
ipynb
|
Jupyter Notebook
|
hpc/miniprofiler/English/Fortran/jupyter_notebook/profiling-fortran-lab2.ipynb
|
Anish-Saxena/gpubootcamp
|
469ed5ed1fbfdaee780cec90e2fb59e5bba089b5
|
[
"Apache-2.0"
] | 1 |
2022-02-20T12:33:03.000Z
|
2022-02-20T12:33:03.000Z
|
hpc/miniprofiler/English/Fortran/jupyter_notebook/profiling-fortran-lab2.ipynb
|
hiter-joe/gpubootcamp
|
bc358e6af3a06f5f554ec93cbf402da82c56e93d
|
[
"Apache-2.0"
] | null | null | null |
hpc/miniprofiler/English/Fortran/jupyter_notebook/profiling-fortran-lab2.ipynb
|
hiter-joe/gpubootcamp
|
bc358e6af3a06f5f554ec93cbf402da82c56e93d
|
[
"Apache-2.0"
] | 1 |
2021-03-02T17:24:29.000Z
|
2021-03-02T17:24:29.000Z
| 42.871287 | 463 | 0.655774 |
[
[
[
"In this lab, we will optimize the weather simulation application written in Fortran (if you prefer to use C++, click [this link](../../C/jupyter_notebook/profiling-c.ipynb)). \n\nLet's execute the cell below to display information about the GPUs running on the server by running the pgaccelinfo command, which ships with the PGI compiler that we will be using. To do this, execute the cell block below by giving it focus (clicking on it with your mouse), and hitting Ctrl-Enter, or pressing the play button in the toolbar above. If all goes well, you should see some output returned below the grey cell.",
"_____no_output_____"
]
],
[
[
"!pgaccelinfo",
"_____no_output_____"
]
],
[
[
"## Exercise 2 \n\n### Learning objectives\nLearn how to identify and parallelise the computationally expensive routines in your application using OpenACC compute constructs (A compute construct is a parallel, kernels, or serial construct.). In this exercise you will:\n\n- Implement OpenACC parallelism using parallel directives to parallelise the serial application\n- Learn how to compile your parallel application with PGI compiler\n- Benchmark and compare the parallel version of the application with the serial version\n- Learn how to interpret PGI compiler feedback to ensure the applied optimization were successful\n",
"_____no_output_____"
],
[
"From the top menu, click on *File*, and *Open* `miniWeather_openacc.f90` and `Makefile` from the current directory at `Fortran/source_code/lab2` directory and inspect the code before running below cells.We have already added OpenACC compute directives (`!$acc parallel loop`) around the expensive routines (loops) in the code.\n\nOnce done, compile the code with `make`. View the PGI compiler feedback (enabled by adding `-Minfo=accel` flag) and investigate the compiler feedback for the OpenACC code. The compiler feedback provides useful information about applied optimizations.",
"_____no_output_____"
]
],
[
[
"!cd ../source_code/lab2 && make clean && make",
"_____no_output_____"
]
],
[
[
"Let's inspect part of the compiler feedback and see what it's telling us.\n\n<img src=\"images/ffeedback1-0.png\">\n\n- Using `-ta=tesla:managed`, instruct the compiler to build for an NVIDIA Tesla GPU using \"CUDA Managed Memory\"\n- Using `-Minfo` command-line option, we will see all output from the compiler. In this example, we use `-Minfo=accel` to only see the output corresponding to the accelerator (in this case an NVIDIA GPU).\n- The first line of the output, `compute_tendencies_x`, tells us which function the following information is in reference to.\n- The line starting with 247 and 252, shows we created a parallel OpenACC loop. This loop is made up of gangs (a grid of blocks in CUDA language) and vector parallelism (threads in CUDA language) with the vector size being 128 per gang. \n- The line starting with 249 and 252, `Loop is parallelizable` of the output tells us that on these lines in the source code, the compiler found loops to accelerate.\n- The rest of the information concerns data movement. Compiler detected possible need to move data and handled it for us. We will get into this later in this lab.\n\nIt is very important to inspect the feedback to make sure the compiler is doing what you have asked of it.\n\nNow, **Run** the application for small values of `nx_glob`,`nz_glob`, and `sim_time`: **40, 20, 1000**. ",
"_____no_output_____"
]
],
[
[
"!cd ../source_code/lab2 && ./miniWeather",
"_____no_output_____"
]
],
[
[
"**Profile** it with Nsight Systems command line `nsys`.",
"_____no_output_____"
]
],
[
[
"!cd ../source_code/lab2 && nsys profile -t nvtx,openacc --stats=true --force-overwrite true -o miniWeather_3 ./miniWeather",
"_____no_output_____"
]
],
[
[
"You can see that the changes made actually slowed down the code and it runs slower compared to the non-accelerated CPU only version. Let's checkout the profiler's report. [Download the profiler output](../source_code/lab2/miniWeather_3.qdrep) and open it via the GUI. \n\nFrom the \"timeline view\" on the top pane, double click on the \"CUDA\" from the function table on the left and expand it. Zoom in on the timeline and you can see a pattern similar to the screenshot below. The blue boxes are the compute kernels and each of these groupings of kernels is surrounded by purple and teal boxes (annotated with red color) representing data movements. **Screenshots represents profiler report for the values of 400,200,1500.**\n\n<img src=\"images/nsys_slow.png\" width=\"80%\" height=\"80%\">\n\nLet's hover your mouse over kernels (blue boxes) one by one from each row and checkout the provided information.\n\n<img src=\"images/occu-1.png\" width=\"60%\" height=\"60%\">\n\n**Note**: In the next two exercises, we start optimizing the application by improving the occupancy and reducing data movements.",
"_____no_output_____"
],
[
"## Post-Lab Summary\n\nIf you would like to download this lab for later viewing, it is recommend you go to your browsers File menu (not the Jupyter notebook file menu) and save the complete web page. This will ensure the images are copied down as well. You can also execute the following cell block to create a zip-file of the files you've been working on, and download it with the link below.",
"_____no_output_____"
]
],
[
[
"%%bash\ncd ..\nrm -f openacc_profiler_files.zip\nzip -r openacc_profiler_files.zip *",
"_____no_output_____"
]
],
[
[
"**After** executing the above zip command, you should be able to download the zip file [here](../openacc_profiler_files.zip).",
"_____no_output_____"
],
[
"-----\n\n# <p style=\"text-align:center;border:3px; border-style:solid; border-color:#FF0000 ; padding: 1em\"> <a href=../../profiling_start.ipynb>HOME</a> <span style=\"float:center\"> <a href=profiling-fortran-lab3.ipynb>NEXT</a></span> </p>\n\n-----",
"_____no_output_____"
],
[
"# Links and Resources\n\n[OpenACC API Guide](https://www.openacc.org/sites/default/files/inline-files/OpenACC%20API%202.6%20Reference%20Guide.pdf)\n\n[NVIDIA Nsight System](https://docs.nvidia.com/nsight-systems/)\n\n[CUDA Toolkit Download](https://developer.nvidia.com/cuda-downloads)\n\n**NOTE**: To be able to see the Nsight System profiler output, please download Nsight System latest version from [here](https://developer.nvidia.com/nsight-systems).\n\nDon't forget to check out additional [OpenACC Resources](https://www.openacc.org/resources) and join our [OpenACC Slack Channel](https://www.openacc.org/community#slack) to share your experience and get more help from the community.\n\n--- \n\n## Licensing \n\nThis material is released by NVIDIA Corporation under the Creative Commons Attribution 4.0 International (CC BY 4.0). ",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
]
] |
4a203a4aa98d87b58cd0bee18587674848c61815
| 29,373 |
ipynb
|
Jupyter Notebook
|
model.ipynb
|
ThiruRJST/Submarine-Surfers
|
86582b3ef8ca8f2d15a25a51d4ed9b660ef678a0
|
[
"MIT"
] | 2 |
2020-11-07T01:58:20.000Z
|
2020-11-09T07:13:24.000Z
|
model.ipynb
|
ThiruRJST/Submarine-Surfers
|
86582b3ef8ca8f2d15a25a51d4ed9b660ef678a0
|
[
"MIT"
] | null | null | null |
model.ipynb
|
ThiruRJST/Submarine-Surfers
|
86582b3ef8ca8f2d15a25a51d4ed9b660ef678a0
|
[
"MIT"
] | 1 |
2020-10-31T07:28:32.000Z
|
2020-10-31T07:28:32.000Z
| 75.899225 | 204 | 0.742485 |
[
[
[
"import matplotlib\nimport matplotlib.pyplot as plt\n\nimport os\nimport random\nimport io\nimport imageio\nimport glob\nimport scipy.misc\nimport numpy as np\nfrom six import BytesIO\nfrom PIL import Image, ImageDraw, ImageFont\nfrom IPython.display import display, Javascript\nfrom IPython.display import Image as IPyImage\n\nimport tensorflow as tf\n\nfrom object_detection.utils import label_map_util\nfrom object_detection.utils import config_util\nfrom object_detection.utils import visualization_utils as viz_utils\nfrom object_detection.builders import model_builder\n\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"### Function to run inference on a single image",
"_____no_output_____"
]
],
[
[
"def run_inference_single_image(model,image):\n image = np.asarray(image)\n input_tensor = tf.convert_to_tensor(image)\n input_tensor = input_tensor[tf.newaxis,...]\n model_fn = model.signatures[\"serving_default\"]\n output = model_fn(input_tensor)\n\n num_detections = int(output.pop(\"num_detections\"))\n output = {key:value[0, :num_detections].numpy() for key,value in output.items()}\n output['num_detections'] = num_detections\n output['detection_classes']=output['detection_classes'].astype(np.int64)\n\n \n return output",
"_____no_output_____"
],
[
"LABEL_PATH = '/home/thirumalaikumar/Intern Projects/TrafficControl/content/sub_surf/gate_label_map.pbtxt'\nci = label_map_util.create_category_index_from_labelmap(LABEL_PATH,use_display_name=True)",
"_____no_output_____"
],
[
"def show_inference(model,frame):\n image_np = np.array(frame)\n output = run_inference_single_image(model,image_np)\n\n classes = np.squeeze(output['detection_classes'])#class to which the object belongs to\n boxes = np.squeeze(output['detection_boxes'])#box coords\n scores = np.squeeze(output['detection_scores'])#prob score of the model\n\n#condition for Detecting only the gate \n indices = np.argwhere(classes==2)\n boxes = np.squeeze(boxes[indices])\n classes = np.squeeze(classes[indices])\n scores = np.squeeze(scores[indices])\n\n viz_utils.visualize_boxes_and_labels_on_image_array(\n image_np,\n boxes,\n classes,\n scores,\n ci,\n use_normalized_coordinates=True,\n max_boxes_to_draw=100,\n min_score_thresh=.8,\n agnostic_mode=False,\n \n )\n return image_np",
"_____no_output_____"
],
[
"model = tf.saved_model.load(\"/home/thirumalaikumar/Intern Projects/TrafficControl/content/sub_surf/saved_model\")",
"4) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_EfficientDet-D0_layer_call_and_return_conditional_losses_92004) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_EfficientDet-D0_layer_call_and_return_conditional_losses_92004) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_EfficientDet-D0_layer_call_and_return_conditional_losses_92004) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_EfficientDet-D0_layer_call_and_return_conditional_losses_92004) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_EfficientDet-D0_layer_call_and_return_conditional_losses_92004) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_EfficientDet-D0_layer_call_and_return_conditional_losses_92004) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_EfficientDet-D0_layer_call_and_return_conditional_losses_92004) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_EfficientDet-D0_layer_call_and_return_conditional_losses_92004) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_EfficientDet-D0_layer_call_and_return_conditional_losses_92004) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_EfficientDet-D0_layer_call_and_return_conditional_losses_92004) with ops with custom gradients. 
Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_EfficientDet-D0_layer_call_and_return_conditional_losses_92004) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_EfficientDet-D0_layer_call_and_return_conditional_losses_92004) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_EfficientDet-D0_layer_call_and_return_conditional_losses_92004) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_EfficientDet-D0_layer_call_and_return_conditional_losses_92004) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_EfficientDet-D0_layer_call_and_return_conditional_losses_92004) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_bifpn_layer_call_and_return_conditional_losses_64523) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_bifpn_layer_call_and_return_conditional_losses_64523) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_bifpn_layer_call_and_return_conditional_losses_64523) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_bifpn_layer_call_and_return_conditional_losses_64523) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_bifpn_layer_call_and_return_conditional_losses_64523) with ops with custom gradients. 
Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_bifpn_layer_call_and_return_conditional_losses_64523) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_bifpn_layer_call_and_return_conditional_losses_64523) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_bifpn_layer_call_and_return_conditional_losses_64523) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_bifpn_layer_call_and_return_conditional_losses_64523) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_bifpn_layer_call_and_return_conditional_losses_64523) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_bifpn_layer_call_and_return_conditional_losses_64523) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_bifpn_layer_call_and_return_conditional_losses_64523) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_bifpn_layer_call_and_return_conditional_losses_64523) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_bifpn_layer_call_and_return_conditional_losses_64523) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_bifpn_layer_call_and_return_conditional_losses_64523) with ops with custom gradients. 
Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_bifpn_layer_call_and_return_conditional_losses_64523) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_bifpn_layer_call_and_return_conditional_losses_64523) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_bifpn_layer_call_and_return_conditional_losses_64523) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_bifpn_layer_call_and_return_conditional_losses_64523) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_bifpn_layer_call_and_return_conditional_losses_64523) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_bifpn_layer_call_and_return_conditional_losses_64523) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_bifpn_layer_call_and_return_conditional_losses_64523) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_bifpn_layer_call_and_return_conditional_losses_64523) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_bifpn_layer_call_and_return_conditional_losses_64523) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_EfficientDet-D0_layer_call_and_return_conditional_losses_85999) with ops with custom gradients. 
Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_EfficientDet-D0_layer_call_and_return_conditional_losses_85999) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_EfficientDet-D0_layer_call_and_return_conditional_losses_85999) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_EfficientDet-D0_layer_call_and_return_conditional_losses_85999) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_EfficientDet-D0_layer_call_and_return_conditional_losses_85999) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_EfficientDet-D0_layer_call_and_return_conditional_losses_85999) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_EfficientDet-D0_layer_call_and_return_conditional_losses_85999) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_EfficientDet-D0_layer_call_and_return_conditional_losses_85999) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_EfficientDet-D0_layer_call_and_return_conditional_losses_85999) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_EfficientDet-D0_layer_call_and_return_conditional_losses_85999) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_EfficientDet-D0_layer_call_and_return_conditional_losses_85999) with ops with custom gradients. 
Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_EfficientDet-D0_layer_call_and_return_conditional_losses_85999) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_EfficientDet-D0_layer_call_and_return_conditional_losses_85999) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_EfficientDet-D0_layer_call_and_return_conditional_losses_85999) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_EfficientDet-D0_layer_call_and_return_conditional_losses_85999) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_EfficientDet-D0_layer_call_and_return_conditional_losses_85999) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_EfficientDet-D0_layer_call_and_return_conditional_losses_85999) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_EfficientDet-D0_layer_call_and_return_conditional_losses_85999) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_EfficientDet-D0_layer_call_and_return_conditional_losses_85999) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_EfficientDet-D0_layer_call_and_return_conditional_losses_85999) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_EfficientDet-D0_layer_call_and_return_conditional_losses_85999) with ops with custom gradients. 
Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_EfficientDet-D0_layer_call_and_return_conditional_losses_85999) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_EfficientDet-D0_layer_call_and_return_conditional_losses_85999) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_EfficientDet-D0_layer_call_and_return_conditional_losses_85999) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. 
Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. 
Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. 
Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. 
Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\nWARNING:tensorflow:Importing a function (__inference_call_func_20892) with ops with custom gradients. Will likely fail if a gradient is requested.\n"
],
[
"import cv2",
"_____no_output_____"
],
[
"def post_process_bb(model,img,threshold=0.5):\n img = cv2.imread(img)\n output = run_inference_single_image(model,img)\n assert len(output['detection_boxes']) == len(output['detection_scores'])\n max_score_index = np.squeeze(np.argwhere(output['detection_scores']>=threshold))\n detection_score = output['detection_scores'][max_score_index]\n box_coords = output['detection_boxes'][max_score_index]\n detecction_class = output['detection_classes'][max_score_index]\n return img,detection_score,detecction_class,box_coords",
"_____no_output_____"
],
[
"def midpoint():\n img,score,classes,coords = post_process_bb(model,\"/home/thirumalaikumar/hackathon/images_1005-20201030T064700Z-001/images_1005/1_458/download (37).jpg\")\n im_width = img.shape[0]\n im_height = img.shape[1]\n try:\n coords = coords.reshape(1,coords.shape[0])\n except ValueError as v:\n print(\"Your Object detector has detected more than 1 BB\")\n print(coords.shape)\n for i in range(len(coords)):\n x1,y1,x2,y2 = coords[i]\n (left, right, top, bottom) = (y1 * im_width, y2 * im_width, \n x1 * im_height, x2 * im_height)\n p1 = (int(left), int(top))\n p2 = (int(right), int(bottom))\n #img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n _ = cv2.rectangle(img, p1, p2, (255,0,0), 15)\n \n x_center = int((left+right)/2)\n y_center = int(bottom)\n\n x2_center = int((left+right)/2)\n y2_center = int(top)\n \n center1 = (x_center, y_center)\n center2 = (x2_center,y2_center)\n res = tuple(map(lambda i, j: i + j, center1, center2))\n res = tuple(map(lambda i: i / 2, res))\n res = tuple(map(lambda i: int(i) , res))\n img1 = cv2.circle(img,res, 15, (0, 255, 0), -1)\n #cv2.putText(img1,\"Gate\",p1, cv2.FONT_HERSHEY_SIMPLEX,1, (255, 255, 255), 2, cv2.LINE_AA)\n \n return img1",
"_____no_output_____"
],
[
"plot.imshow(midpoint())",
"_____no_output_____"
],
[
"vid = cv2.VideoCapture(0) \n \nwhile(True): \n \n # Capture the video frame \n # by frame \n ret, frame = vid.read() \n imagen = show_inference(model,frame)\n # Display the resulting frame \n cv2.imshow('frame', cv2.resize(imagen,(800,600))) \n \n # the 'q' button is set as the \n # quitting button you may use any \n # desired button of your choice \n if cv2.waitKey(1) & 0xFF == ord('q'): \n break\n \n# After the loop release the cap object \nvid.release() \n# Destroy all the windows \ncv2.destroyAllWindows() ",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a203abca3fa76a297e874bff1d5f05cd39f8914
| 13,438 |
ipynb
|
Jupyter Notebook
|
zp_database/make_zp/create_hard_xray_zp.ipynb
|
sajid-ali-nu/zone_plate_geometry
|
c50afd575a6e733fce265db2ab8cc1c7b21cfe69
|
[
"MIT"
] | null | null | null |
zp_database/make_zp/create_hard_xray_zp.ipynb
|
sajid-ali-nu/zone_plate_geometry
|
c50afd575a6e733fce265db2ab8cc1c7b21cfe69
|
[
"MIT"
] | null | null | null |
zp_database/make_zp/create_hard_xray_zp.ipynb
|
sajid-ali-nu/zone_plate_geometry
|
c50afd575a6e733fce265db2ab8cc1c7b21cfe69
|
[
"MIT"
] | null | null | null | 29.213043 | 363 | 0.545394 |
[
[
[
"This script generates a zone plate pattern (based on partial filling) given the material, energy, grid size and number of zones as input",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom numba import njit\nfrom joblib import Parallel, delayed\nfrom tqdm import tqdm, trange\n\nimport urllib,os,pickle\nfrom os.path import dirname as up",
"_____no_output_____"
]
],
[
[
"Importing all the required libraries. Numba is used to optimize functions.",
"_____no_output_____"
]
],
[
[
"def repeat_pattern(X,Y,Z):\n flag_ = np.where((X>0)&(Y>0))\n flag1 = np.where((X>0)&(Y<0))\n flag1 = tuple((flag1[0][::-1],flag1[1]))\n Z[flag1] = Z[flag_]\n flag2 = np.where((X<0)&(Y>0))\n flag2 = tuple((flag2[0],flag2[1][::-1]))\n Z[flag2] = Z[flag_]\n flag3 = np.where((X<0)&(Y<0))\n flag3 = tuple((flag3[0][::-1],flag3[1][::-1]))\n Z[flag3] = Z[flag_]\n return Z",
"_____no_output_____"
]
],
[
[
"*repeat_pattern* : produces the zone plate pattern given the pattern in only one quadrant(X,Y>0) as input.\n* *Inputs* : X and Y grid denoting the coordinates and Z containing the pattern in one quadrant.\n* *Outputs* : Z itself is modified to reflect the repition.",
"_____no_output_____"
]
],
[
[
"def get_property(mat,energy):\n url = \"http://henke.lbl.gov/cgi-bin/pert_cgi.pl\"\n data = {'Element':str(mat), 'Energy':str(energy), 'submit':'Submit Query'}\n data = urllib.parse.urlencode(data)\n data = data.encode('utf-8')\n req = urllib.request.Request(url, data)\n resp = urllib.request.urlopen(req)\n respDat = resp.read()\n response = respDat.split()\n d = b'g/cm^3<li>Delta'\n i = response.index(d)\n delta = str(response[i+2])[:str(response[i+2]).index('<li>Beta')][2:]\n beta = str(response[i+4])[2:-1]\n return float(delta),float(beta)\n",
"_____no_output_____"
]
],
[
[
"*get_property* : gets delta and beta for a given material at the specified energy from Henke et al.\n* *Inputs* : mat - material, energy - energy in eV\n* *Outputs* : delta, beta",
"_____no_output_____"
]
],
[
[
"@njit # equivalent to \"jit(nopython=True)\".\ndef partial_fill(x,y,step,r1,r2,n):\n x_ = np.linspace(x-step/2,x+step/2,n)\n y_ = np.linspace(y-step/2,y+step/2,n)\n cnts = 0\n for i in range(n):\n for j in range(n):\n z = (x_[i] * x_[i] + y_[j] * y_[j])\n if r1*r1 < z < r2*r2:\n cnts += 1\n fill_factor = cnts/(n*n)\n return fill_factor",
"_____no_output_____"
]
],
[
[
"*partial_fill* : workhorse function for determining the fill pattern. This function is thus used in a loop. njit is used to optimize the function.\n* *Inputs* : x,y - coordinates of the point, step - step size, r1,r2 - inner and outer radii of ring, n - resolution\n* *Outputs* : fill_factor - value of the pixel based on amount of ring passing through it ",
"_____no_output_____"
]
],
[
[
"#find the radius of the nth zone\ndef zone_radius(n,f,wavel):\n return np.sqrt(n*wavel*f + ((n*wavel)/2)**2)",
"_____no_output_____"
]
],
[
[
"*zone_radius* : functon to find the radius of a zone given the zone number and wavelength\n* *Inputs* : n - zone number, f - focal length, wavel - wavelength\n* *Outputs* : radius of the zone as specified by the inputs",
"_____no_output_____"
]
],
[
[
"def make_quadrant(X,Y,flag,r1,r2,step,n,zone_number):\n z = np.zeros(np.shape(X))\n Z = np.sqrt(X**2+Y**2)\n for l in range(len(flag[0])):\n i = flag[0][l]\n j = flag[1][l]\n if 0.75*r1< Z[i][j] < 1.25*r2:\n x1 = X[i][j]\n y1 = Y[i][j]\n z[i][j] = partial_fill(x1,y1,step,r1,r2,n)\n z[tuple((flag[1],flag[0]))] = z[tuple((flag[0],flag[1]))]\n return z",
"_____no_output_____"
]
],
[
[
"*make_quadrant* : function used to create a quadrant of a ring given the inner and outer radius and zone number\n* *Inputs* : X,Y - grid, flag - specifies the quadrant to be filled (i.e. where X,Y>0), r1,r2 - inner and outer radii, n - parameter for the partial_fill function \n* *Outputs* : z - output pattern with one quadrant filled.",
"_____no_output_____"
]
],
[
[
"#2D ZP\ndef make_ring(i):\n print(i)\n r1 = radius[i-1]\n r2 = radius[i]\n n = 250\n ring = make_quadrant(X,Y,flag,r1,r2,step_xy,n,zone_number = i)\n ring = repeat_pattern(X,Y,ring)\n ring_ = np.where(ring!=0)\n vals_ = ring[ring_]\n np.save('ring_locs_'+str(i)+'.npy',ring_)\n np.save('ring_vals_'+str(i)+'.npy',vals_)\n return",
"_____no_output_____"
]
],
[
[
"*make_ring* : function used to create a ring given the relevant parameters\n* *Inputs* : i-zone number,radius - array of radii ,X,Y - grid, flag - specifies the quadrant to be filled (i.e. where X,Y>0),n - parameter for the partial_fill function \n* *Outputs* : None. Saves the rings to memory. ",
"_____no_output_____"
]
],
[
[
"mat = 'Au'\nenergy = 10000 #Energy in EV\nf = 10e-3 #focal length in meters \nwavel = (1239.84/energy)*10**(-9) #Wavelength in meters\ndelta,beta = get_property(mat,energy)\nzones = 700 #number of zones\nradius = np.zeros(zones)",
"_____no_output_____"
]
],
[
[
"Setting up the parameters and initializing the variables.",
"_____no_output_____"
]
],
[
[
"for k in range(zones):\n radius[k] = zone_radius(k,f,wavel)",
"_____no_output_____"
]
],
[
[
"Filling the radius array with the radius of zones for later use in making the rings.",
"_____no_output_____"
],
[
"In the next few code blocks, we check if the parameters of the simulation make sense. First we print out the input and output pixel sizes assuming we will be using the 1FT propagator. Then we see if the pixel sizes are small enough compared to the outermost zone width. Finally we check if the focal spot can be contained for the given amount of tilt angle.",
"_____no_output_____"
]
],
[
[
"grid_size = 55296\ninput_xrange = 262e-6\nstep_xy = input_xrange/grid_size\nL_out = (1239.84/energy)*10**(-9)*f/(input_xrange/grid_size)\nstep_xy_output = L_out/grid_size\nprint(' Ouput L : ',L_out)\nprint(' output pixel size(nm) : ',step_xy_output*1e9)\nprint(' input pixel size(nm) : ',step_xy*1e9)",
" Ouput L : 0.0002616724909923664\n output pixel size(nm) : 4.732213740458016\n input pixel size(nm) : 4.738136574074074\n"
],
[
"drn = radius[-1]-radius[-2]\nprint(' maximum radius(um) : ',radius[-1]*1e6)\nprint(' outermost zone width(nm) :',drn*1e9)",
" maximum radius(um) : 29.438920457407793\n outermost zone width(nm) : 21.065465432240995\n"
],
[
"print(' max shift of focal spot(um) : ',(L_out/2)*1e6)\n\n# invert the following to get max tilt allowance\n# after which the focal spot falls of the \n# simulation plane\n# np.sin(theta*(np.pi/180))*f = (L_out/2)\ntheta_max = np.arcsin((L_out/2)*(1/f))*(180/np.pi)\nprint(' max wavefield aligned tilt(deg) : ',theta_max)",
" max shift of focal spot(um) : 130.8362454961832\n max wavefield aligned tilt(deg) : 0.7496578563568063\n"
],
[
"if step_xy > 0.25*drn :\n print(' WARNING ! input pixel size too small')\n print(' ratio of input step size to outermost zone width', step_xy/drn)\nif step_xy_output > 0.25*drn :\n print(' WARNING ! output pixel size too small')\n print(' ratio of output step size to outermost zone width', step_xy_output/drn)",
"_____no_output_____"
],
[
"zones_to_fill = []\nfor i in range(zones):\n if i%2 == 1 :\n zones_to_fill.append(i)\nzones_to_fill = np.array(zones_to_fill)",
"_____no_output_____"
]
],
[
[
"Making a list of zones to fill. (Since only alternate zones are filled in our case. This can be modified as per convenience)",
"_____no_output_____"
]
],
[
[
"try :\n os.chdir(up(os.getcwd())+str('/hard_xray_zp'))\nexcept :\n os.mkdir(up(os.getcwd())+str('/hard_xray_zp'))\n os.chdir(up(os.getcwd())+str('/hard_xray_zp'))",
"_____no_output_____"
]
],
[
[
"Store the location of each ring of the zone plate separately in a sub directory. This is more efficient than storing the whole zone plate array !",
"_____no_output_____"
]
],
[
[
"x1 = input_xrange/2\nx = np.linspace(-x1,x1,grid_size)\nstep_xy = x[-1]-x[-2]\nzp_coords =[-x1,x1,-x1,x1]",
"_____no_output_____"
],
[
"X,Y = np.meshgrid(x,x)\nflag = np.where((X>0)&(Y>0)&(X>=Y)) ",
"_____no_output_____"
]
],
[
[
"Creating the input 1D array and setting the parameters for use by the make ring function. \nNote that X,Y,flag and step_xy will be read by multiple processes which we will spawn using joblib.",
"_____no_output_____"
]
],
[
[
"%%capture\nfrom joblib import Parallel, delayed \nresults = Parallel(n_jobs=5)(delayed(make_ring)(i) for i in zones_to_fill)",
"_____no_output_____"
]
],
[
[
"Creating the rings ! (Adjust the number of jobs depending on CPU cores.)",
"_____no_output_____"
]
],
[
[
"params = {'grid_size':grid_size,'step_xy':step_xy,'energy(in eV)':energy,'wavelength in m':wavel,'focal_length':f,'zp_coords':zp_coords,'delta':delta,'beta':beta}\npickle.dump(params,open('parameters.pickle','wb'))",
"_____no_output_____"
]
],
[
[
"Pickling and saving all the associated parameters along with the rings for use in simulation! ",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4a204030e7c2e7fa2bd1d756b9c6c2be4814a675
| 220,367 |
ipynb
|
Jupyter Notebook
|
sandbox/Simple_peak_picker.ipynb
|
slyon/pyFAI
|
f00dc9bdced334b9a0e3d76080e4515808821744
|
[
"MIT"
] | 45 |
2016-07-16T19:43:47.000Z
|
2022-03-12T16:53:47.000Z
|
sandbox/Simple_peak_picker.ipynb
|
slyon/pyFAI
|
f00dc9bdced334b9a0e3d76080e4515808821744
|
[
"MIT"
] | 1,125 |
2016-06-09T07:47:57.000Z
|
2022-03-31T20:34:00.000Z
|
sandbox/Simple_peak_picker.ipynb
|
slyon/pyFAI
|
f00dc9bdced334b9a0e3d76080e4515808821744
|
[
"MIT"
] | 52 |
2016-06-09T07:30:46.000Z
|
2022-02-14T08:25:11.000Z
| 153.458914 | 162,887 | 0.834826 |
[
[
[
"%matplotlib nbagg",
"_____no_output_____"
],
[
"import os\nos.environ[\"PYOPENCL_COMPILER_OUTPUT\"]=\"1\"\nimport numpy\nimport fabio\nimport pyopencl\nfrom pyopencl import array as cla\nfrom matplotlib.pyplot import subplots",
"_____no_output_____"
],
[
"ctx = pyopencl.create_some_context(interactive=True)\nqueue = pyopencl.CommandQueue(ctx, properties=pyopencl.command_queue_properties.PROFILING_ENABLE)\nctx",
"Choose platform:\n[0] <pyopencl.Platform 'Portable Computing Language' at 0x7fd9084e5020>\n[1] <pyopencl.Platform 'NVIDIA CUDA' at 0x2f41c40>\n[2] <pyopencl.Platform 'Intel(R) OpenCL' at 0x2e338d0>\nChoice [0]:1\nChoose device(s):\n[0] <pyopencl.Device 'GeForce GTX TITAN' on 'NVIDIA CUDA' at 0x2f4d510>\n[1] <pyopencl.Device 'Quadro M2000' on 'NVIDIA CUDA' at 0x2f41bb0>\nChoice, comma-separated [0]:0\nSet the environment variable PYOPENCL_CTX='1:0' to avoid being asked again.\n"
],
[
"image = fabio.open(\"/users/kieffer/workspace-400/tmp/pyFAI/test/testimages/Pilatus6M.cbf\").data\nmask = (image<0).astype(\"int8\")",
"_____no_output_____"
],
[
"fig, ax = subplots()\nax.imshow(image.clip(0,100))",
"_____no_output_____"
],
[
"%load_ext pyopencl.ipython_ext",
"_____no_output_____"
],
[
"%%cl_kernel\n\n//read withou caching\nfloat inline read_simple(global int *img, \n int height,\n int width,\n int row,\n int col){\n //This kernel reads the value and returns it without active caching\n float value = NAN;\n \n // Read\n if ((col>=0) && (col<width) && (row>=0) && (row<height)){\n int read_pos = col + row*width;\n value = (float)img[read_pos];\n if (value<0){\n value = NAN;\n }\n }\n return value;\n}\n\n\nvoid inline read_and_store(global int *img, \n int height,\n int width,\n int row,\n int col,\n int half_wind_height,\n int half_wind_width,\n local float* storage){\n //This kernel reads the value and stores in the local storage\n int line_size, write_pos, idx_line;\n float value = NAN;\n \n // Read\n if ((col>=0) && (col<width) && (row>0) && (row<height)){\n int read_pos = col + row*width;\n value = (float)img[read_pos];\n if (value<0){\n value = NAN;\n }\n }\n // Save locally\n if ((col>=-half_wind_width) && (col<=width+half_wind_width) && (row>-half_wind_height) && (row<=height+half_wind_height)){\n line_size = get_local_size(0) + 2 * half_wind_width;\n idx_line = (half_wind_height+row)%(2*half_wind_height+1);\n write_pos = line_size*idx_line + half_wind_width + col - get_group_id(0)*get_local_size(0);\n storage[write_pos] = value;\n }\n //return value\n}\n\n//Store a complete line\nvoid inline store_line(global int *img, \n int height,\n int width,\n int row,\n int half_wind_height,\n int half_wind_width,\n local float* storage){\n read_and_store(img, height, width, \n row, get_global_id(0), \n half_wind_height, half_wind_width, storage);\n if (get_local_id(0)<half_wind_width){\n // read_and_store_left\n read_and_store(img, height, width, \n row, get_group_id(0)*get_local_size(0)-half_wind_width+get_local_id(0), \n half_wind_height, half_wind_width, storage);\n //read_and_store_right\n read_and_store(img, height, width, \n row, (get_group_id(0)+1)*get_local_size(0)+get_local_id(0), \n half_wind_height, half_wind_width, storage); \n 
}\n}\n\nfloat read_back( int height,\n int width,\n int row,\n int col,\n int half_wind_height,\n int half_wind_width,\n local float* storage){\n float value=NAN;\n int write_pos, line_size, idx_line;\n if ((col>=-half_wind_width) && (col<=width+half_wind_width) && (row>-half_wind_height) && (row<=height+half_wind_height)){\n line_size = get_local_size(0) + 2 * half_wind_width;\n idx_line = (half_wind_height+row)%(2*half_wind_height+1);\n write_pos = line_size*idx_line + half_wind_width + col - get_group_id(0)*get_local_size(0);\n value = storage[write_pos]; \n }\n return value;\n}\n\n// workgroup size of kernel: 32 to 128, cache_read needs to be (wg+2*half_wind_width)*(2*half_wind_height+1)*sizeof(float)\nkernel void spot_finder(global int *img, \n int height,\n int width,\n int half_wind_height,\n int half_wind_width,\n float threshold,\n float radius,\n global int *cnt_high, //output\n global int *high, //output\n int high_size,\n local float *cache_read,\n local int *local_high,\n int local_size){\n //decaration of variables\n int col, row, cnt, i, j, where;\n float value, sum, std, centroid_r, centroid_c, dist, mean;\n col = get_global_id(0);\n \n local int local_cnt_high[1];\n local_cnt_high[0] = 0;\n for (i=0; i<local_size; i+=get_local_size(0)){\n local_high[i+get_local_id(0)] = 0;\n }\n \n row=0;\n \n //pre-load data for the first line\n for (i=-half_wind_height; i<half_wind_height; i++){\n store_line(img, height, width, row+i, half_wind_height, half_wind_width, cache_read);\n }\n barrier(CLK_LOCAL_MEM_FENCE);\n //loop within a column\n for (row=0;row<height; row++){\n //read data\n store_line(img, height, width, row+half_wind_height, half_wind_height, half_wind_width, cache_read);\n barrier(CLK_LOCAL_MEM_FENCE);\n //calculate mean\n sum = 0.0f;\n centroid_r = 0.0f;\n centroid_c = 0.0f;\n cnt = 0;\n for (i=-half_wind_height; i<=half_wind_height; i++){\n for (j=-half_wind_width; j<=half_wind_width; j++){\n value = read_back(height, width, row+i, col+j, 
half_wind_height, half_wind_width, cache_read);\n if (isfinite(value)){\n sum += value;\n centroid_r += value*i; \n centroid_c += value*j;\n cnt += 1;\n }\n }\n }\n if (cnt){\n mean = sum/cnt;\n dist = sum*radius;\n if ((fabs(centroid_r)<dist) && (fabs(centroid_c)<dist)){\n // calculate std\n sum = 0.0;\n for (i=-half_wind_height; i<=half_wind_height; i++){\n for (j=-half_wind_width; j<=half_wind_width; j++){\n value = read_back(height, width, row+i, col+j, half_wind_height, half_wind_width, cache_read);\n if (isfinite(value)){\n sum += pown(mean-value,2);\n }\n }\n }\n std = sqrt(sum/cnt);\n value = read_back(height, width, row, col, half_wind_height, half_wind_width, cache_read);\n if ((value-mean)>threshold*std){\n where = atomic_inc(local_cnt_high);\n if (where<local_size){\n local_high[where] = col+width*row;\n }\n } // if intense signal\n } // if properly centered\n } // if patch not empty \n barrier(CLK_LOCAL_MEM_FENCE);\n } //for row \n \n //Store the results in global memory\n barrier(CLK_LOCAL_MEM_FENCE);\n if (get_local_id(0) == 0) {\n cnt = local_cnt_high[0];\n if ((cnt>0) && (cnt<local_size)) {\n where = atomic_add(cnt_high, cnt);\n if (where+cnt>high_size){\n cnt = high_size-where; //store what we can\n }\n for (i=0; i<cnt; i++){\n high[where+i] = local_high[i];\n }\n }\n }//store results\n} //kernel\n\n// workgroup size of kernel: without cacheing read\nkernel void simple_spot_finder(global int *img, \n int height,\n int width,\n int half_wind_height,\n int half_wind_width,\n float threshold,\n float radius,\n global int *cnt_high, //output\n global int *high, //output\n int high_size,\n local int *local_high,\n int local_size){\n //decaration of variables\n int col, row, cnt, i, j, where, tid, blocksize;\n float value, sum, std, centroid_r, centroid_c, dist, mean, M2, delta, delta2, target_value;\n col = get_global_id(0);\n row = get_global_id(1);\n \n //Initialization of output array in shared\n local int local_cnt_high[2];\n blocksize = 
get_local_size(0) * get_local_size(1);\n tid = get_local_id(0) + get_local_id(1) * get_local_size(0);\n if (tid < 2){\n local_cnt_high[tid] = 0;\n }\n \n for (i=0; i<local_size; i+=blocksize){\n if ((i+tid)<local_size)\n local_high[i+tid] = 0;\n }\n barrier(CLK_LOCAL_MEM_FENCE); \n \n \n //Calculate mean + std + centroids\n mean = 0.0f;\n M2 = 0.0f;\n centroid_r = 0.0f;\n centroid_c = 0.0f;\n cnt = 0;\n \n for (i=-half_wind_height; i<=half_wind_height; i++){\n for (j=-half_wind_width; j<=half_wind_width; j++){\n value = read_simple(img, height, width, row+i, col+j);\n if (isfinite(value)){\n centroid_r += value*i; \n centroid_c += value*j;\n cnt += 1;\n delta = value - mean;\n mean += delta / cnt;\n delta2 = value - mean;\n M2 += delta * delta2;\n } \n }\n }\n if (cnt){\n dist = mean*radius*cnt;\n std = sqrt(M2 / cnt);\n target_value = read_simple(img, height, width, row, col);\n if (((target_value-mean)>threshold*std) && (fabs(centroid_r)<dist) && (fabs(centroid_c)<dist)){\n where = atomic_inc(local_cnt_high);\n if (where<local_size){\n local_high[where] = col+width*row;\n }\n } // if intense signal properly centered\n } // if patch not empty \n \n //Store the results in global memory\n barrier(CLK_LOCAL_MEM_FENCE);\n if (tid==0) {\n cnt = local_cnt_high[0];\n if ((cnt>0) && (cnt<local_size)) {\n where = atomic_add(cnt_high, cnt);\n if (where+cnt>high_size){\n cnt = high_size-where; //store what we can\n }\n local_cnt_high[0] = cnt;\n local_cnt_high[1] = where;\n }\n }\n barrier(CLK_LOCAL_MEM_FENCE);\n //copy the data from local to global memory\n for (i=0; i<local_cnt_high[0]; i+=blocksize){\n high[local_cnt_high[1]+i+tid] = local_high[i+tid];\n }//store results\n} //kernel",
"_____no_output_____"
],
[
"def peak_count(img,\n window=3,\n threshold=3.0,\n radius=1.0,\n workgroup=32,\n array_size=10000):\n img_d = cla.to_device(queue, image)\n high_d = cla.zeros(queue, (array_size,), dtype=numpy.int32)\n high_cnt_d = cla.zeros(queue, (1,), dtype=numpy.int32)\n read_cache = pyopencl.LocalMemory(4*(workgroup+2*window)*(2*window+1))\n write_cache = pyopencl.LocalMemory(4096)\n height, width = img.shape\n size = (width+workgroup-1)&~(workgroup-1)\n ev = spot_finder(queue, (size,), (workgroup,),\n img_d.data, \n numpy.int32(height),\n numpy.int32(width),\n numpy.int32(window),\n numpy.int32(window),\n numpy.float32( threshold),\n numpy.float32( radius),\n high_cnt_d.data,\n high_d.data,\n numpy.int32(array_size),\n read_cache,\n write_cache,\n numpy.int32(1024))\n size = high_cnt_d.get()[0] \n print(\"found %i peaks in %.3fms\"%(size, (ev.profile.end-ev.profile.start)*1e-6))\n return high_d.get()[:size]\n%time raw = peak_count(image, window=5, threshold=6)\nx=raw%image.shape[-1]\ny=raw//image.shape[-1]\nax.plot(x,y,\".w\")",
"found 234 peaks in 275.350ms\nCPU times: user 233 ms, sys: 51.7 ms, total: 285 ms\nWall time: 282 ms\n"
],
[
"def simple_peak_count(img,\n window=3,\n threshold=3.0,\n radius=1.0,\n workgroup=32,\n array_size=10000):\n img_d = cla.to_device(queue, image)\n high_d = cla.zeros(queue, (array_size,), dtype=numpy.int32)\n high_cnt_d = cla.zeros(queue, (1,), dtype=numpy.int32)\n #read_cache = pyopencl.LocalMemory(4*(workgroup+2*window)*(2*window+1))\n write_cache = pyopencl.LocalMemory(4096)\n height, width = img.shape\n size_w = (width+workgroup-1)&~(workgroup-1)\n size_h = (height+workgroup-1)&~(workgroup-1)\n ev = simple_spot_finder(queue, (size_w,size_h), (workgroup, workgroup),\n img_d.data, \n numpy.int32(height),\n numpy.int32(width),\n numpy.int32(window),\n numpy.int32(window),\n numpy.float32( threshold),\n numpy.float32( radius),\n high_cnt_d.data,\n high_d.data,\n numpy.int32(array_size),\n #read_cache,\n write_cache,\n numpy.int32(1024))\n size = high_cnt_d.get()[0] \n print(\"found %i peaks in %.3fms\"%(size, (ev.profile.end-ev.profile.start)*1e-6))\n return high_d.get()[:size]\n%time raw = simple_peak_count(image, window=5, threshold=6)\nx=raw%image.shape[-1]\ny=raw//image.shape[-1]\nax.plot(x,y,\".y\")",
"found 235 peaks in 21.018ms\nCPU times: user 25.3 ms, sys: 4.65 ms, total: 29.9 ms\nWall time: 27.9 ms\n"
],
[
"# Work on scan\nfrom math import log2\nn = 32\nary = numpy.ones(n)\nary",
"_____no_output_____"
],
[
"ary1 = numpy.copy(ary)\nary2 = numpy.empty_like(ary)\n\nfor i in range(int(log2(n))):\n start = 1<<i\n print(i,start)\n for j in range(start):\n ary2[j] = ary1[j]\n for j in range(start, n):\n ary2[j] = ary1[j] + ary1[j-start]\n ary1, ary2 = ary2, ary1\nprint(ary1)",
"0 1\n1 2\n2 4\n3 8\n4 16\n[ 1. 2. 3. 4. 5. 6. 7. 8. 9. 10. 11. 12. 13. 14. 15. 16. 17. 18.\n 19. 20. 21. 22. 23. 24. 25. 26. 27. 28. 29. 30. 31. 32.]\n"
],
[
"ary-numpy.ones(n).cumsum()",
"_____no_output_____"
],
[
"(32+6)*7*4*2*4",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a20424ae746b758cf6fc68fd177fa161ee43d11
| 5,759 |
ipynb
|
Jupyter Notebook
|
06_download.ipynb
|
tightai/tightai
|
3a440ad780f5c7ff84a54ad9dc6b342ce3b420b6
|
[
"Apache-2.0"
] | 1 |
2020-09-13T08:10:59.000Z
|
2020-09-13T08:10:59.000Z
|
06_download.ipynb
|
tightai/tightai
|
3a440ad780f5c7ff84a54ad9dc6b342ce3b420b6
|
[
"Apache-2.0"
] | 1 |
2022-02-26T08:32:58.000Z
|
2022-02-26T08:32:58.000Z
|
06_download.ipynb
|
tightai/tightai
|
3a440ad780f5c7ff84a54ad9dc6b342ce3b420b6
|
[
"Apache-2.0"
] | null | null | null | 32.721591 | 130 | 0.515715 |
[
[
[
"# default_exp downloaders",
"_____no_output_____"
],
[
"#export\nimport requests\nimport pathspec\nimport time\nfrom pathlib import Path, PurePosixPath\nfrom tightai.lookup import Lookup\nfrom tightai.conf import CLI_ENDPOINT",
"_____no_output_____"
],
[
"#hide\ntest = False\nif test:\n CLI_ENDPOINT = \"http://cli.desalsa.io:8000\"",
"_____no_output_____"
],
[
"#export \nclass DownloadVersion(Lookup):\n path = \".\"\n dest_path = \".\"\n project_id = \"\"\n version = \"\"\n api = CLI_ENDPOINT\n\n def __init__(self,\n path=\".\",\n project_id=None,\n version=None,\n *args, **kwargs):\n api = None\n if \"api\" in kwargs:\n api = kwargs.pop(\"api\")\n super().__init__(*args, **kwargs)\n if api != None:\n self.api = api\n assert project_id != None\n if \"v\" in f\"{version}\":\n version = version.replace(\"v\", \"\")\n try:\n version = int(version)\n except:\n raise Exception(\"Version must be a number or in the format v1, v2, v3, and so on.\")\n self.path = Path(path).resolve()\n self.version = version\n self.project_id = project_id\n self.endpoint = f\"{self.api}/projects/{project_id}/versions/{version}/download/\"\n \n def save_from_url(self, dest, url, force=True):\n dest_path = Path(dest)\n if not force:\n if dest_path.exists():\n print(f\"{dest_path} already exists\")\n return None\n dest_path_parent = dest_path.resolve().parent\n dest_path_parent.mkdir(parents=True, exist_ok=True)\n # NOTE the stream=True parameter below\n with requests.get(url, stream=True) as r:\n r.raise_for_status()\n with open(dest_path, 'wb') as f:\n \n for chunk in r.iter_content(chunk_size=8192): \n # If you have chunk encoded response uncomment if\n # and set chunk_size parameter to None.\n #if chunk: \n f.write(chunk)\n return dest\n \n def download(self, overwrite=False):\n r = self.http_get(self.endpoint)\n self.handle_invalid_lookup(r, expected_status_code=200)\n files = r.json()\n \n for fdict in files:\n fname = fdict['fname']\n furl = fdict['url']\n dest = PurePosixPath(self.path / fname)\n print(\"Downloading\", fname, \"to\", dest)\n self.save_from_url(dest, furl, force=overwrite)\n return",
"_____no_output_____"
],
[
"#hide\n# path_str = \"/Users/jmitch/tight/my-tight-apps/dl-tests\"\n# path_str = Path(path_str)\n\n# assert path.exists() == True\n\n# dl = DownloadVersion(path=path_str, project_id='news-categories', version=1)\n# dl.download(overwrite=True)",
"Downloading Pipfile to /Users/jmitch/tight/my-tight-apps/dl-tests/Pipfile\nDownloading Pipfile.lock to /Users/jmitch/tight/my-tight-apps/dl-tests/Pipfile.lock\nDownloading __init__.py to /Users/jmitch/tight/my-tight-apps/dl-tests/__init__.py\nDownloading data/multi.hdf5 to /Users/jmitch/tight/my-tight-apps/dl-tests/data/multi.hdf5\nDownloading data/tokenizer.pkl to /Users/jmitch/tight/my-tight-apps/dl-tests/data/tokenizer.pkl\nDownloading entry.py to /Users/jmitch/tight/my-tight-apps/dl-tests/entry.py\nDownloading ml.py to /Users/jmitch/tight/my-tight-apps/dl-tests/ml.py\nDownloading news-categories.code-workspace to /Users/jmitch/tight/my-tight-apps/dl-tests/news-categories.code-workspace\nDownloading requirements.txt to /Users/jmitch/tight/my-tight-apps/dl-tests/requirements.txt\nDownloading tight.json to /Users/jmitch/tight/my-tight-apps/dl-tests/tight.json\nDownloading using_keras.txt to /Users/jmitch/tight/my-tight-apps/dl-tests/using_keras.txt\n"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code"
]
] |
4a20486c01ad2d9631ebaa53e6272f34bbf69e85
| 235,065 |
ipynb
|
Jupyter Notebook
|
20180406_analysis_notebook.ipynb
|
DanielSchuette/python_notebooks
|
5948464521240cc91591c30493f7b7e016dc858d
|
[
"MIT"
] | null | null | null |
20180406_analysis_notebook.ipynb
|
DanielSchuette/python_notebooks
|
5948464521240cc91591c30493f7b7e016dc858d
|
[
"MIT"
] | null | null | null |
20180406_analysis_notebook.ipynb
|
DanielSchuette/python_notebooks
|
5948464521240cc91591c30493f7b7e016dc858d
|
[
"MIT"
] | null | null | null | 590.615578 | 128,988 | 0.941731 |
[
[
[
"# Import Required Modules",
"_____no_output_____"
]
],
[
[
"import os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport re\nfrom IPython.display import HTML\n%matplotlib inline",
"_____no_output_____"
],
[
"HTML('''<script>\ncode_show=true; \nfunction code_toggle() {\n if (code_show){\n $('div.input').hide();\n } else {\n $('div.input').show();\n }\n code_show = !code_show\n} \n$( document ).ready(code_toggle);\n</script>\nThe raw code for this IPython notebook is by default hidden for easier reading.\nTo toggle on/off the raw code, click <a href=\"javascript:code_toggle()\">here</a>.''')",
"_____no_output_____"
]
],
[
[
"# Import Dataset\nThe dataset was downloaded from the [Brainspan - Atlas of the Developing Human Brain](http://www.brainspan.org/static/download.html) website on April 6, 2018.",
"_____no_output_____"
]
],
[
[
"# change working directory to directory with data files\nos.chdir(\"/Users/daniel/Documents/Yale/Projects/computations/Allen_brain_development/genes_matrix_csv/\")\n\n# load data file and two files with metadata\ngene_metadata = pd.read_csv(\"rows_metadata.csv\")\npatients_metadata = pd.read_csv(\"columns_metadata.csv\")\nexpression_data = pd.read_csv(\"expression_matrix.csv\", header=None, index_col=0)",
"_____no_output_____"
]
],
[
[
"# Data Overview\nThe database provides RNA-sequencing results (in RPKM units) for 524 samples:\n\ndata file | size (rows, cols)\n--------- | -----------------\n`gene_metadata` | 52376, 5\n`patients_metadata` | 524, 8\n`expression_data` | 52376, 524",
"_____no_output_____"
],
[
"# Cleaning and Joining Data Files\nThe joined data file `expression_data_joined` is of size 524, 52376+4 (patients, genes + `age`, `gender`, `structure_acronym`, `structure_name`) with indices being `donor_name`s and column names being gene names.",
"_____no_output_____"
]
],
[
[
"def transform_data():\n '''\n Run this function once to transform/join above input data files --> creates and returns \n \"expression_data_joined\". Assign function to keep return value alive!\n '''\n # join gene_metadata with expression data\n _gene_metadata = gene_metadata.set_index(\"row_num\", drop=True)\n _gene_metadata = _gene_metadata.drop([\"gene_id\", \"ensembl_gene_id\", \"entrez_id\"], axis=1)\n _expression_data_joined = _gene_metadata.join(other=expression_data)\n _expression_data_joined = _expression_data_joined.set_index(\"gene_symbol\", drop=True).transpose()\n \n # join patients_metadata with expression data (/joined data frame from above)\n _patients_metadata = patients_metadata.set_index(\"column_num\", drop=True).drop(\n [\"donor_id\", \"donor_name\", \"structure_id\"], axis=1)\n _expression_data_joined = _patients_metadata.join(other=_expression_data_joined)\n \n # return the joined table as described above\n return(_expression_data_joined)\n\nexpression_data_joined = transform_data()",
"_____no_output_____"
],
[
"# replace age column with float\ndef replace_age(_input=expression_data_joined):\n '''\n Replace string values of 'age' column with floats (depending on unit).\n '''\n # define regex patterns for replacement\n pattern1 = re.compile(\"^.*pcw$\")\n pattern2 = re.compile(\"^.*mos$\")\n pattern3 = re.compile(\"^.*yrs$\")\n \n # create a new age column: age_in_years\n _input[\"age_in_years\"] = None\n \n # loop over 'age' column and replace with appropriate value\n for i in range(len(_input.age.values)):\n # test for pattern 1\n if bool(pattern1.match(_input.age.values[i])):\n _res = round((- (40 - int((_input.age.values[i].split()[0]))) / 52), 2)\n _input.loc[_input.index[i], \"age_in_years\"] = _res\n # test for pattern 2\n elif bool(pattern2.match(_input.age.values[i])):\n _res = round(((int(_input.age.values[i].split()[0])) / 12), 2)\n _input.loc[_input.index[i], \"age_in_years\"] = _res\n # test for pattern 3\n if bool(pattern3.match(_input.age.values[i])):\n _res = int(_input.age.values[i].split()[0])\n _input.loc[_input.index[i], \"age_in_years\"] = _res \n \n # convert 'age_in_years' column to float type\n _input[\"age_in_years\"] = _input[\"age_in_years\"].astype(float)\n \n # return the data frame with transformed 'age' column\n return(_input)\n\nexpression_data_joined = replace_age()\n\n# add a dichotomized age column\nexpression_data_joined[\"Age Category\"] = np.array(\n [\"< 10 yrs\" if value < 10 else \">= 10 yrs\" for value in expression_data_joined[\"age_in_years\"]])\n\n# create a version of data frame with log2 transformed NCS1 expression\nexpression_data_joined_log2 = expression_data_joined.copy()\nexpression_data_joined_log2[[\"NCS1\"]] = np.log2(expression_data_joined[[\"NCS1\"]]+1)",
"_____no_output_____"
]
],
[
[
"# Exploratory Data Analysis\nScatter plots of NCS1 expression levels (gene level RPKM values) vs. donor age, stratified according to brain regions.",
"_____no_output_____"
]
],
[
[
"expression_data_joined_log2.plot(x=\"age_in_years\", y=\"NCS1\", kind=\"scatter\");\nplt.vlines(x=0, ymin=4, ymax=9, colors=\"black\", linestyles=\"dashed\");",
"_____no_output_____"
],
[
"# plot unique brain regions:\nregions, counts = np.unique(expression_data_joined.structure_name, return_counts=True)\nregions = regions[counts > 10]\n\ndef plot_unique_regions(df, regions=regions):\n '''\n Docstring\n '''\n # create subplots according to length of regions array [19]; this could be automatized...\n fig, ((ax1, ax2, ax3, ax4), (ax5, ax6, ax7, ax8), \n (ax9, ax10, ax11, ax12), (ax13, ax14, ax15, ax16)) = plt.subplots(nrows=4, ncols=4, sharey=True, \n sharex=True, figsize=(24, 20))\n \n # generate an iterator\n axes = iter(list([ax1, ax2, ax3, ax4, ax5, ax6, ax7, ax8, ax9, ax10, ax11, ax12, ax13, ax14, ax15, ax16]))\n \n # loop over selected regions\n for region in regions:\n _df = df[df[\"structure_name\"] == region]\n _axis = next(axes)\n _df.plot(x=\"age_in_years\", y=\"NCS1\", kind=\"scatter\", ax=_axis);\n _axis.set_title(str(region));\n _axis.axvline(x=0, color=\"black\", linestyle=\"dashed\")\n _axis.set_xlabel(\"Age (in years)\", fontsize=14)\n _axis.set_ylabel(\"NCS Expression [log2(FPKM+1)]\", fontsize=14) \n \nplot_unique_regions(df=expression_data_joined, regions=regions)",
"_____no_output_____"
],
[
"unique, counts = np.unique(expression_data_joined_log2[\"structure_name\"], return_counts=True)\n_dict1 = dict(zip(unique, counts))\n_dict2 = dict(zip(pd.unique(expression_data_joined_log2[\"structure_name\"]),\n pd.unique(expression_data_joined_log2[\"structure_acronym\"])))\n\ndicts = [_dict1, _dict2]\nregions_dict = {}\n\n# loop over dictionaries and replace values by tuples ('acronym', int(count))\nfor d in dicts:\n for k, v in d.items():\n if k in regions_dict:\n regions_dict[k] = (regions_dict[k], v)\n else:\n regions_dict[k] = v",
"_____no_output_____"
],
[
"## boxplot of NCS1 expression levels for different brain regions\n# identify brain regions with at least 10 observations... \nregions, counts = np.unique(expression_data_joined.structure_name, return_counts=True)\nregions = regions[counts > 10]\n\n# filter dataframe accordingly...\n_df = expression_data_joined_log2[[\"NCS1\", \"structure_name\", \"structure_acronym\", \"Age Category\"]]\n_df = _df[_df[\"structure_name\"].isin(regions)]\n\n# ... and plot.\n_df.boxplot(\"NCS1\", \"structure_acronym\", figsize=(14, 8), rot=30, grid=False);\nplt.title(\"NCS1 Expression in 16 Different Brain Regions\", fontsize=24);\nplt.xlabel(\"Brain Structure\", fontsize=20);\nplt.ylabel(\"NCS1 Expression [log2(RPKM+1)]\", fontsize=20);\nplt.xticks(fontsize=12);\nplt.yticks(fontsize=18);",
"_____no_output_____"
],
[
"# select four brain regions of interest\n_dis = [\"AMY\", \"STR\", \"MFC\", \"CBC\", \"HIP\"]\n_df_selected = _df[([True if value in _dis else False for value in _df.structure_acronym.values])]\n\n# plot either all or just selected brain regions\nfc = sns.factorplot(x=\"structure_acronym\", y=\"NCS1\", hue=\"Age Category\", saturation=0.5, width=0.7, fliersize=8, linewidth=4,\n data=_df_selected, kind=\"box\", size=7, aspect=1.3, legend_out=False, \n order=[\"HIP\", \"MFC\", \"AMY\", \"CBC\", \"STR\"]); \nfc.despine(top=False, right=False);\nplt.grid(b=True, which=\"major\");\nplt.ylabel(\"NCS1 Gene Expression [log2(RPKM+1)]\", fontsize=16);\nplt.xticks(fontsize=16)\nplt.yticks(fontsize=16)\nplt.xlabel(\"Brain Regions\", fontsize=16);\n\nplt.title(\"NCS1 Expression in 16 Different Brain Regions\", fontsize=20);\nplt.savefig(\"/Users/daniel/Desktop/NCS1_in_16_brain_regions.pdf\", bbox_inches=\"tight\", pad_inches=1)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
4a20491799bb26bd95f2f59ba943e66f81a8f3bc
| 24,097 |
ipynb
|
Jupyter Notebook
|
tutorials/Certification_Trainings/Healthcare/Spark v2.7.6 Notebooks/21.Gender_Classifier.ipynb
|
hatrungduc/spark-nlp-workshop
|
4a4ec0195d1d3d847261df9ef2df7aa5f95bbaec
|
[
"Apache-2.0"
] | 687 |
2018-09-07T03:45:39.000Z
|
2022-03-20T17:11:20.000Z
|
tutorials/Certification_Trainings/Healthcare/Spark v2.7.6 Notebooks/21.Gender_Classifier.ipynb
|
hatrungduc/spark-nlp-workshop
|
4a4ec0195d1d3d847261df9ef2df7aa5f95bbaec
|
[
"Apache-2.0"
] | 89 |
2018-09-18T02:04:42.000Z
|
2022-02-24T18:22:27.000Z
|
tutorials/Certification_Trainings/Healthcare/Spark v2.7.6 Notebooks/21.Gender_Classifier.ipynb
|
hatrungduc/spark-nlp-workshop
|
4a4ec0195d1d3d847261df9ef2df7aa5f95bbaec
|
[
"Apache-2.0"
] | 407 |
2018-09-07T03:45:44.000Z
|
2022-03-20T05:12:25.000Z
| 24,097 | 24,097 | 0.781674 |
[
[
[
"",
"_____no_output_____"
],
[
"[](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Healthcare/Spark%20v2.7.6%20Notebooks/21.Gender_Classifier.ipynb)",
"_____no_output_____"
],
[
"# 21. Gender Classifier ",
"_____no_output_____"
],
[
"**Gender Classifier** detects the gender of the patient in the clinical document. \nIt can classify the documents into `Female`, `Male` and `Unknown`.\n\n\n-'**Classifierdl_gender_sbert**' (works with licensed `sbiobert_base_cased_mli`)\n\nIt has been trained on more than four thousands clinical documents (radiology reports, pathology reports, clinical visits etc.) which were annotated internally.",
"_____no_output_____"
],
[
"## Colab Setup",
"_____no_output_____"
]
],
[
[
"import json\n\nfrom google.colab import files\n\nlicense_keys = files.upload()\n\nwith open(list(license_keys.keys())[0]) as f:\n license_keys = json.load(f)",
"_____no_output_____"
],
[
"%%capture\nfor k,v in license_keys.items(): \n %set_env $k=$v\n\n!wget https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp-workshop/master/jsl_colab_setup.sh\n!bash jsl_colab_setup.sh -p 2.4.4",
"_____no_output_____"
],
[
"import json\nimport os\nfrom pyspark.ml import Pipeline,PipelineModel\nfrom pyspark.sql import SparkSession\n\nfrom sparknlp.annotator import *\nfrom sparknlp_jsl.annotator import *\nfrom sparknlp.base import *\nimport sparknlp_jsl\nimport sparknlp\n\nparams = {\"spark.driver.memory\":\"16G\",\n\"spark.kryoserializer.buffer.max\":\"2000M\",\n\"spark.driver.maxResultSize\":\"2000M\"}\n\nspark = sparknlp_jsl.start(license_keys['SECRET'],params=params)\n\nprint (sparknlp.version())\nprint (sparknlp_jsl.version())",
"2.7.4\n2.7.6\n"
],
[
"spark",
"_____no_output_____"
],
[
"# if you want to start the session with custom params as in start function above\ndef start(secret):\n builder = SparkSession.builder \\\n .appName(\"Spark NLP Licensed\") \\\n .master(\"local[*]\") \\\n .config(\"spark.driver.memory\", \"16G\") \\\n .config(\"spark.serializer\", \"org.apache.spark.serializer.KryoSerializer\") \\\n .config(\"spark.kryoserializer.buffer.max\", \"2000M\") \\\n .config(\"spark.jars.packages\", \"com.johnsnowlabs.nlp:spark-nlp_2.11:\"+version) \\\n .config(\"spark.jars\", \"https://pypi.johnsnowlabs.com/\"+secret+\"/spark-nlp-jsl-\"+jsl_version+\".jar\")\n \n return builder.getOrCreate()\n\n#spark = start(secret)",
"_____no_output_____"
]
],
[
[
"\n\n# Gender Classifier Pipeline with **sbert**",
"_____no_output_____"
]
],
[
[
"document = DocumentAssembler()\\\n .setInputCol(\"text\")\\\n .setOutputCol(\"document\")\n\nsbert_embedder = BertSentenceEmbeddings().pretrained(\"sbiobert_base_cased_mli\", 'en', 'clinical/models')\\\n .setInputCols([\"document\"])\\\n .setOutputCol(\"sentence_embeddings\")\\\n .setMaxSentenceLength(512)\n\ngender_classifier = ClassifierDLModel.pretrained( 'classifierdl_gender_sbert', 'en', 'clinical/models') \\\n .setInputCols([\"document\", \"sentence_embeddings\"]) \\\n .setOutputCol(\"class\") \n\ngender_pred_pipeline_sbert = Pipeline(stages = [ \n document, \n sbert_embedder, \n gender_classifier \n ])\n\nempty_data = spark.createDataFrame([[\"\"]]).toDF(\"text\")\n\nmodel_sbert = gender_pred_pipeline_sbert.fit(empty_data)\n",
"sbiobert_base_cased_mli download started this may take some time.\nApproximate size to download 384.3 MB\n[OK!]\nclassifierdl_gender_sbert download started this may take some time.\nApproximate size to download 22.2 MB\n[OK!]\n"
],
[
"text =\"\"\"social history: shows that does not smoke cigarettes or drink alcohol,lives in a nursing home.family history: shows a family history of breast cancer.\"\"\"\n\ngender_pipeline_sbert = LightPipeline(model_sbert)\n\nresult = gender_pipeline_sbert.annotate(text)\n\nresult['class'][0]\n",
"_____no_output_____"
]
],
[
[
"### Sample Clinical Notes",
"_____no_output_____"
]
],
[
[
"text1 = '''social history: shows that does not smoke cigarettes or drink alcohol,lives in a nursing home.\nfamily history: shows a family history of breast cancer.'''\n\nresult = gender_pipeline_sbert.annotate(text1)\n\nresult['class'][0]",
"_____no_output_____"
],
[
"text2 = '''The patient is a 48- year-old, with severe mitral stenosis diagnosed by echocardiography, moderate\n aortic insufficiency and moderate to severe pulmonary hypertension who is being evaluated as a part of a preoperative \n workup for mitral and possible aortic valve repair or replacement.'''\n\nresult = gender_pipeline_sbert.annotate(text2)\n\nresult['class'][0]",
"_____no_output_____"
],
[
"text3 = '''HISTORY: The patient is a 57-year-old XX, who I initially saw in the office on 12/27/07, as a referral from the Tomball Breast Center.\nOn 12/21/07, the patient underwent image-guided needle core biopsy of a 1.5 cm lesion at the 7 o'clock position of the left breast (inferomedial). \nThe biopsy returned showing infiltrating ductal carcinoma high histologic grade.\nThe patient stated that xx had recently felt and her physician had felt a palpable mass in that area prior to her breast imaging.'''\n\nresult = gender_pipeline_sbert.annotate(text3)\n\nresult['class'][0]",
"_____no_output_____"
],
[
"text4 = '''The patient states that xx has been overweight for approximately 35 years and has tried multiple weight loss modalities in \nthe past including Weight Watchers, NutriSystem, Jenny Craig, TOPS, cabbage diet, grape fruit diet, Slim-Fast, Richard Simmons,\nas well as over-the-counter measures without any long-term sustainable weight loss.\nAt the time of presentation to the practice, xx is 5 feet 6 inches tall with a weight of 285.4 pounds and a body mass index of 46.\nxx has obesity-related comorbidities, which includes hypertension and hypercholesterolemia.'''\n\nresult = gender_pipeline_sbert.annotate(text4)\n\nresult['class'][0]",
"_____no_output_____"
],
[
"text5 = '''Prostate gland showing moderately differentiated infiltrating adenocarcinoma, \nGleason 3 + 2 extending to the apex involving both lobes of the prostate, mainly right.'''\n\nresult = gender_pipeline_sbert.annotate(text5)\n\nresult['class'][0]",
"_____no_output_____"
],
[
"text6 = '''SKIN: The patient has significant subcutaneous emphysema of the upper chest and \nanterior neck area although he states that the subcutaneous emphysema has improved significantly since yesterday.'''\n\nresult = gender_pipeline_sbert.annotate(text6)\n\nresult['class'][0]",
"_____no_output_____"
],
[
"text7 = '''INDICATION: The patient is a 42-year-old XX who is five days out from transanal excision of a benign anterior base lesion.\nxx presents today with diarrhea and bleeding. Digital exam reveals bright red blood on the finger.\nxx is for exam under anesthesia and control of hemorrhage at this time.\n'''\nresult = gender_pipeline_sbert.annotate(text7)\n\nresult['class'][0]",
"_____no_output_____"
],
[
"text8 = '''INDICATION: ___ year old patient with complicated medical history of paraplegia\nand chronic indwelling foley, recurrent MDR UTIs, hx Gallbladder fossa\nabscess,type 2 DM, HTN, CAD, DVT s/p left AKA complicated complicated by\nrespiratory failure requiring tracheostomy and PEG placement, right ischium\nosteomyelitis due to chronic pressure ulcers with acute shortness of breath...'''\n\nresult = gender_pipeline_sbert.annotate(text8)\n\nresult['class'][0]\n",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a2051f887397f17356e0c96cb61480bd05f3956
| 457,702 |
ipynb
|
Jupyter Notebook
|
kTSP.ipynb
|
aoxil/kTSP-predicting-age-feature-selection
|
0d277565947aa6e476ed78121ec2325fcec7f92e
|
[
"BSD-3-Clause"
] | null | null | null |
kTSP.ipynb
|
aoxil/kTSP-predicting-age-feature-selection
|
0d277565947aa6e476ed78121ec2325fcec7f92e
|
[
"BSD-3-Clause"
] | null | null | null |
kTSP.ipynb
|
aoxil/kTSP-predicting-age-feature-selection
|
0d277565947aa6e476ed78121ec2325fcec7f92e
|
[
"BSD-3-Clause"
] | null | null | null | 65.367324 | 151,947 | 0.60629 |
[
[
[
"# Overview\n\nEnsemble combined with LDA is effective in predicting age based on gene expression data. However, this method is prone to batch problem. The batch problem may caused by the different techniques in breeding cells that lead to difference in the mean gene expression level of cells between batches. \n\nkTSP is a potential solution to the batch problem because it uses the relative gene expression level rather than the raw qualititative data. It also involves significant less amount of features, improving both the speed of training and predicting and the interpretability of the features.\n\nThe data used for training in this notebook are all from gene_labelled_data.csv",
"_____no_output_____"
],
[
"# KTSP classifier and feature selection\n\nThis notebook completes three tasks: 10-fold CV repetead 3 times for predicting age using kTSP classifer on entire data; feature selection of gene pairs by kTSP algorithm to use in ensemble; and predict age on data reserved from feature selection using kTSP classifier.",
"_____no_output_____"
],
[
"# Set-up\nrpy2 version: 3.4.5\n\\\nR version: 4.1.0\n\\\nmultiClassPairs version: 0.4.3\n\\\nswitchBox version: 1.28.0\n\\\npython version: 3.8.8\n\\\nscikit-learn version: 0.24.1\n\\\nseaborn version: 0.11.1 ",
"_____no_output_____"
]
],
[
[
"%run age_predictors.py",
"_____no_output_____"
],
[
"#Importing interfaces and packages\n#Activate pandas2ri, numpy2ri for automatic transformation of python data structure to R\n\nimport pandas as pd\nimport rpy2\nimport rpy2.robjects as robjects\nfrom rpy2.robjects.packages import importr\nfrom rpy2.robjects import pandas2ri\nimport rpy2.robjects.packages as rpackages\nfrom rpy2.robjects.vectors import StrVector\nfrom sklearn.model_selection import train_test_split\npandas2ri.activate()\nfrom rpy2.robjects import r\nfrom sklearn.utils import shuffle\nimport rpy2.robjects.numpy2ri\nfrom sklearn.metrics import confusion_matrix\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import KFold\nrpy2.robjects.numpy2ri.activate()",
"_____no_output_____"
],
[
"print(rpy2.situation.get_r_home())",
"/Library/Frameworks/R.framework/Resources\n"
],
[
"print(rpy2.__version__)",
"3.4.5\n"
],
[
"#Import R biocManager packages\nbase = importr('base')\nswitchBox = importr('switchBox')\nmulticlass = importr('multiclassPairs')",
"_____no_output_____"
],
[
"#This method loads the data\n#Set uid, age, meta as indices\ndef load_data(filename, transpose=False):\n \n ending = filename.split('.')[-1]\n \n if ending == 'csv':\n data = pd.read_csv(filename,header=None,index_col=None)\n elif ending == 'xlsx':\n data = pd.read_xlsx(filename,header=None,index_col=None)\n else:\n raise TypeError('dont know what file type this is')\n \n if transpose:\n data = data.T\n \n # make sure the index columns are named correctly, \n # otherwise use whatever the first row (header) contains for gene/transcript names\n cols = data.iloc[0,:]\n cols[0] = 'uid'\n cols[1] = 'age'\n cols[2] = 'meta'\n data.columns = cols\n \n # get the data, not the header now that we formed it\n data = data.iloc[1:,:]\n \n # make sure the age comes in as integer years... if you need to do floating point change this\n data.iloc[:,1] = data.iloc[:,1].astype(int)\n \n data = data.set_index(['uid','age','meta']).astype(float)\n \n return data\n",
"_____no_output_____"
],
[
"data = load_data('gene_labelled_data.csv')",
"/opt/anaconda3/lib/python3.8/site-packages/IPython/core/interactiveshell.py:3357: DtypeWarning: Columns (0,1,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255,256,257,258,259,260,261,262,263,264,265,266,267,268,269,270,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,287,288,289,290,291,292,293,294,295,296,297,298,299,300,301,302,303,304,305,306,307,308,309,310,311,312,313,314,315,316,317,318,319,320,321,322,323,324,325,326,327,328,329,330,331,332,333,334,335,336,337,338,339,340,341,342,343,344,345,346,347,348,349,350,351,352,353,354,355,356,357,358,359,360,361,362,363,364,365,366,367,368,369,370,371,372,373,374,375,376,377,378,379,380,381,382,383,384,385,386,387,388,389,390,391,392,393,394,395,396,397,398,399,400,401,402,403,404,405,406,407,408,409,410,411,412,413,414,415,416,417,418,419,420,421,422,423,424,425,426,427,428,429,430,431,432,433,434,435,436,437,438,439,440,441,442,443,444,445,446,447,448,449,450,451,452,453,454,455,456,457,458,459,460,461,462,463,464,465,466,467,468,469,470,471,472,473,474,475,476,477,478,479,480,481,482,483,484,485,486,487,488,489,490,491,492,493,494,495,496,497,498,499,500,50
1,502,503,504,505,506,507,508,509,510,511,512,513,514,515,516,517,518,519,520,521,522,523,524,525,526,527,528,529,530,531,532,533,534,535,536,537,538,539,540,541,542,543,544,545,546,547,548,549,550,551,552,553,554,555,556,557,558,559,560,561,562,563,564,565,566,567,568,569,570,571,572,573,574,575,576,577,578,579,580,581,582,583,584,585,586,587,588,589,590,591,592,593,594,595,596,597,598,599,600,601,602,603,604,605,606,607,608,609,610,611,612,613,614,615,616,617,618,619,620,621,622,623,624,625,626,627,628,629,630,631,632,633,634,635,636,637,638,639,640,641,642,643,644,645,646,647,648,649,650,651,652,653,654,655,656,657,658,659,660,661,662,663,664,665,666,667,668,669,670,671,672,673,674,675,676,677,678,679,680,681,682,683,684,685,686,687,688,689,690,691,692,693,694,695,696,697,698,699,700,701,702,703,704,705,706,707,708,709,710,711,712,713,714,715,716,717,718,719,720,721,722,723,724,725,726,727,728,729,730,731,732,733,734,735,736,737,738,739,740,741,742,743,744,745,746,747,748,749,750,751,752,753,754,755,756,757,758,759,760,761,762,763,764,765,766,767,768,769,770,771,772,773,774,775,776,777,778,779,780,781,782,783,784,785,786,787,788,789,790,791,792,793,794,795,796,797,798,799,800,801,802,803,804,805,806,807,808,809,810,811,812,813,814,815,816,817,818,819,820,821,822,823,824,825,826,827,828,829,830,831,832,833,834,835,836,837,838,839,840,841,842,843,844,845,846,847,848,849,850,851,852,853,854,855,856,857,858,859,860,861,862,863,864,865,866,867,868,869,870,871,872,873,874,875,876,877,878,879,880,881,882,883,884,885,886,887,888,889,890,891,892,893,894,895,896,897,898,899,900,901,902,903,904,905,906,907,908,909,910,911,912,913,914,915,916,917,918,919,920,921,922,923,924,925,926,927,928,929,930,931,932,933,934,935,936,937,938,939,940,941,942,943,944,945,946,947,948,949,950,951,952,953,954,955,956,957,958,959,960,961,962,963,964,965,966,967,968,969,970,971,972,973,974,975,976,977,978,979,980,981,982,983,984,985,986,987,988,989,990,991,992,993,994,995,996,997,998,999,1000,1
001,1002,1003,1004,1005,1006,1007,1008,1009,1010,1011,1012,1013,1014,1015,1016,1017,1018,1019,1020,1021,1022,1023,1024,1025,1026,1027,1028,1029,1030,1031,1032,1033,1034,1035,1036,1037,1038,1039,1040,1041,1042,1043,1044,1045,1046,1047,1048,1049,1050,1051,1052,1053,1054,1055,1056,1057,1058,1059,1060,1061,1062,1063,1064,1065,1066,1067,1068,1069,1070,1071,1072,1073,1074,1075,1076,1077,1078,1079,1080,1081,1082,1083,1084,1085,1086,1087,1088,1089,1090,1091,1092,1093,1094,1095,1096,1097,1098,1099,1100,1101,1102,1103,1104,1105,1106,1107,1108,1109,1110,1111,1112,1113,1114,1115,1116,1117,1118,1119,1120,1121,1122,1123,1124,1125,1126,1127,1128,1129,1130,1131,1132,1133,1134,1135,1136,1137,1138,1139,1140,1141,1142,1143,1144,1145,1146,1147,1148,1149,1150,1151,1152,1153,1154,1155,1156,1157,1158,1159,1160,1161,1162,1163,1164,1165,1166,1167,1168,1169,1170,1171,1172,1173,1174,1175,1176,1177,1178,1179,1180,1181,1182,1183,1184,1185,1186,1187,1188,1189,1190,1191,1192,1193,1194,1195,1196,1197,1198,1199,1200,1201,1202,1203,1204,1205,1206,1207,1208,1209,1210,1211,1212,1213,1214,1215,1216,1217,1218,1219,1220,1221,1222,1223,1224,1225,1226,1227,1228,1229,1230,1231,1232,1233,1234,1235,1236,1237,1238,1239,1240,1241,1242,1243,1244,1245,1246,1247,1248,1249,1250,1251,1252,1253,1254,1255,1256,1257,1258,1259,1260,1261,1262,1263,1264,1265,1266,1267,1268,1269,1270,1271,1272,1273,1274,1275,1276,1277,1278,1279,1280,1281,1282,1283,1284,1285,1286,1287,1288,1289,1290,1291,1292,1293,1294,1295,1296,1297,1298,1299,1300,1301,1302,1303,1304,1305,1306,1307,1308,1309,1310,1311,1312,1313,1314,1315,1316,1317,1318,1319,1320,1321,1322,1323,1324,1325,1326,1327,1328,1329,1330,1331,1332,1333,1334,1335,1336,1337,1338,1339,1340,1341,1342,1343,1344,1345,1346,1347,1348,1349,1350,1351,1352,1353,1354,1355,1356,1357,1358,1359,1360,1361,1362,1363,1364,1365,1366,1367,1368,1369,1370,1371,1372,1373,1374,1375,1376,1377,1378,1379,1380,1381,1382,1383,1384,1385,1386,1387,1388,1389,1390,1391,1392,1393,1394,1395,1396,1397,1398,1399,1400,1
401,1402,1403,1404,1405,1406,1407,1408,1409,1410,1411,1412,1413,1414,1415,1416,1417,1418,1419,1420,1421,1422,1423,1424,1425,1426,1427,1428,1429,1430,1431,1432,1433,1434,1435,1436,1437,1438,1439,1440,1441,1442,1443,1444,1445,1446,1447,1448,1449,1450,1451,1452,1453,1454,1455,1456,1457,1458,1459,1460,1461,1462,1463,1464,1465,1466,1467,1468,1469,1470,1471,1472,1473,1474,1475,1476,1477,1478,1479,1480,1481,1482,1483,1484,1485,1486,1487,1488,1489,1490,1491,1492,1493,1494,1495,1496,1497,1498,1499,1500,1501,1502,1503,1504,1505,1506,1507,1508,1509,1510,1511,1512,1513,1514,1515,1516,1517,1518,1519,1520,1521,1522,1523,1524,1525,1526,1527,1528,1529,1530,1531,1532,1533,1534,1535,1536,1537,1538,1539,1540,1541,1542,1543,1544,1545,1546,1547,1548,1549,1550,1551,1552,1553,1554,1555,1556,1557,1558,1559,1560,1561,1562,1563,1564,1565,1566,1567,1568,1569,1570,1571,1572,1573,1574,1575,1576,1577,1578,1579,1580,1581,1582,1583,1584,1585,1586,1587,1588,1589,1590,1591,1592,1593,1594,1595,1596,1597,1598,1599,1600,1601,1602,1603,1604,1605,1606,1607,1608,1609,1610,1611,1612,1613,1614,1615,1616,1617,1618,1619,1620,1621,1622,1623,1624,1625,1626,1627,1628,1629,1630,1631,1632,1633,1634,1635,1636,1637,1638,1639,1640,1641,1642,1643,1644,1645,1646,1647,1648,1649,1650,1651,1652,1653,1654,1655,1656,1657,1658,1659,1660,1661,1662,1663,1664,1665,1666,1667,1668,1669,1670,1671,1672,1673,1674,1675,1676,1677,1678,1679,1680,1681,1682,1683,1684,1685,1686,1687,1688,1689,1690,1691,1692,1693,1694,1695,1696,1697,1698,1699,1700,1701,1702,1703,1704,1705,1706,1707,1708,1709,1710,1711,1712,1713,1714,1715,1716,1717,1718,1719,1720,1721,1722,1723,1724,1725,1726,1727,1728,1729,1730,1731,1732,1733,1734,1735,1736,1737,1738,1739,1740,1741,1742,1743,1744,1745,1746,1747,1748,1749,1750,1751,1752,1753,1754,1755,1756,1757,1758,1759,1760,1761,1762,1763,1764,1765,1766,1767,1768,1769,1770,1771,1772,1773,1774,1775,1776,1777,1778,1779,1780,1781,1782,1783,1784,1785,1786,1787,1788,1789,1790,1791,1792,1793,1794,1795,1796,1797,1798,1799,1800,1
801,1802,1803,1804,1805,1806,1807,1808,1809,1810,1811,1812,1813,1814,1815,1816,1817,1818,1819,1820,1821,1822,1823,1824,1825,1826,1827,1828,1829,1830,1831,1832,1833,1834,1835,1836,1837,1838,1839,1840,1841,1842,1843,1844,1845,1846,1847,1848,1849,1850,1851,1852,1853,1854,1855,1856,1857,1858,1859,1860,1861,1862,1863,1864,1865,1866,1867,1868,1869,1870,1871,1872,1873,1874,1875,1876,1877,1878,1879,1880,1881,1882,1883,1884,1885,1886,1887,1888,1889,1890,1891,1892,1893,1894,1895,1896,1897,1898,1899,1900,1901,1902,1903,1904,1905,1906,1907,1908,1909,1910,1911,1912,1913,1914,1915,1916,1917,1918,1919,1920,1921,1922,1923,1924,1925,1926,1927,1928,1929,1930,1931,1932,1933,1934,1935,1936,1937,1938,1939,1940,1941,1942,1943,1944,1945,1946,1947,1948,1949,1950,1951,1952,1953,1954,1955,1956,1957,1958,1959,1960,1961,1962,1963,1964,1965,1966,1967,1968,1969,1970,1971,1972,1973,1974,1975,1976,1977,1978,1979,1980,1981,1982,1983,1984,1985,1986,1987,1988,1989,1990,1991,1992,1993,1994,1995,1996,1997,1998,1999,2000,2001,2002,2003,2004,2005,2006,2007,2008,2009,2010,2011,2012,2013,2014,2015,2016,2017,2018,2019,2020,2021,2022,2023,2024,2025,2026,2027,2028,2029,2030,2031,2032,2033,2034,2035,2036,2037,2038,2039,2040,2041,2042,2043,2044,2045,2046,2047,2048,2049,2050,2051,2052,2053,2054,2055,2056,2057,2058,2059,2060,2061,2062,2063,2064,2065,2066,2067,2068,2069,2070,2071,2072,2073,2074,2075,2076,2077,2078,2079,2080,2081,2082,2083,2084,2085,2086,2087,2088,2089,2090,2091,2092,2093,2094,2095,2096,2097,2098,2099,2100,2101,2102,2103,2104,2105,2106,2107,2108,2109,2110,2111,2112,2113,2114,2115,2116,2117,2118,2119,2120,2121,2122,2123,2124,2125,2126,2127,2128,2129,2130,2131,2132,2133,2134,2135,2136,2137,2138,2139,2140,2141,2142,2143,2144,2145,2146,2147,2148,2149,2150,2151,2152,2153,2154,2155,2156,2157,2158,2159,2160,2161,2162,2163,2164,2165,2166,2167,2168,2169,2170,2171,2172,2173,2174,2175,2176,2177,2178,2179,2180,2181,2182,2183,2184,2185,2186,2187,2188,2189,2190,2191,2192,2193,2194,2195,2196,2197,2198,2199,2200,2
201,2202,2203,2204,2205,2206,2207,2208,2209,2210,2211,2212,2213,2214,2215,2216,2217,2218,2219,2220,2221,2222,2223,2224,2225,2226,2227,2228,2229,2230,2231,2232,2233,2234,2235,2236,2237,2238,2239,2240,2241,2242,2243,2244,2245,2246,2247,2248,2249,2250,2251,2252,2253,2254,2255,2256,2257,2258,2259,2260,2261,2262,2263,2264,2265,2266,2267,2268,2269,2270,2271,2272,2273,2274,2275,2276,2277,2278,2279,2280,2281,2282,2283,2284,2285,2286,2287,2288,2289,2290,2291,2292,2293,2294,2295,2296,2297,2298,2299,2300,2301,2302,2303,2304,2305,2306,2307,2308,2309,2310,2311,2312,2313,2314,2315,2316,2317,2318,2319,2320,2321,2322,2323,2324,2325,2326,2327,2328,2329,2330,2331,2332,2333,2334,2335,2336,2337,2338,2339,2340,2341,2342,2343,2344,2345,2346,2347,2348,2349,2350,2351,2352,2353,2354,2355,2356,2357,2358,2359,2360,2361,2362,2363,2364,2365,2366,2367,2368,2369,2370,2371,2372,2373,2374,2375,2376,2377,2378,2379,2380,2381,2382,2383,2384,2385,2386,2387,2388,2389,2390,2391,2392,2393,2394,2395,2396,2397,2398,2399,2400,2401,2402,2403,2404,2405,2406,2407,2408,2409,2410,2411,2412,2413,2414,2415,2416,2417,2418,2419,2420,2421,2422,2423,2424,2425,2426,2427,2428,2429,2430,2431,2432,2433,2434,2435,2436,2437,2438,2439,2440,2441,2442,2443,2444,2445,2446,2447,2448,2449,2450,2451,2452,2453,2454,2455,2456,2457,2458,2459,2460,2461,2462,2463,2464,2465,2466,2467,2468,2469,2470,2471,2472,2473,2474,2475,2476,2477,2478,2479,2480,2481,2482,2483,2484,2485,2486,2487,2488,2489,2490,2491,2492,2493,2494,2495,2496,2497,2498,2499,2500,2501,2502,2503,2504,2505,2506,2507,2508,2509,2510,2511,2512,2513,2514,2515,2516,2517,2518,2519,2520,2521,2522,2523,2524,2525,2526,2527,2528,2529,2530,2531,2532,2533,2534,2535,2536,2537,2538,2539,2540,2541,2542,2543,2544,2545,2546,2547,2548,2549,2550,2551,2552,2553,2554,2555,2556,2557,2558,2559,2560,2561,2562,2563,2564,2565,2566,2567,2568,2569,2570,2571,2572,2573,2574,2575,2576,2577,2578,2579,2580,2581,2582,2583,2584,2585,2586,2587,2588,2589,2590,2591,2592,2593,2594,2595,2596,2597,2598,2599,2600,2
601,2602,2603,2604,2605,2606,2607,2608,2609,2610,2611,2612,2613,2614,2615,2616,2617,2618,2619,2620,2621,2622,2623,2624,2625,2626,2627,2628,2629,2630,2631,2632,2633,2634,2635,2636,2637,2638,2639,2640,2641,2642,2643,2644,2645,2646,2647,2648,2649,2650,2651,2652,2653,2654,2655,2656,2657,2658,2659,2660,2661,2662,2663,2664,2665,2666,2667,2668,2669,2670,2671,2672,2673,2674,2675,2676,2677,2678,2679,2680,2681,2682,2683,2684,2685,2686,2687,2688,2689,2690,2691,2692,2693,2694,2695,2696,2697,2698,2699,2700,2701,2702,2703,2704,2705,2706,2707,2708,2709,2710,2711,2712,2713,2714,2715,2716,2717,2718,2719,2720,2721,2722,2723,2724,2725,2726,2727,2728,2729,2730,2731,2732,2733,2734,2735,2736,2737,2738,2739,2740,2741,2742,2743,2744,2745,2746,2747,2748,2749,2750,2751,2752,2753,2754,2755,2756,2757,2758,2759,2760,2761,2762,2763,2764,2765,2766,2767,2768,2769,2770,2771,2772,2773,2774,2775,2776,2777,2778,2779,2780,2781,2782,2783,2784,2785,2786,2787,2788,2789,2790,2791,2792,2793,2794,2795,2796,2797,2798,2799,2800,2801,2802,2803,2804,2805,2806,2807,2808,2809,2810,2811,2812,2813,2814,2815,2816,2817,2818,2819,2820,2821,2822,2823,2824,2825,2826,2827,2828,2829,2830,2831,2832,2833,2834,2835,2836,2837,2838,2839,2840,2841,2842,2843,2844,2845,2846,2847,2848,2849,2850,2851,2852,2853,2854,2855,2856,2857,2858,2859,2860,2861,2862,2863,2864,2865,2866,2867,2868,2869,2870,2871,2872,2873,2874,2875,2876,2877,2878,2879,2880,2881,2882,2883,2884,2885,2886,2887,2888,2889,2890,2891,2892,2893,2894,2895,2896,2897,2898,2899,2900,2901,2902,2903,2904,2905,2906,2907,2908,2909,2910,2911,2912,2913,2914,2915,2916,2917,2918,2919,2920,2921,2922,2923,2924,2925,2926,2927,2928,2929,2930,2931,2932,2933,2934,2935,2936,2937,2938,2939,2940,2941,2942,2943,2944,2945,2946,2947,2948,2949,2950,2951,2952,2953,2954,2955,2956,2957,2958,2959,2960,2961,2962,2963,2964,2965,2966,2967,2968,2969,2970,2971,2972,2973,2974,2975,2976,2977,2978,2979,2980,2981,2982,2983,2984,2985,2986,2987,2988,2989,2990,2991,2992,2993,2994,2995,2996,2997,2998,2999,3000,3
001,3002,3003,3004,3005,3006,3007,3008,3009,3010,3011,3012,3013,3014,3015,3016,3017,3018,3019,3020,3021,3022,3023,3024,3025,3026,3027,3028,3029,3030,3031,3032,3033,3034,3035,3036,3037,3038,3039,3040,3041,3042,3043,3044,3045,3046,3047,3048,3049,3050,3051,3052,3053,3054,3055,3056,3057,3058,3059,3060,3061,3062,3063,3064,3065,3066,3067,3068,3069,3070,3071,3072,3073,3074,3075,3076,3077,3078,3079,3080,3081,3082,3083,3084,3085,3086,3087,3088,3089,3090,3091,3092,3093,3094,3095,3096,3097,3098,3099,3100,3101,3102,3103,3104,3105,3106,3107,3108,3109,3110,3111,3112,3113,3114,3115,3116,3117,3118,3119,3120,3121,3122,3123,3124,3125,3126,3127,3128,3129,3130,3131,3132,3133,3134,3135,3136,3137,3138,3139,3140,3141,3142,3143,3144,3145,3146,3147,3148,3149,3150,3151,3152,3153,3154,3155,3156,3157,3158,3159,3160,3161,3162,3163,3164,3165,3166,3167,3168,3169,3170,3171,3172,3173,3174,3175,3176,3177,3178,3179,3180,3181,3182,3183,3184,3185,3186,3187,3188,3189,3190,3191,3192,3193,3194,3195,3196,3197,3198,3199,3200,3201,3202,3203,3204,3205,3206,3207,3208,3209,3210,3211,3212,3213,3214,3215,3216,3217,3218,3219,3220,3221,3222,3223,3224,3225,3226,3227,3228,3229,3230,3231,3232,3233,3234,3235,3236,3237,3238,3239,3240,3241,3242,3243,3244,3245,3246,3247,3248,3249,3250,3251,3252,3253,3254,3255,3256,3257,3258,3259,3260,3261,3262,3263,3264,3265,3266,3267,3268,3269,3270,3271,3272,3273,3274,3275,3276,3277,3278,3279,3280,3281,3282,3283,3284,3285,3286,3287,3288,3289,3290,3291,3292,3293,3294,3295,3296,3297,3298,3299,3300,3301,3302,3303,3304,3305,3306,3307,3308,3309,3310,3311,3312,3313,3314,3315,3316,3317,3318,3319,3320,3321,3322,3323,3324,3325,3326,3327,3328,3329,3330,3331,3332,3333,3334,3335,3336,3337,3338,3339,3340,3341,3342,3343,3344,3345,3346,3347,3348,3349,3350,3351,3352,3353,3354,3355,3356,3357,3358,3359,3360,3361,3362,3363,3364,3365,3366,3367,3368,3369,3370,3371,3372,3373,3374,3375,3376,3377,3378,3379,3380,3381,3382,3383,3384,3385,3386,3387,3388,3389,3390,3391,3392,3393,3394,3395,3396,3397,3398,3399,3400,3
401,3402,3403,3404,3405,3406,3407,3408,3409,3410,3411,3412,3413,3414,3415,3416,3417,3418,3419,3420,3421,3422,3423,3424,3425,3426,3427,3428,3429,3430,3431,3432,3433,3434,3435,3436,3437,3438,3439,3440,3441,3442,3443,3444,3445,3446,3447,3448,3449,3450,3451,3452,3453,3454,3455,3456,3457,3458,3459,3460,3461,3462,3463,3464,3465,3466,3467,3468,3469,3470,3471,3472,3473,3474,3475,3476,3477,3478,3479,3480,3481,3482,3483,3484,3485,3486,3487,3488,3489,3490,3491,3492,3493,3494,3495,3496,3497,3498,3499,3500,3501,3502,3503,3504,3505,3506,3507,3508,3509,3510,3511,3512,3513,3514,3515,3516,3517,3518,3519,3520,3521,3522,3523,3524,3525,3526,3527,3528,3529,3530,3531,3532,3533,3534,3535,3536,3537,3538,3539,3540,3541,3542,3543,3544,3545,3546,3547,3548,3549,3550,3551,3552,3553,3554,3555,3556,3557,3558,3559,3560,3561,3562,3563,3564,3565,3566,3567,3568,3569,3570,3571,3572,3573,3574,3575,3576,3577,3578,3579,3580,3581,3582,3583,3584,3585,3586,3587,3588,3589,3590,3591,3592,3593,3594,3595,3596,3597,3598,3599,3600,3601,3602,3603,3604,3605,3606,3607,3608,3609,3610,3611,3612,3613,3614,3615,3616,3617,3618,3619,3620,3621,3622,3623,3624,3625,3626,3627,3628,3629,3630,3631,3632,3633,3634,3635,3636,3637,3638,3639,3640,3641,3642,3643,3644,3645,3646,3647,3648,3649,3650,3651,3652,3653,3654,3655,3656,3657,3658,3659,3660,3661,3662,3663,3664,3665,3666,3667,3668,3669,3670,3671,3672,3673,3674,3675,3676,3677,3678,3679,3680,3681,3682,3683,3684,3685,3686,3687,3688,3689,3690,3691,3692,3693,3694,3695,3696,3697,3698,3699,3700,3701,3702,3703,3704,3705,3706,3707,3708,3709,3710,3711,3712,3713,3714,3715,3716,3717,3718,3719,3720,3721,3722,3723,3724,3725,3726,3727,3728,3729,3730,3731,3732,3733,3734,3735,3736,3737,3738,3739,3740,3741,3742,3743,3744,3745,3746,3747,3748,3749,3750,3751,3752,3753,3754,3755,3756,3757,3758,3759,3760,3761,3762,3763,3764,3765,3766,3767,3768,3769,3770,3771,3772,3773,3774,3775,3776,3777,3778,3779,3780,3781,3782,3783,3784,3785,3786,3787,3788,3789,3790,3791,3792,3793,3794,3795,3796,3797,3798,3799,3800,3
801,3802,3803,3804,3805,3806,3807,3808,3809,3810,3811,3812,3813,3814,3815,3816,3817,3818,3819,3820,3821,3822,3823,3824,3825,3826,3827,3828,3829,3830,3831,3832,3833,3834,3835,3836,3837,3838,3839,3840,3841,3842,3843,3844,3845,3846,3847,3848,3849,3850,3851,3852,3853,3854,3855,3856,3857,3858,3859,3860,3861,3862,3863,3864,3865,3866,3867,3868,3869,3870,3871,3872,3873,3874,3875,3876,3877,3878,3879,3880,3881,3882,3883,3884,3885,3886,3887,3888,3889,3890,3891,3892,3893,3894,3895,3896,3897,3898,3899,3900,3901,3902,3903,3904,3905,3906,3907,3908,3909,3910,3911,3912,3913,3914,3915,3916,3917,3918,3919,3920,3921,3922,3923,3924,3925,3926,3927,3928,3929,3930,3931,3932,3933,3934,3935,3936,3937,3938,3939,3940,3941,3942,3943,3944,3945,3946,3947,3948,3949,3950,3951,3952,3953,3954,3955,3956,3957,3958,3959,3960,3961,3962,3963,3964,3965,3966,3967,3968,3969,3970,3971,3972,3973,3974,3975,3976,3977,3978,3979,3980,3981,3982,3983,3984,3985,3986,3987,3988,3989,3990,3991,3992,3993,3994,3995,3996,3997,3998,3999,4000,4001,4002,4003,4004,4005,4006,4007,4008,4009,4010,4011,4012,4013,4014,4015,4016,4017,4018,4019,4020,4021,4022,4023,4024,4025,4026,4027,4028,4029,4030,4031,4032,4033,4034,4035,4036,4037,4038,4039,4040,4041,4042,4043,4044,4045,4046,4047,4048,4049,4050,4051,4052,4053,4054,4055,4056,4057,4058,4059,4060,4061,4062,4063,4064,4065,4066,4067,4068,4069,4070,4071,4072,4073,4074,4075,4076,4077,4078,4079,4080,4081,4082,4083,4084,4085,4086,4087,4088,4089,4090,4091,4092,4093,4094,4095,4096,4097,4098,4099,4100,4101,4102,4103,4104,4105,4106,4107,4108,4109,4110,4111,4112,4113,4114,4115,4116,4117,4118,4119,4120,4121,4122,4123,4124,4125,4126,4127,4128,4129,4130,4131,4132,4133,4134,4135,4136,4137,4138,4139,4140,4141,4142,4143,4144,4145,4146,4147,4148,4149,4150,4151,4152,4153,4154,4155,4156,4157,4158,4159,4160,4161,4162,4163,4164,4165,4166,4167,4168,4169,4170,4171,4172,4173,4174,4175,4176,4177,4178,4179,4180,4181,4182,4183,4184,4185,4186,4187,4188,4189,4190,4191,4192,4193,4194,4195,4196,4197,4198,4199,4200,4
201,4202,4203,4204,4205,4206,4207,4208,4209,4210,4211,4212,4213,4214,4215,4216,4217,4218,4219,4220,4221,4222,4223,4224,4225,4226,4227,4228,4229,4230,4231,4232,4233,4234,4235,4236,4237,4238,4239,4240,4241,4242,4243,4244,4245,4246,4247,4248,4249,4250,4251,4252,4253,4254,4255,4256,4257,4258,4259,4260,4261,4262,4263,4264,4265,4266,4267,4268,4269,4270,4271,4272,4273,4274,4275,4276,4277,4278,4279,4280,4281,4282,4283,4284,4285,4286,4287,4288,4289,4290,4291,4292,4293,4294,4295,4296,4297,4298,4299,4300,4301,4302,4303,4304,4305,4306,4307,4308,4309,4310,4311,4312,4313,4314,4315,4316,4317,4318,4319,4320,4321,4322,4323,4324,4325,4326,4327,4328,4329,4330,4331,4332,4333,4334,4335,4336,4337,4338,4339,4340,4341,4342,4343,4344,4345,4346,4347,4348,4349,4350,4351,4352,4353,4354,4355,4356,4357,4358,4359,4360,4361,4362,4363,4364,4365,4366,4367,4368,4369,4370,4371,4372,4373,4374,4375,4376,4377,4378,4379,4380,4381,4382,4383,4384,4385,4386,4387,4388,4389,4390,4391,4392,4393,4394,4395,4396,4397,4398,4399,4400,4401,4402,4403,4404,4405,4406,4407,4408,4409,4410,4411,4412,4413,4414,4415,4416,4417,4418,4419,4420,4421,4422,4423,4424,4425,4426,4427,4428,4429,4430,4431,4432,4433,4434,4435,4436,4437,4438,4439,4440,4441,4442,4443,4444,4445,4446,4447,4448,4449,4450,4451,4452,4453,4454,4455,4456,4457,4458,4459,4460,4461,4462,4463,4464,4465,4466,4467,4468,4469,4470,4471,4472,4473,4474,4475,4476,4477,4478,4479,4480,4481,4482,4483,4484,4485,4486,4487,4488,4489,4490,4491,4492,4493,4494,4495,4496,4497,4498,4499,4500,4501,4502,4503,4504,4505,4506,4507,4508,4509,4510,4511,4512,4513,4514,4515,4516,4517,4518,4519,4520,4521,4522,4523,4524,4525,4526,4527,4528,4529,4530,4531,4532,4533,4534,4535,4536,4537,4538,4539,4540,4541,4542,4543,4544,4545,4546,4547,4548,4549,4550,4551,4552,4553,4554,4555,4556,4557,4558,4559,4560,4561,4562,4563,4564,4565,4566,4567,4568,4569,4570,4571,4572,4573,4574,4575,4576,4577,4578,4579,4580,4581,4582,4583,4584,4585,4586,4587,4588,4589,4590,4591,4592,4593,4594,4595,4596,4597,4598,4599,4600,4
601,4602,4603,4604,4605,4606,4607,4608,4609,4610,4611,4612,4613,4614,4615,4616,4617,4618,4619,4620,4621,4622,4623,4624,4625,4626,4627,4628,4629,4630,4631,4632,4633,4634,4635,4636,4637,4638,4639,4640,4641,4642,4643,4644,4645,4646,4647,4648,4649,4650,4651,4652,4653,4654,4655,4656,4657,4658,4659,4660,4661,4662,4663,4664,4665,4666,4667,4668,4669,4670,4671,4672,4673,4674,4675,4676,4677,4678,4679,4680,4681,4682,4683,4684,4685,4686,4687,4688,4689,4690,4691,4692,4693,4694,4695,4696,4697,4698,4699,4700,4701,4702,4703,4704,4705,4706,4707,4708,4709,4710,4711,4712,4713,4714,4715,4716,4717,4718,4719,4720,4721,4722,4723,4724,4725,4726,4727,4728,4729,4730,4731,4732,4733,4734,4735,4736,4737,4738,4739,4740,4741,4742,4743,4744,4745,4746,4747,4748,4749,4750,4751,4752,4753,4754,4755,4756,4757,4758,4759,4760,4761,4762,4763,4764,4765,4766,4767,4768,4769,4770,4771,4772,4773,4774,4775,4776,4777,4778,4779,4780,4781,4782,4783,4784,4785,4786,4787,4788,4789,4790,4791,4792,4793,4794,4795,4796,4797,4798,4799,4800,4801,4802,4803,4804,4805,4806,4807,4808,4809,4810,4811,4812,4813,4814,4815,4816,4817,4818,4819,4820,4821,4822,4823,4824,4825,4826,4827,4828,4829,4830,4831,4832,4833,4834,4835,4836,4837,4838,4839,4840,4841,4842,4843,4844,4845,4846,4847,4848,4849,4850,4851,4852,4853,4854,4855,4856,4857,4858,4859,4860,4861,4862,4863,4864,4865,4866,4867,4868,4869,4870,4871,4872,4873,4874,4875,4876,4877,4878,4879,4880,4881,4882,4883,4884,4885,4886,4887,4888,4889,4890,4891,4892,4893,4894,4895,4896,4897,4898,4899,4900,4901,4902,4903,4904,4905,4906,4907,4908,4909,4910,4911,4912,4913,4914,4915,4916,4917,4918,4919,4920,4921,4922,4923,4924,4925,4926,4927,4928,4929,4930,4931,4932,4933,4934,4935,4936,4937,4938,4939,4940,4941,4942,4943,4944,4945,4946,4947,4948,4949,4950,4951,4952,4953,4954,4955,4956,4957,4958,4959,4960,4961,4962,4963,4964,4965,4966,4967,4968,4969,4970,4971,4972,4973,4974,4975,4976,4977,4978,4979,4980,4981,4982,4983,4984,4985,4986,4987,4988,4989,4990,4991,4992,4993,4994,4995,4996,4997,4998,4999,5000,5
001,5002,5003,5004,5005,5006,5007,5008,5009,5010,5011,5012,5013,5014,5015,5016,5017,5018,5019,5020,5021,5022,5023,5024,5025,5026,5027,5028,5029,5030,5031,5032,5033,5034,5035,5036,5037,5038,5039,5040,5041,5042,5043,5044,5045,5046,5047,5048,5049,5050,5051,5052,5053,5054,5055,5056,5057,5058,5059,5060,5061,5062,5063,5064,5065,5066,5067,5068,5069,5070,5071,5072,5073,5074,5075,5076,5077,5078,5079,5080,5081,5082,5083,5084,5085,5086,5087,5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102,5103,5104,5105,5106,5107,5108,5109,5110,5111,5112,5113,5114,5115,5116,5117,5118,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,5130,5131,5132,5133,5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148,5149,5150,5151,5152,5153,5154,5155,5156,5157,5158,5159,5160,5161,5162,5163,5164,5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,5176,5177,5178,5179,5180,5181,5182,5183,5184,5185,5186,5187,5188,5189,5190,5191,5192,5193,5194,5195,5196,5197,5198,5199,5200,5201,5202,5203,5204,5205,5206,5207,5208,5209,5210,5211,5212,5213,5214,5215,5216,5217,5218,5219,5220,5221,5222,5223,5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234,5235,5236,5237,5238,5239,5240,5241,5242,5243,5244,5245,5246,5247,5248,5249,5250,5251,5252,5253,5254,5255,5256,5257,5258,5259,5260,5261,5262,5263,5264,5265,5266,5267,5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278,5279,5280,5281,5282,5283,5284,5285,5286,5287,5288,5289,5290,5291,5292,5293,5294,5295,5296,5297,5298,5299,5300,5301,5302,5303,5304,5305,5306,5307,5308,5309,5310,5311,5312,5313,5314,5315,5316,5317,5318,5319,5320,5321,5322,5323,5324,5325,5326,5327,5328,5329,5330,5331,5332,5333,5334,5335,5336,5337,5338,5339,5340,5341,5342,5343,5344,5345,5346,5347,5348,5349,5350,5351,5352,5353,5354,5355,5356,5357,5358,5359,5360,5361,5362,5363,5364,5365,5366,5367,5368,5369,5370,5371,5372,5373,5374,5375,5376,5377,5378,5379,5380,5381,5382,5383,5384,5385,5386,5387,5388,5389,5390,5391,5392,5393,5394,5395,5396,5397,5398,5399,5400,5
401,5402,5403,5404,5405,5406,5407,5408,5409,5410,5411,5412,5413,5414,5415,5416,5417,5418,5419,5420,5421,5422,5423,5424,5425,5426,5427,5428,5429,5430,5431,5432,5433,5434,5435,5436,5437,5438,5439,5440,5441,5442,5443,5444,5445,5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456,5457,5458,5459,5460,5461,5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472,5473,5474,5475,5476,5477,5478,5479,5480,5481,5482,5483,5484,5485,5486,5487,5488,5489,5490,5491,5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504,5505,5506,5507,5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520,5521,5522,5523,5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536,5537,5538,5539,5540,5541,5542,5543,5544,5545,5546,5547,5548,5549,5550,5551,5552,5553,5554,5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568,5569,5570,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584,5585,5586,5587,5588,5589,5590,5591,5592,5593,5594,5595,5596,5597,5598,5599,5600,5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,5615,5616,5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631,5632,5633,5634,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646,5647,5648,5649,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660,5661,5662,5663,5664,5665,5666,5667,5668,5669,5670,5671,5672,5673,5674,5675,5676,5677,5678,5679,5680,5681,5682,5683,5684,5685,5686,5687,5688,5689,5690,5691,5692,5693,5694,5695,5696,5697,5698,5699,5700,5701,5702,5703,5704,5705,5706,5707,5708,5709,5710,5711,5712,5713,5714,5715,5716,5717,5718,5719,5720,5721,5722,5723,5724,5725,5726,5727,5728,5729,5730,5731,5732,5733,5734,5735,5736,5737,5738,5739,5740,5741,5742,5743,5744,5745,5746,5747,5748,5749,5750,5751,5752,5753,5754,5755,5756,5757,5758,5759,5760,5761,5762,5763,5764,5765,5766,5767,5768,5769,5770,5771,5772,5773,5774,5775,5776,5777,5778,5779,5780,5781,5782,5783,5784,5785,5786,5787,5788,5789,5790,5791,5792,5793,5794,5795,5796,5797,5798,5799,5800,5
801,5802,5803,5804,5805,5806,5807,5808,5809,5810,5811,5812,5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824,5825,5826,5827,5828,5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840,5841,5842,5843,5844,5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856,5857,5858,5859,5860,5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872,5873,5874,5875,5876,5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888,5889,5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904,5905,5906,5907,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920,5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936,5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952,5953,5954,5955,5956,5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968,5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984,5985,5986,5987,5988,5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000,6001,6002,6003,6004,6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016,6017,6018,6019,6020,6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032,6033,6034,6035,6036,6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048,6049,6050,6051,6052,6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064,6065,6066,6067,6068,6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080,6081,6082,6083,6084,6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096,6097,6098,6099,6100,6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112,6113,6114,6115,6116,6117,6118,6119,6120,6121,6122,6123,6124,6125,6126,6127,6128,6129,6130,6131,6132,6133,6134,6135,6136,6137,6138,6139,6140,6141,6142,6143,6144,6145,6146,6147,6148,6149,6150,6151,6152,6153,6154,6155,6156,6157,6158,6159,6160,6161,6162,6163,6164,6165,6166,6167,6168,6169,6170,6171,6172,6173,6174,6175,6176,6177,6178,6179,6180,6181,6182,6183,6184,6185,6186,6187,6188,6189,6190,6191,6192,6193,6194,6195,6196,6197,6198,6199,6200,6
201,6202,6203,6204,6205,6206,6207,6208,6209,6210,6211,6212,6213,6214,6215,6216,6217,6218,6219,6220,6221,6222,6223,6224,6225,6226,6227,6228,6229,6230,6231,6232,6233,6234,6235,6236,6237,6238,6239,6240,6241,6242,6243,6244,6245,6246,6247,6248,6249,6250,6251,6252,6253,6254,6255,6256,6257,6258,6259,6260,6261,6262,6263,6264,6265,6266,6267,6268,6269,6270,6271,6272,6273,6274,6275,6276,6277,6278,6279,6280,6281,6282,6283,6284,6285,6286,6287,6288,6289,6290,6291,6292,6293,6294,6295,6296,6297,6298,6299,6300,6301,6302,6303,6304,6305,6306,6307,6308,6309,6310,6311,6312,6313,6314,6315,6316,6317,6318,6319,6320,6321,6322,6323,6324,6325,6326,6327,6328,6329,6330,6331,6332,6333,6334,6335,6336,6337,6338,6339,6340,6341,6342,6343,6344,6345,6346,6347,6348,6349,6350,6351,6352,6353,6354,6355,6356,6357,6358,6359,6360,6361,6362,6363,6364,6365,6366,6367,6368,6369,6370,6371,6372,6373,6374,6375,6376,6377,6378,6379,6380,6381,6382,6383,6384,6385,6386,6387,6388,6389,6390,6391,6392,6393,6394,6395,6396,6397,6398,6399,6400,6401,6402,6403,6404,6405,6406,6407,6408,6409,6410,6411,6412,6413,6414,6415,6416,6417,6418,6419,6420,6421,6422,6423,6424,6425,6426,6427,6428,6429,6430,6431,6432,6433,6434,6435,6436,6437,6438,6439,6440,6441,6442,6443,6444,6445,6446,6447,6448,6449,6450,6451,6452,6453,6454,6455,6456,6457,6458,6459,6460,6461,6462,6463,6464,6465,6466,6467,6468,6469,6470,6471,6472,6473,6474,6475,6476,6477,6478,6479,6480,6481,6482,6483,6484,6485,6486,6487,6488,6489,6490,6491,6492,6493,6494,6495,6496,6497,6498,6499,6500,6501,6502,6503,6504,6505,6506,6507,6508,6509,6510,6511,6512,6513,6514,6515,6516,6517,6518,6519,6520,6521,6522,6523,6524,6525,6526,6527,6528,6529,6530,6531,6532,6533,6534,6535,6536,6537,6538,6539,6540,6541,6542,6543,6544,6545,6546,6547,6548,6549,6550,6551,6552,6553,6554,6555,6556,6557,6558,6559,6560,6561,6562,6563,6564,6565,6566,6567,6568,6569,6570,6571,6572,6573,6574,6575,6576,6577,6578,6579,6580,6581,6582,6583,6584,6585,6586,6587,6588,6589,6590,6591,6592,6593,6594,6595,6596,6597,6598,6599,6600,6
601,6602,6603,6604,6605,6606,6607,6608,6609,6610,6611,6612,6613,6614,6615,6616,6617,6618,6619,6620,6621,6622,6623,6624,6625,6626,6627,6628,6629,6630,6631,6632,6633,6634,6635,6636,6637,6638,6639,6640,6641,6642,6643,6644,6645,6646,6647,6648,6649,6650,6651,6652,6653,6654,6655,6656,6657,6658,6659,6660,6661,6662,6663,6664,6665,6666,6667,6668,6669,6670,6671,6672,6673,6674,6675,6676,6677,6678,6679,6680,6681,6682,6683,6684,6685,6686,6687,6688,6689,6690,6691,6692,6693,6694,6695,6696,6697,6698,6699,6700,6701,6702,6703,6704,6705,6706,6707,6708,6709,6710,6711,6712,6713,6714,6715,6716,6717,6718,6719,6720,6721,6722,6723,6724,6725,6726,6727,6728,6729,6730,6731,6732,6733,6734,6735,6736,6737,6738,6739,6740,6741,6742,6743,6744,6745,6746,6747,6748,6749,6750,6751,6752,6753,6754,6755,6756,6757,6758,6759,6760,6761,6762,6763,6764,6765,6766,6767,6768,6769,6770,6771,6772,6773,6774,6775,6776,6777,6778,6779,6780,6781,6782,6783,6784,6785,6786,6787,6788,6789,6790,6791,6792,6793,6794,6795,6796,6797,6798,6799,6800,6801,6802,6803,6804,6805,6806,6807,6808,6809,6810,6811,6812,6813,6814,6815,6816,6817,6818,6819,6820,6821,6822,6823,6824,6825,6826,6827,6828,6829,6830,6831,6832,6833,6834,6835,6836,6837,6838,6839,6840,6841,6842,6843,6844,6845,6846,6847,6848,6849,6850,6851,6852,6853,6854,6855,6856,6857,6858,6859,6860,6861,6862,6863,6864,6865,6866,6867,6868,6869,6870,6871,6872,6873,6874,6875,6876,6877,6878,6879,6880,6881,6882,6883,6884,6885,6886,6887,6888,6889,6890,6891,6892,6893,6894,6895,6896,6897,6898,6899,6900,6901,6902,6903,6904,6905,6906,6907,6908,6909,6910,6911,6912,6913,6914,6915,6916,6917,6918,6919,6920,6921,6922,6923,6924,6925,6926,6927,6928,6929,6930,6931,6932,6933,6934,6935,6936,6937,6938,6939,6940,6941,6942,6943,6944,6945,6946,6947,6948,6949,6950,6951,6952,6953,6954,6955,6956,6957,6958,6959,6960,6961,6962,6963,6964,6965,6966,6967,6968,6969,6970,6971,6972,6973,6974,6975,6976,6977,6978,6979,6980,6981,6982,6983,6984,6985,6986,6987,6988,6989,6990,6991,6992,6993,6994,6995,6996,6997,6998,6999,7000,7
001,7002,7003,7004,7005,7006,7007,7008,7009,7010,7011,7012,7013,7014,7015,7016,7017,7018,7019,7020,7021,7022,7023,7024,7025,7026,7027,7028,7029,7030,7031,7032,7033,7034,7035,7036,7037,7038,7039,7040,7041,7042,7043,7044,7045,7046,7047,7048,7049,7050,7051,7052,7053,7054,7055,7056,7057,7058,7059,7060,7061,7062,7063,7064,7065,7066,7067,7068,7069,7070,7071,7072,7073,7074,7075,7076,7077,7078,7079,7080,7081,7082,7083,7084,7085,7086,7087,7088,7089,7090,7091,7092,7093,7094,7095,7096,7097,7098,7099,7100,7101,7102,7103,7104,7105,7106,7107,7108,7109,7110,7111,7112,7113,7114,7115,7116,7117,7118,7119,7120,7121,7122,7123,7124,7125,7126,7127,7128,7129,7130,7131,7132,7133,7134,7135,7136,7137,7138,7139,7140,7141,7142,7143,7144,7145,7146,7147,7148,7149,7150,7151,7152,7153,7154,7155,7156,7157,7158,7159,7160,7161,7162,7163,7164,7165,7166,7167,7168,7169,7170,7171,7172,7173,7174,7175,7176,7177,7178,7179,7180,7181,7182,7183,7184,7185,7186,7187,7188,7189,7190,7191,7192,7193,7194,7195,7196,7197,7198,7199,7200,7201,7202,7203,7204,7205,7206,7207,7208,7209,7210,7211,7212,7213,7214,7215,7216,7217,7218,7219,7220,7221,7222,7223,7224,7225,7226,7227,7228,7229,7230,7231,7232,7233,7234,7235,7236,7237,7238,7239,7240,7241,7242,7243,7244,7245,7246,7247,7248,7249,7250,7251,7252,7253,7254,7255,7256,7257,7258,7259,7260,7261,7262,7263,7264,7265,7266,7267,7268,7269,7270,7271,7272,7273,7274,7275,7276,7277,7278,7279,7280,7281,7282,7283,7284,7285,7286,7287,7288,7289,7290,7291,7292,7293,7294,7295,7296,7297,7298,7299,7300,7301,7302,7303,7304,7305,7306,7307,7308,7309,7310,7311,7312,7313,7314,7315,7316,7317,7318,7319,7320,7321,7322,7323,7324,7325,7326,7327,7328,7329,7330,7331,7332,7333,7334,7335,7336,7337,7338,7339,7340,7341,7342,7343,7344,7345,7346,7347,7348,7349,7350,7351,7352,7353,7354,7355,7356,7357,7358,7359,7360,7361,7362,7363,7364,7365,7366,7367,7368,7369,7370,7371,7372,7373,7374,7375,7376,7377,7378,7379,7380,7381,7382,7383,7384,7385,7386,7387,7388,7389,7390,7391,7392,7393,7394,7395,7396,7397,7398,7399,7400,7
401,7402,7403,7404,7405,7406,7407,7408,7409,7410,7411,7412,7413,7414,7415,7416,7417,7418,7419,7420,7421,7422,7423,7424,7425,7426,7427,7428,7429,7430,7431,7432,7433,7434,7435,7436,7437,7438,7439,7440,7441,7442,7443,7444,7445,7446,7447,7448,7449,7450,7451,7452,7453,7454,7455,7456,7457,7458,7459,7460,7461,7462,7463,7464,7465,7466,7467,7468,7469,7470,7471,7472,7473,7474,7475,7476,7477,7478,7479,7480,7481,7482,7483,7484,7485,7486,7487,7488,7489,7490,7491,7492,7493,7494,7495,7496,7497,7498,7499,7500,7501,7502,7503,7504,7505,7506,7507,7508,7509,7510,7511,7512,7513,7514,7515,7516,7517,7518,7519,7520,7521,7522,7523,7524,7525,7526,7527,7528,7529,7530,7531,7532,7533,7534,7535,7536,7537,7538,7539,7540,7541,7542,7543,7544,7545,7546,7547,7548,7549,7550,7551,7552,7553,7554,7555,7556,7557,7558,7559,7560,7561,7562,7563,7564,7565,7566,7567,7568,7569,7570,7571,7572,7573,7574,7575,7576,7577,7578,7579,7580,7581,7582,7583,7584,7585,7586,7587,7588,7589,7590,7591,7592,7593,7594,7595,7596,7597,7598,7599,7600,7601,7602,7603,7604,7605,7606,7607,7608,7609,7610,7611,7612,7613,7614,7615,7616,7617,7618,7619,7620,7621,7622,7623,7624,7625,7626,7627,7628,7629,7630,7631,7632,7633,7634,7635,7636,7637,7638,7639,7640,7641,7642,7643,7644,7645,7646,7647,7648,7649,7650,7651,7652,7653,7654,7655,7656,7657,7658,7659,7660,7661,7662,7663,7664,7665,7666,7667,7668,7669,7670,7671,7672,7673,7674,7675,7676,7677,7678,7679,7680,7681,7682,7683,7684,7685,7686,7687,7688,7689,7690,7691,7692,7693,7694,7695,7696,7697,7698,7699,7700,7701,7702,7703,7704,7705,7706,7707,7708,7709,7710,7711,7712,7713,7714,7715,7716,7717,7718,7719,7720,7721,7722,7723,7724,7725,7726,7727,7728,7729,7730,7731,7732,7733,7734,7735,7736,7737,7738,7739,7740,7741,7742,7743,7744,7745,7746,7747,7748,7749,7750,7751,7752,7753,7754,7755,7756,7757,7758,7759,7760,7761,7762,7763,7764,7765,7766,7767,7768,7769,7770,7771,7772,7773,7774,7775,7776,7777,7778,7779,7780,7781,7782,7783,7784,7785,7786,7787,7788,7789,7790,7791,7792,7793,7794,7795,7796,7797,7798,7799,7800,7
801,7802,7803,7804,7805,7806,7807,7808,7809,7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,7824,7825,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,7840,7841,7842,7843,7844,7845,7846,7847,7848,7849,7850,7851,7852,7853,7854,7855,7856,7857,7858,7859,7860,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870,7871,7872,7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886,7887,7888,7889,7890,7891,7892,7893,7894,7895,7896,7897,7898,7899,7900,7901,7902,7903,7904,7905,7906,7907,7908,7909,7910,7911,7912,7913,7914,7915,7916,7917,7918,7919,7920,7921,7922,7923,7924,7925,7926,7927,7928,7929,7930,7931,7932,7933,7934,7935,7936,7937,7938,7939,7940,7941,7942,7943,7944,7945,7946,7947,7948,7949,7950,7951,7952,7953,7954,7955,7956,7957,7958,7959,7960,7961,7962,7963,7964,7965,7966,7967,7968,7969,7970,7971,7972,7973,7974,7975,7976,7977,7978,7979,7980,7981,7982,7983,7984,7985,7986,7987,7988,7989,7990,7991,7992,7993,7994,7995,7996,7997,7998,7999,8000,8001,8002,8003,8004,8005,8006,8007,8008,8009,8010,8011,8012,8013,8014,8015,8016,8017,8018,8019,8020,8021,8022,8023,8024,8025,8026,8027,8028,8029,8030,8031,8032,8033,8034,8035,8036,8037,8038,8039,8040,8041,8042,8043,8044,8045,8046,8047,8048,8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063,8064,8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079,8080,8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095,8096,8097,8098,8099,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110,8111,8112,8113,8114,8115,8116,8117,8118,8119,8120,8121,8122,8123,8124,8125,8126,8127,8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141,8142,8143,8144,8145,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155,8156,8157,8158,8159,8160,8161,8162,8163,8164,8165,8166,8167,8168,8169,8170,8171,8172,8173,8174,8175,8176,8177,8178,8179,8180,8181,8182,8183,8184,8185,8186,8187,8188,8189,8190,8191,8192,8193,8194,8195,8196,8197,8198,8199,8200,8
201,8202,8203,8204,8205,8206,8207,8208,8209,8210,8211,8212,8213,8214,8215,8216,8217,8218,8219,8220,8221,8222,8223,8224,8225,8226,8227,8228,8229,8230,8231,8232,8233,8234,8235,8236,8237,8238,8239,8240,8241,8242,8243,8244,8245,8246,8247,8248,8249,8250,8251,8252,8253,8254,8255,8256,8257,8258,8259,8260,8261,8262,8263,8264,8265,8266,8267,8268,8269,8270,8271,8272,8273,8274,8275,8276,8277,8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,8288,8289,8290,8291,8292,8293,8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,8304,8305,8306,8307,8308,8309,8310,8311,8312,8313,8314,8315,8316,8317,8318,8319,8320,8321,8322,8323,8324,8325,8326,8327,8328,8329,8330,8331,8332,8333,8334,8335,8336,8337,8338,8339,8340,8341,8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,8352,8353,8354,8355,8356,8357,8358,8359,8360,8361,8362,8363,8364,8365,8366,8367,8368,8369,8370,8371,8372,8373,8374,8375,8376,8377,8378,8379,8380,8381,8382,8383,8384,8385,8386,8387,8388,8389,8390,8391,8392,8393,8394,8395,8396,8397,8398,8399,8400,8401,8402,8403,8404,8405,8406,8407,8408,8409,8410,8411,8412,8413,8414,8415,8416,8417,8418,8419,8420,8421,8422,8423,8424,8425,8426,8427,8428,8429,8430,8431,8432,8433,8434,8435,8436,8437,8438,8439,8440,8441,8442,8443,8444,8445,8446,8447,8448,8449,8450,8451,8452,8453,8454,8455,8456,8457,8458,8459,8460,8461,8462,8463,8464,8465,8466,8467,8468,8469,8470,8471,8472,8473,8474,8475,8476,8477,8478,8479,8480,8481,8482,8483,8484,8485,8486,8487,8488,8489,8490,8491,8492,8493,8494,8495,8496,8497,8498,8499,8500,8501,8502,8503,8504,8505,8506,8507,8508,8509,8510,8511,8512,8513,8514,8515,8516,8517,8518,8519,8520,8521,8522,8523,8524,8525,8526,8527,8528,8529,8530,8531,8532,8533,8534,8535,8536,8537,8538,8539,8540,8541,8542,8543,8544,8545,8546,8547,8548,8549,8550,8551,8552,8553,8554,8555,8556,8557,8558,8559,8560,8561,8562,8563,8564,8565,8566,8567,8568,8569,8570,8571,8572,8573,8574,8575,8576,8577,8578,8579,8580,8581,8582,8583,8584,8585,8586,8587,8588,8589,8590,8591,8592,8593,8594,8595,8596,8597,8598,8599,8600,8
601,8602,8603,8604,8605,8606,8607,8608,8609,8610,8611,8612,8613,8614,8615,8616,8617,8618,8619,8620,8621,8622,8623,8624,8625,8626,8627,8628,8629,8630,8631,8632,8633,8634,8635,8636,8637,8638,8639,8640,8641,8642,8643,8644,8645,8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,8656,8657,8658,8659,8660,8661,8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,8672,8673,8674,8675,8676,8677,8678,8679,8680,8681,8682,8683,8684,8685,8686,8687,8688,8689,8690,8691,8692,8693,8694,8695,8696,8697,8698,8699,8700,8701,8702,8703,8704,8705,8706,8707,8708,8709,8710,8711,8712,8713,8714,8715,8716,8717,8718,8719,8720,8721,8722,8723,8724,8725,8726,8727,8728,8729,8730,8731,8732,8733,8734,8735,8736,8737,8738,8739,8740,8741,8742,8743,8744,8745,8746,8747,8748,8749,8750,8751,8752,8753,8754,8755,8756,8757,8758,8759,8760,8761,8762,8763,8764,8765,8766,8767,8768,8769,8770,8771,8772,8773,8774,8775,8776,8777,8778,8779,8780,8781,8782,8783,8784,8785,8786,8787,8788,8789,8790,8791,8792,8793,8794,8795,8796,8797,8798,8799,8800,8801,8802,8803,8804,8805,8806,8807,8808,8809,8810,8811,8812,8813,8814,8815,8816,8817,8818,8819,8820,8821,8822,8823,8824,8825,8826,8827,8828,8829,8830,8831,8832,8833,8834,8835,8836,8837,8838,8839,8840,8841,8842,8843,8844,8845,8846,8847,8848,8849,8850,8851,8852,8853,8854,8855,8856,8857,8858,8859,8860,8861,8862,8863,8864,8865,8866,8867,8868,8869,8870,8871,8872,8873,8874,8875,8876,8877,8878,8879,8880,8881,8882,8883,8884,8885,8886,8887,8888,8889,8890,8891,8892,8893,8894,8895,8896,8897,8898,8899,8900,8901,8902,8903,8904,8905,8906,8907,8908,8909,8910,8911,8912,8913,8914,8915,8916,8917,8918,8919,8920,8921,8922,8923,8924,8925,8926,8927,8928,8929,8930,8931,8932,8933,8934,8935,8936,8937,8938,8939,8940,8941,8942,8943,8944,8945,8946,8947,8948,8949,8950,8951,8952,8953,8954,8955,8956,8957,8958,8959,8960,8961,8962,8963,8964,8965,8966,8967,8968,8969,8970,8971,8972,8973,8974,8975,8976,8977,8978,8979,8980,8981,8982,8983,8984,8985,8986,8987,8988,8989,8990,8991,8992,8993,8994,8995,8996,8997,8998,8999,9000,9
001,9002,9003,9004,9005,9006,9007,9008,9009,9010,9011,9012,9013,9014,9015,9016,9017,9018,9019,9020,9021,9022,9023,9024,9025,9026,9027,9028,9029,9030,9031,9032,9033,9034,9035,9036,9037,9038,9039,9040,9041,9042,9043,9044,9045,9046,9047,9048,9049,9050,9051,9052,9053,9054,9055,9056,9057,9058,9059,9060,9061,9062,9063,9064,9065,9066,9067,9068,9069,9070,9071,9072,9073,9074,9075,9076,9077,9078,9079,9080,9081,9082,9083,9084,9085,9086,9087,9088,9089,9090,9091,9092,9093,9094,9095,9096,9097,9098,9099,9100,9101,9102,9103,9104,9105,9106,9107,9108,9109,9110,9111,9112,9113,9114,9115,9116,9117,9118,9119,9120,9121,9122,9123,9124,9125,9126,9127,9128,9129,9130,9131,9132,9133,9134,9135,9136,9137,9138,9139,9140,9141,9142,9143,9144,9145,9146,9147,9148,9149,9150,9151,9152,9153,9154,9155,9156,9157,9158,9159,9160,9161,9162,9163,9164,9165,9166,9167,9168,9169,9170,9171,9172,9173,9174,9175,9176,9177,9178,9179,9180,9181,9182,9183,9184,9185,9186,9187,9188,9189,9190,9191,9192,9193,9194,9195,9196,9197,9198,9199,9200,9201,9202,9203,9204,9205,9206,9207,9208,9209,9210,9211,9212,9213,9214,9215,9216,9217,9218,9219,9220,9221,9222,9223,9224,9225,9226,9227,9228,9229,9230,9231,9232,9233,9234,9235,9236,9237,9238,9239,9240,9241,9242,9243,9244,9245,9246,9247,9248,9249,9250,9251,9252,9253,9254,9255,9256,9257,9258,9259,9260,9261,9262,9263,9264,9265,9266,9267,9268,9269,9270,9271,9272,9273,9274,9275,9276,9277,9278,9279,9280,9281,9282,9283,9284,9285,9286,9287,9288,9289,9290,9291,9292,9293,9294,9295,9296,9297,9298,9299,9300,9301,9302,9303,9304,9305,9306,9307,9308,9309,9310,9311,9312,9313,9314,9315,9316,9317,9318,9319,9320,9321,9322,9323,9324,9325,9326,9327,9328,9329,9330,9331,9332,9333,9334,9335,9336,9337,9338,9339,9340,9341,9342,9343,9344,9345,9346,9347,9348,9349,9350,9351,9352,9353,9354,9355,9356,9357,9358,9359,9360,9361,9362,9363,9364,9365,9366,9367,9368,9369,9370,9371,9372,9373,9374,9375,9376,9377,9378,9379,9380,9381,9382,9383,9384,9385,9386,9387,9388,9389,9390,9391,9392,9393,9394,9395,9396,9397,9398,9399,9400,9
401,9402,9403,9404,9405,9406,9407,9408,9409,9410,9411,9412,9413,9414,9415,9416,9417,9418,9419,9420,9421,9422,9423,9424,9425,9426,9427,9428,9429,9430,9431,9432,9433,9434,9435,9436,9437,9438,9439,9440,9441,9442,9443,9444,9445,9446,9447,9448,9449,9450,9451,9452,9453,9454,9455,9456,9457,9458,9459,9460,9461,9462,9463,9464,9465,9466,9467,9468,9469,9470,9471,9472,9473,9474,9475,9476,9477,9478,9479,9480,9481,9482,9483,9484,9485,9486,9487,9488,9489,9490,9491,9492,9493,9494,9495,9496,9497,9498,9499,9500,9501,9502,9503,9504,9505,9506,9507,9508,9509,9510,9511,9512,9513,9514,9515,9516,9517,9518,9519,9520,9521,9522,9523,9524,9525,9526,9527,9528,9529,9530,9531,9532,9533,9534,9535,9536,9537,9538,9539,9540,9541,9542,9543,9544,9545,9546,9547,9548,9549,9550,9551,9552,9553,9554,9555,9556,9557,9558,9559,9560,9561,9562,9563,9564,9565,9566,9567,9568,9569,9570,9571,9572,9573,9574,9575,9576,9577,9578,9579,9580,9581,9582,9583,9584,9585,9586,9587,9588,9589,9590,9591,9592,9593,9594,9595,9596,9597,9598,9599,9600,9601,9602,9603,9604,9605,9606,9607,9608,9609,9610,9611,9612,9613,9614,9615,9616,9617,9618,9619,9620,9621,9622,9623,9624,9625,9626,9627,9628,9629,9630,9631,9632,9633,9634,9635,9636,9637,9638,9639,9640,9641,9642,9643,9644,9645,9646,9647,9648,9649,9650,9651,9652,9653,9654,9655,9656,9657,9658,9659,9660,9661,9662,9663,9664,9665,9666,9667,9668,9669,9670,9671,9672,9673,9674,9675,9676,9677,9678,9679,9680,9681,9682,9683,9684,9685,9686,9687,9688,9689,9690,9691,9692,9693,9694,9695,9696,9697,9698,9699,9700,9701,9702,9703,9704,9705,9706,9707,9708,9709,9710,9711,9712,9713,9714,9715,9716,9717,9718,9719,9720,9721,9722,9723,9724,9725,9726,9727,9728,9729,9730,9731,9732,9733,9734,9735,9736,9737,9738,9739,9740,9741,9742,9743,9744,9745,9746,9747,9748,9749,9750,9751,9752,9753,9754,9755,9756,9757,9758,9759,9760,9761,9762,9763,9764,9765,9766,9767,9768,9769,9770,9771,9772,9773,9774,9775,9776,9777,9778,9779,9780,9781,9782,9783,9784,9785,9786,9787,9788,9789,9790,9791,9792,9793,9794,9795,9796,9797,9798,9799,9800,9
801,9802,9803,9804,9805,9806,9807,9808,9809,9810,9811,9812,9813,9814,9815,9816,9817,9818,9819,9820,9821,9822,9823,9824,9825,9826,9827,9828,9829,9830,9831,9832,9833,9834,9835,9836,9837,9838,9839,9840,9841,9842,9843,9844,9845,9846,9847,9848,9849,9850,9851,9852,9853,9854,9855,9856,9857,9858,9859,9860,9861,9862,9863,9864,9865,9866,9867,9868,9869,9870,9871,9872,9873,9874,9875,9876,9877,9878,9879,9880,9881,9882,9883,9884,9885,9886,9887,9888,9889,9890,9891,9892,9893,9894,9895,9896,9897,9898,9899,9900,9901,9902,9903,9904,9905,9906,9907,9908,9909,9910,9911,9912,9913,9914,9915,9916,9917,9918,9919,9920,9921,9922,9923,9924,9925,9926,9927,9928,9929,9930,9931,9932,9933,9934,9935,9936,9937,9938,9939,9940,9941,9942,9943,9944,9945,9946,9947,9948,9949,9950,9951,9952,9953,9954,9955,9956,9957,9958,9959,9960,9961,9962,9963,9964,9965,9966,9967,9968,9969,9970,9971,9972,9973,9974,9975,9976,9977,9978,9979,9980,9981,9982,9983,9984,9985,9986,9987,9988,9989,9990,9991,9992,9993,9994,9995,9996,9997,9998,9999,10000,10001,10002,10003,10004,10005,10006,10007,10008,10009,10010,10011,10012,10013,10014,10015,10016,10017,10018,10019,10020,10021,10022,10023,10024,10025,10026,10027,10028,10029,10030,10031,10032,10033,10034,10035,10036,10037,10038,10039,10040,10041,10042,10043,10044,10045,10046,10047,10048,10049,10050,10051,10052,10053,10054,10055,10056,10057,10058,10059,10060,10061,10062,10063,10064,10065,10066,10067,10068,10069,10070,10071,10072,10073,10074,10075,10076,10077,10078,10079,10080,10081,10082,10083,10084,10085,10086,10087,10088,10089,10090,10091,10092,10093,10094,10095,10096,10097,10098,10099,10100,10101,10102,10103,10104,10105,10106,10107,10108,10109,10110,10111,10112,10113,10114,10115,10116,10117,10118,10119,10120,10121,10122,10123,10124,10125,10126,10127,10128,10129,10130,10131,10132,10133,10134,10135,10136,10137,10138,10139,10140,10141,10142,10143,10144,10145,10146,10147,10148,10149,10150,10151,10152,10153,10154,10155,10156,10157,10158,10159,10160,10161,10162,10163,10164,10165,10166,1016
7,10168,10169,10170,10171,10172,10173,10174,10175,10176,10177,10178,10179,10180,10181,10182,10183,10184,10185,10186,10187,10188,10189,10190,10191,10192,10193,10194,10195,10196,10197,10198,10199,10200,10201,10202,10203,10204,10205,10206,10207,10208,10209,10210,10211,10212,10213,10214,10215,10216,10217,10218,10219,10220,10221,10222,10223,10224,10225,10226,10227,10228,10229,10230,10231,10232,10233,10234,10235,10236,10237,10238,10239,10240,10241,10242,10243,10244,10245,10246,10247,10248,10249,10250,10251,10252,10253,10254,10255,10256,10257,10258,10259,10260,10261,10262,10263,10264,10265,10266,10267,10268,10269,10270,10271,10272,10273,10274,10275,10276,10277,10278,10279,10280,10281,10282,10283,10284,10285,10286,10287,10288,10289,10290,10291,10292,10293,10294,10295,10296,10297,10298,10299,10300,10301,10302,10303,10304,10305,10306,10307,10308,10309,10310,10311,10312,10313,10314,10315,10316,10317,10318,10319,10320,10321,10322,10323,10324,10325,10326,10327,10328,10329,10330,10331,10332,10333,10334,10335,10336,10337,10338,10339,10340,10341,10342,10343,10344,10345,10346,10347,10348,10349,10350,10351,10352,10353,10354,10355,10356,10357,10358,10359,10360,10361,10362,10363,10364,10365,10366,10367,10368,10369,10370,10371,10372,10373,10374,10375,10376,10377,10378,10379,10380,10381,10382,10383,10384,10385,10386,10387,10388,10389,10390,10391,10392,10393,10394,10395,10396,10397,10398,10399,10400,10401,10402,10403,10404,10405,10406,10407,10408,10409,10410,10411,10412,10413,10414,10415,10416,10417,10418,10419,10420,10421,10422,10423,10424,10425,10426,10427,10428,10429,10430,10431,10432,10433,10434,10435,10436,10437,10438,10439,10440,10441,10442,10443,10444,10445,10446,10447,10448,10449,10450,10451,10452,10453,10454,10455,10456,10457,10458,10459,10460,10461,10462,10463,10464,10465,10466,10467,10468,10469,10470,10471,10472,10473,10474,10475,10476,10477,10478,10479,10480,10481,10482,10483,10484,10485,10486,10487,10488,10489,10490,10491,10492,10493,10494,10495,10496,10497,10498,10499,10500,
10501,10502,10503,10504,10505,10506,10507,10508,10509,10510,10511,10512,10513,10514,10515,10516,10517,10518,10519,10520,10521,10522,10523,10524,10525,10526,10527,10528,10529,10530,10531,10532,10533,10534,10535,10536,10537,10538,10539,10540,10541,10542,10543,10544,10545,10546,10547,10548,10549,10550,10551,10552,10553,10554,10555,10556,10557,10558,10559,10560,10561,10562,10563,10564,10565,10566,10567,10568,10569,10570,10571,10572,10573,10574,10575,10576,10577,10578,10579,10580,10581,10582,10583,10584,10585,10586,10587,10588,10589,10590,10591,10592,10593,10594,10595,10596,10597,10598,10599,10600,10601,10602,10603,10604,10605,10606,10607,10608,10609,10610,10611,10612,10613,10614,10615,10616,10617,10618,10619,10620,10621,10622,10623,10624,10625,10626,10627,10628,10629,10630,10631,10632,10633,10634,10635,10636,10637,10638,10639,10640,10641,10642,10643,10644,10645,10646,10647,10648,10649,10650,10651,10652,10653,10654,10655,10656,10657,10658,10659,10660,10661,10662,10663,10664,10665,10666,10667,10668,10669,10670,10671,10672,10673,10674,10675,10676,10677,10678,10679,10680,10681,10682,10683,10684,10685,10686,10687,10688,10689,10690,10691,10692,10693,10694,10695,10696,10697,10698,10699,10700,10701,10702,10703,10704,10705,10706,10707,10708,10709,10710,10711,10712,10713,10714,10715,10716,10717,10718,10719,10720,10721,10722,10723,10724,10725,10726,10727,10728,10729,10730,10731,10732,10733,10734,10735,10736,10737,10738,10739,10740,10741,10742,10743,10744,10745,10746,10747,10748,10749,10750,10751,10752,10753,10754,10755,10756,10757,10758,10759,10760,10761,10762,10763,10764,10765,10766,10767,10768,10769,10770,10771,10772,10773,10774,10775,10776,10777,10778,10779,10780,10781,10782,10783,10784,10785,10786,10787,10788,10789,10790,10791,10792,10793,10794,10795,10796,10797,10798,10799,10800,10801,10802,10803,10804,10805,10806,10807,10808,10809,10810,10811,10812,10813,10814,10815,10816,10817,10818,10819,10820,10821,10822,10823,10824,10825,10826,10827,10828,10829,10830,10831,10832,10833,10
834,10835,10836,10837,10838,10839,10840,10841,10842,10843,10844,10845,10846,10847,10848,10849,10850,10851,10852,10853,10854,10855,10856,10857,10858,10859,10860,10861,10862,10863,10864,10865,10866,10867,10868,10869,10870,10871,10872,10873,10874,10875,10876,10877,10878,10879,10880,10881,10882,10883,10884,10885,10886,10887,10888,10889,10890,10891,10892,10893,10894,10895,10896,10897,10898,10899,10900,10901,10902,10903,10904,10905,10906,10907,10908,10909,10910,10911,10912,10913,10914,10915,10916,10917,10918,10919,10920,10921,10922,10923,10924,10925,10926,10927,10928,10929,10930,10931,10932,10933,10934,10935,10936,10937,10938,10939,10940,10941,10942,10943,10944,10945,10946,10947,10948,10949,10950,10951,10952,10953,10954,10955,10956,10957,10958,10959,10960,10961,10962,10963,10964,10965,10966,10967,10968,10969,10970,10971,10972,10973,10974,10975,10976,10977,10978,10979,10980,10981,10982,10983,10984,10985,10986,10987,10988,10989,10990,10991,10992,10993,10994,10995,10996,10997,10998,10999,11000,11001,11002,11003,11004,11005,11006,11007,11008,11009,11010,11011,11012,11013,11014,11015,11016,11017,11018,11019,11020,11021,11022,11023,11024,11025,11026,11027,11028,11029,11030,11031,11032,11033,11034,11035,11036,11037,11038,11039,11040,11041,11042,11043,11044,11045,11046,11047,11048,11049,11050,11051,11052,11053,11054,11055,11056,11057,11058,11059,11060,11061,11062,11063,11064,11065,11066,11067,11068,11069,11070,11071,11072,11073,11074,11075,11076,11077,11078,11079,11080,11081,11082,11083,11084,11085,11086,11087,11088,11089,11090,11091,11092,11093,11094,11095,11096,11097,11098,11099,11100,11101,11102,11103,11104,11105,11106,11107,11108,11109,11110,11111,11112,11113,11114,11115,11116,11117,11118,11119,11120,11121,11122,11123,11124,11125,11126,11127,11128,11129,11130,11131,11132,11133,11134,11135,11136,11137,11138,11139,11140,11141,11142,11143,11144,11145,11146,11147,11148,11149,11150,11151,11152,11153,11154,11155,11156,11157,11158,11159,11160,11161,11162,11163,11164,11165,11166,1116
7,11168,11169,11170,11171,11172,11173,11174,11175,11176,11177,11178,11179,11180,11181,11182,11183,11184,11185,11186,11187,11188,11189,11190,11191,11192,11193,11194,11195,11196,11197,11198,11199,11200,11201,11202,11203,11204,11205,11206,11207,11208,11209,11210,11211,11212,11213,11214,11215,11216,11217,11218,11219,11220,11221,11222,11223,11224,11225,11226,11227,11228,11229,11230,11231,11232,11233,11234,11235,11236,11237,11238,11239,11240,11241,11242,11243,11244,11245,11246,11247,11248,11249,11250,11251,11252,11253,11254,11255,11256,11257,11258,11259,11260,11261,11262,11263,11264,11265,11266,11267,11268,11269,11270,11271,11272,11273,11274,11275,11276,11277,11278,11279,11280,11281,11282,11283,11284,11285,11286,11287,11288,11289,11290,11291,11292,11293,11294,11295,11296,11297,11298,11299,11300,11301,11302,11303,11304,11305,11306,11307,11308,11309,11310,11311,11312,11313,11314,11315,11316,11317,11318,11319,11320,11321,11322,11323,11324,11325,11326,11327,11328,11329,11330,11331,11332,11333,11334,11335,11336,11337,11338,11339,11340,11341,11342,11343,11344,11345,11346,11347,11348,11349,11350,11351,11352,11353,11354,11355,11356,11357,11358,11359,11360,11361,11362,11363,11364,11365,11366,11367,11368,11369,11370,11371,11372,11373,11374,11375,11376,11377,11378,11379,11380,11381,11382,11383,11384,11385,11386,11387,11388,11389,11390,11391,11392,11393,11394,11395,11396,11397,11398,11399,11400,11401,11402,11403,11404,11405,11406,11407,11408,11409,11410,11411,11412,11413,11414,11415,11416,11417,11418,11419,11420,11421,11422,11423,11424,11425,11426,11427,11428,11429,11430,11431,11432,11433,11434,11435,11436,11437,11438,11439,11440,11441,11442,11443,11444,11445,11446,11447,11448,11449,11450,11451,11452,11453,11454,11455,11456,11457,11458,11459,11460,11461,11462,11463,11464,11465,11466,11467,11468,11469,11470,11471,11472,11473,11474,11475,11476,11477,11478,11479,11480,11481,11482,11483,11484,11485,11486,11487,11488,11489,11490,11491,11492,11493,11494,11495,11496,11497,11498,11499,11500,
11501,11502,11503,11504,11505,11506,11507,11508,11509,11510,11511,11512,11513,11514,11515,11516,11517,11518,11519,11520,11521,11522,11523,11524,11525,11526,11527,11528,11529,11530,11531,11532,11533,11534,11535,11536,11537,11538,11539,11540,11541,11542,11543,11544,11545,11546,11547,11548,11549,11550,11551,11552,11553,11554,11555,11556,11557,11558,11559,11560,11561,11562,11563,11564,11565,11566,11567,11568,11569,11570,11571,11572,11573,11574,11575,11576,11577,11578,11579,11580,11581,11582,11583,11584,11585,11586,11587,11588,11589,11590,11591,11592,11593,11594,11595,11596,11597,11598,11599,11600,11601,11602,11603,11604,11605,11606,11607,11608,11609,11610,11611,11612,11613,11614,11615,11616,11617,11618,11619,11620,11621,11622,11623,11624,11625,11626,11627,11628,11629,11630,11631,11632,11633,11634,11635,11636,11637,11638,11639,11640,11641,11642,11643,11644,11645,11646,11647,11648,11649,11650,11651,11652,11653,11654,11655,11656,11657,11658,11659,11660,11661,11662,11663,11664,11665,11666,11667,11668,11669,11670,11671,11672,11673,11674,11675,11676,11677,11678,11679,11680,11681,11682,11683,11684,11685,11686,11687,11688,11689,11690,11691,11692,11693,11694,11695,11696,11697,11698,11699,11700,11701,11702,11703,11704,11705,11706,11707,11708,11709,11710,11711,11712,11713,11714,11715,11716,11717,11718,11719,11720,11721,11722,11723,11724,11725,11726,11727,11728,11729,11730,11731,11732,11733,11734,11735,11736,11737,11738,11739,11740,11741,11742,11743,11744,11745,11746,11747,11748,11749,11750,11751,11752,11753,11754,11755,11756,11757,11758,11759,11760,11761,11762,11763,11764,11765,11766,11767,11768,11769,11770,11771,11772,11773,11774,11775,11776,11777,11778,11779,11780,11781,11782,11783,11784,11785,11786,11787,11788,11789,11790,11791,11792,11793,11794,11795,11796,11797,11798,11799,11800,11801,11802,11803,11804,11805,11806,11807,11808,11809,11810,11811,11812,11813,11814,11815,11816,11817,11818,11819,11820,11821,11822,11823,11824,11825,11826,11827,11828,11829,11830,11831,11832,11833,11
834,11835,11836,11837,11838,11839,11840,11841,11842,11843,11844,11845,11846,11847,11848,11849,11850,11851,11852,11853,11854,11855,11856,11857,11858,11859,11860,11861,11862,11863,11864,11865,11866,11867,11868,11869,11870,11871,11872,11873,11874,11875,11876,11877,11878,11879,11880,11881,11882,11883,11884,11885,11886,11887,11888,11889,11890,11891,11892,11893,11894,11895,11896,11897,11898,11899,11900,11901,11902,11903,11904,11905,11906,11907,11908,11909,11910,11911,11912,11913,11914,11915,11916,11917,11918,11919,11920,11921,11922,11923,11924,11925,11926,11927,11928,11929,11930,11931,11932,11933,11934,11935,11936,11937,11938,11939,11940,11941,11942,11943,11944,11945,11946,11947,11948,11949,11950,11951,11952,11953,11954,11955,11956,11957,11958,11959,11960,11961,11962,11963,11964,11965,11966,11967,11968,11969,11970,11971,11972,11973,11974,11975,11976,11977,11978,11979,11980,11981,11982,11983,11984,11985,11986,11987,11988,11989,11990,11991,11992,11993,11994,11995,11996,11997,11998,11999,12000,12001,12002,12003,12004,12005,12006,12007,12008,12009,12010,12011,12012,12013,12014,12015,12016,12017,12018,12019,12020,12021,12022,12023,12024,12025,12026,12027,12028,12029,12030,12031,12032,12033,12034,12035,12036,12037,12038,12039,12040,12041,12042,12043,12044,12045,12046,12047,12048,12049,12050,12051,12052,12053,12054,12055,12056,12057,12058,12059,12060,12061,12062,12063,12064,12065,12066,12067,12068,12069,12070,12071,12072,12073,12074,12075,12076,12077,12078,12079,12080,12081,12082,12083,12084,12085,12086,12087,12088,12089,12090,12091,12092,12093,12094,12095,12096,12097,12098,12099,12100,12101,12102,12103,12104,12105,12106,12107,12108,12109,12110,12111,12112,12113,12114,12115,12116,12117,12118,12119,12120,12121,12122,12123,12124,12125,12126,12127,12128,12129,12130,12131,12132,12133,12134,12135,12136,12137,12138,12139,12140,12141,12142,12143,12144,12145,12146,12147,12148,12149,12150,12151,12152,12153,12154,12155,12156,12157,12158,12159,12160,12161,12162,12163,12164,12165,12166,1216
7,12168,12169,12170,12171,12172,12173,12174,12175,12176,12177,12178,12179,12180,12181,12182,12183,12184,12185,12186,12187,12188,12189,12190,12191,12192,12193,12194,12195,12196,12197,12198,12199,12200,12201,12202,12203,12204,12205,12206,12207,12208,12209,12210,12211,12212,12213,12214,12215,12216,12217,12218,12219,12220,12221,12222,12223,12224,12225,12226,12227,12228,12229,12230,12231,12232,12233,12234,12235,12236,12237,12238,12239,12240,12241,12242,12243,12244,12245,12246,12247,12248,12249,12250,12251,12252,12253,12254,12255,12256,12257,12258,12259,12260,12261,12262,12263,12264,12265,12266,12267,12268,12269,12270,12271,12272,12273,12274,12275,12276,12277,12278,12279,12280,12281,12282,12283,12284,12285,12286,12287,12288,12289,12290,12291,12292,12293,12294,12295,12296,12297,12298,12299,12300,12301,12302,12303,12304,12305,12306,12307,12308,12309,12310,12311,12312,12313,12314,12315,12316,12317,12318,12319,12320,12321,12322,12323,12324,12325,12326,12327,12328,12329,12330,12331,12332,12333,12334,12335,12336,12337,12338,12339,12340,12341,12342,12343,12344,12345,12346,12347,12348,12349,12350,12351,12352,12353,12354,12355,12356,12357,12358,12359,12360,12361,12362,12363,12364,12365,12366,12367,12368,12369,12370,12371,12372,12373,12374,12375,12376,12377,12378,12379,12380,12381,12382,12383,12384,12385,12386,12387,12388,12389,12390,12391,12392,12393,12394,12395,12396,12397,12398,12399,12400,12401,12402,12403,12404,12405,12406,12407,12408,12409,12410,12411,12412,12413,12414,12415,12416,12417,12418,12419,12420,12421,12422,12423,12424,12425,12426,12427,12428,12429,12430,12431,12432,12433,12434,12435,12436,12437,12438,12439,12440,12441,12442,12443,12444,12445,12446,12447,12448,12449,12450,12451,12452,12453,12454,12455,12456,12457,12458,12459,12460,12461,12462,12463,12464,12465,12466,12467,12468,12469,12470,12471,12472,12473,12474,12475,12476,12477,12478,12479,12480,12481,12482,12483,12484,12485,12486,12487,12488,12489,12490,12491,12492,12493,12494,12495,12496,12497,12498,12499,12500,
12501,12502,12503,12504,12505,12506,12507,12508,12509,12510,12511,12512,12513,12514,12515,12516,12517,12518,12519,12520,12521,12522,12523,12524,12525,12526,12527,12528,12529,12530,12531,12532,12533,12534,12535,12536,12537,12538,12539,12540,12541,12542,12543,12544,12545,12546,12547,12548,12549,12550,12551,12552,12553,12554,12555,12556,12557,12558,12559,12560,12561,12562,12563,12564,12565,12566,12567,12568,12569,12570,12571,12572,12573,12574,12575,12576,12577,12578,12579,12580,12581,12582,12583,12584,12585,12586,12587,12588,12589,12590,12591,12592,12593,12594,12595,12596,12597,12598,12599,12600,12601,12602,12603,12604,12605,12606,12607,12608,12609,12610,12611,12612,12613,12614,12615,12616,12617,12618,12619,12620,12621,12622,12623,12624,12625,12626,12627,12628,12629,12630,12631,12632,12633,12634,12635,12636,12637,12638,12639,12640,12641,12642,12643,12644,12645,12646,12647,12648,12649,12650,12651,12652,12653,12654,12655,12656,12657,12658,12659,12660,12661,12662,12663,12664,12665,12666,12667,12668,12669,12670,12671,12672,12673,12674,12675,12676,12677,12678,12679,12680,12681,12682,12683,12684,12685,12686,12687,12688,12689,12690,12691,12692,12693,12694,12695,12696,12697,12698,12699,12700,12701,12702,12703,12704,12705,12706,12707,12708,12709,12710,12711,12712,12713,12714,12715,12716,12717,12718,12719,12720,12721,12722,12723,12724,12725,12726,12727,12728,12729,12730,12731,12732,12733,12734,12735,12736,12737,12738,12739,12740,12741,12742,12743,12744,12745,12746,12747,12748,12749,12750,12751,12752,12753,12754,12755,12756,12757,12758,12759,12760,12761,12762,12763,12764,12765,12766,12767,12768,12769,12770,12771,12772,12773,12774,12775,12776,12777,12778,12779,12780,12781,12782,12783,12784,12785,12786,12787,12788,12789,12790,12791,12792,12793,12794,12795,12796,12797,12798,12799,12800,12801,12802,12803,12804,12805,12806,12807,12808,12809,12810,12811,12812,12813,12814,12815,12816,12817,12818,12819,12820,12821,12822,12823,12824,12825,12826,12827,12828,12829,12830,12831,12832,12833,12
834,12835,12836,12837,12838,12839,12840,12841,12842,12843,12844,12845,12846,12847,12848,12849,12850,12851,12852,12853,12854,12855,12856,12857,12858,12859,12860,12861,12862,12863,12864,12865,12866,12867,12868,12869,12870,12871,12872,12873,12874,12875,12876,12877,12878,12879,12880,12881,12882,12883,12884,12885,12886,12887,12888,12889,12890,12891,12892,12893,12894,12895,12896,12897,12898,12899,12900,12901,12902,12903,12904,12905,12906,12907,12908,12909,12910,12911,12912,12913,12914,12915,12916,12917,12918,12919,12920,12921,12922,12923,12924,12925,12926,12927,12928,12929,12930,12931,12932,12933,12934,12935,12936,12937,12938,12939,12940,12941,12942,12943,12944,12945,12946,12947,12948,12949,12950,12951,12952,12953,12954,12955,12956,12957,12958,12959,12960,12961,12962,12963,12964,12965,12966,12967,12968,12969,12970,12971,12972,12973,12974,12975,12976,12977,12978,12979,12980,12981,12982,12983,12984,12985,12986,12987,12988,12989,12990,12991,12992,12993,12994,12995,12996,12997,12998,12999,13000,13001,13002,13003,13004,13005,13006,13007,13008,13009,13010,13011,13012,13013,13014,13015,13016,13017,13018,13019,13020,13021,13022,13023,13024,13025,13026,13027,13028,13029,13030,13031,13032,13033,13034,13035,13036,13037,13038,13039,13040,13041,13042,13043,13044,13045,13046,13047,13048,13049,13050,13051,13052,13053,13054,13055,13056,13057,13058,13059,13060,13061,13062,13063,13064,13065,13066,13067,13068,13069,13070,13071,13072,13073,13074,13075,13076,13077,13078,13079,13080,13081,13082,13083,13084,13085,13086,13087,13088,13089,13090,13091,13092,13093,13094,13095,13096,13097,13098,13099,13100,13101,13102,13103,13104,13105,13106,13107,13108,13109,13110,13111,13112,13113,13114,13115,13116,13117,13118,13119,13120,13121,13122,13123,13124,13125,13126,13127,13128,13129,13130,13131,13132,13133,13134,13135,13136,13137,13138,13139,13140,13141,13142,13143,13144,13145,13146,13147,13148,13149,13150,13151,13152,13153,13154,13155,13156,13157,13158,13159,13160,13161,13162,13163,13164,13165,13166,1316
7,13168,13169,13170,13171,13172,13173,13174,13175,13176,13177,13178,13179,13180,13181,13182,13183,13184,13185,13186,13187,13188,13189,13190,13191,13192,13193,13194,13195,13196,13197,13198,13199,13200,13201,13202,13203,13204,13205,13206,13207,13208,13209,13210,13211,13212,13213,13214,13215,13216,13217,13218,13219,13220,13221,13222,13223,13224,13225,13226,13227,13228,13229,13230,13231,13232,13233,13234,13235,13236,13237,13238,13239,13240,13241,13242,13243,13244,13245,13246,13247,13248,13249,13250,13251,13252,13253,13254,13255,13256,13257,13258,13259,13260,13261,13262,13263,13264,13265,13266,13267,13268,13269,13270,13271,13272,13273,13274,13275,13276,13277,13278,13279,13280,13281,13282,13283,13284,13285,13286,13287,13288,13289,13290,13291,13292,13293,13294,13295,13296,13297,13298,13299,13300,13301,13302,13303,13304,13305,13306,13307,13308,13309,13310,13311,13312,13313,13314,13315,13316,13317,13318,13319,13320,13321,13322,13323,13324,13325,13326,13327,13328,13329,13330,13331,13332,13333,13334,13335,13336,13337,13338,13339,13340,13341,13342,13343,13344,13345,13346,13347,13348,13349,13350,13351,13352,13353,13354,13355,13356,13357,13358,13359,13360,13361,13362,13363,13364,13365,13366,13367,13368,13369,13370,13371,13372,13373,13374,13375,13376,13377,13378,13379,13380,13381,13382,13383,13384,13385,13386,13387,13388,13389,13390,13391,13392,13393,13394,13395,13396,13397,13398,13399,13400,13401,13402,13403,13404,13405,13406,13407,13408,13409,13410,13411,13412,13413,13414,13415,13416,13417,13418,13419,13420,13421,13422,13423,13424,13425,13426,13427,13428,13429,13430,13431,13432,13433,13434,13435,13436,13437,13438,13439,13440,13441,13442,13443,13444,13445,13446,13447,13448,13449,13450,13451,13452,13453,13454,13455,13456,13457,13458,13459,13460,13461,13462,13463,13464,13465,13466,13467,13468,13469,13470,13471,13472,13473,13474,13475,13476,13477,13478,13479,13480,13481,13482,13483,13484,13485,13486,13487,13488,13489,13490,13491,13492,13493,13494,13495,13496,13497,13498,13499,13500,
13501,13502,13503,13504,13505,13506,13507,13508,13509,13510,13511,13512,13513,13514,13515,13516,13517,13518,13519,13520,13521,13522,13523,13524,13525,13526,13527,13528,13529,13530,13531,13532,13533,13534,13535,13536,13537,13538,13539,13540,13541,13542,13543,13544,13545,13546,13547,13548,13549,13550,13551,13552,13553,13554,13555,13556,13557,13558,13559,13560,13561,13562,13563,13564,13565,13566,13567,13568,13569,13570,13571,13572,13573,13574,13575,13576,13577,13578,13579,13580,13581,13582,13583,13584,13585,13586,13587,13588,13589,13590,13591,13592,13593,13594,13595,13596,13597,13598,13599,13600,13601,13602,13603,13604,13605,13606,13607,13608,13609,13610,13611,13612,13613,13614,13615,13616,13617,13618,13619,13620,13621,13622,13623,13624,13625,13626,13627,13628,13629,13630,13631,13632,13633,13634,13635,13636,13637,13638,13639,13640,13641,13642,13643,13644,13645,13646,13647,13648,13649,13650,13651,13652,13653,13654,13655,13656,13657,13658,13659,13660,13661,13662,13663,13664,13665,13666,13667,13668,13669,13670,13671,13672,13673,13674,13675,13676,13677,13678,13679,13680,13681,13682,13683,13684,13685,13686,13687,13688,13689,13690,13691,13692,13693,13694,13695,13696,13697,13698,13699,13700,13701,13702,13703,13704,13705,13706,13707,13708,13709,13710,13711,13712,13713,13714,13715,13716,13717,13718,13719,13720,13721,13722,13723,13724,13725,13726,13727,13728,13729,13730,13731,13732,13733,13734,13735,13736,13737,13738,13739,13740,13741,13742,13743,13744,13745,13746,13747,13748,13749,13750,13751,13752,13753,13754,13755,13756,13757,13758,13759,13760,13761,13762,13763,13764,13765,13766,13767,13768,13769,13770,13771,13772,13773,13774,13775,13776,13777,13778,13779,13780,13781,13782,13783,13784,13785,13786,13787,13788,13789,13790,13791,13792,13793,13794,13795,13796,13797,13798,13799,13800,13801,13802,13803,13804,13805,13806,13807,13808,13809,13810,13811,13812,13813,13814,13815,13816,13817,13818,13819,13820,13821,13822,13823,13824,13825,13826,13827,13828,13829,13830,13831,13832,13833,13
834,13835,13836,13837,13838,13839,13840,13841,13842,13843,13844,13845,13846,13847,13848,13849,13850,13851,13852,13853,13854,13855,13856,13857,13858,13859,13860,13861,13862,13863,13864,13865,13866,13867,13868,13869,13870,13871,13872,13873,13874,13875,13876,13877,13878,13879,13880,13881,13882,13883,13884,13885,13886,13887,13888,13889,13890,13891,13892,13893,13894,13895,13896,13897,13898,13899,13900,13901,13902,13903,13904,13905,13906,13907,13908,13909,13910,13911,13912,13913,13914,13915,13916,13917,13918,13919,13920,13921,13922,13923,13924,13925,13926,13927,13928,13929,13930,13931,13932,13933,13934,13935,13936,13937,13938,13939,13940,13941,13942,13943,13944,13945,13946,13947,13948,13949,13950,13951,13952,13953,13954,13955,13956,13957,13958,13959,13960,13961,13962,13963,13964,13965,13966,13967,13968,13969,13970,13971,13972,13973,13974,13975,13976,13977,13978,13979,13980,13981,13982,13983,13984,13985,13986,13987,13988,13989,13990,13991,13992,13993,13994,13995,13996,13997,13998,13999,14000,14001,14002,14003,14004,14005,14006,14007,14008,14009,14010,14011,14012,14013,14014,14015,14016,14017,14018,14019,14020,14021,14022,14023,14024,14025,14026,14027,14028,14029,14030,14031,14032,14033,14034,14035,14036,14037,14038,14039,14040,14041,14042,14043,14044,14045,14046,14047,14048,14049,14050,14051,14052,14053,14054,14055,14056,14057,14058,14059,14060,14061,14062,14063,14064,14065,14066,14067,14068,14069,14070,14071,14072,14073,14074,14075,14076,14077,14078,14079,14080,14081,14082,14083,14084,14085,14086,14087,14088,14089,14090,14091,14092,14093,14094,14095,14096,14097,14098,14099,14100,14101,14102,14103,14104,14105,14106,14107,14108,14109,14110,14111,14112,14113,14114,14115,14116,14117,14118,14119,14120,14121,14122,14123,14124,14125,14126,14127,14128,14129,14130,14131,14132,14133,14134,14135,14136,14137,14138,14139,14140,14141,14142,14143,14144,14145,14146,14147,14148,14149,14150,14151,14152,14153,14154,14155,14156,14157,14158,14159,14160,14161,14162,14163,14164,14165,14166,1416
7,14168,14169,14170,14171,14172,14173,14174,14175,14176,14177,14178,14179,14180,14181,14182,14183,14184,14185,14186,14187,14188,14189,14190,14191,14192,14193,14194,14195,14196,14197,14198,14199,14200,14201,14202,14203,14204,14205,14206,14207,14208,14209,14210,14211,14212,14213,14214,14215,14216,14217,14218,14219,14220,14221,14222,14223,14224,14225,14226,14227,14228,14229,14230,14231,14232,14233,14234,14235,14236,14237,14238,14239,14240,14241,14242,14243,14244,14245,14246,14247,14248,14249,14250,14251,14252,14253,14254,14255,14256,14257,14258,14259,14260,14261,14262,14263,14264,14265,14266,14267,14268,14269,14270,14271,14272,14273,14274,14275,14276,14277,14278,14279,14280,14281,14282,14283,14284,14285,14286,14287,14288,14289,14290,14291,14292,14293,14294,14295,14296,14297,14298,14299,14300,14301,14302,14303,14304,14305,14306,14307,14308,14309,14310,14311,14312,14313,14314,14315,14316,14317,14318,14319,14320,14321,14322,14323,14324,14325,14326,14327,14328,14329,14330,14331,14332,14333,14334,14335,14336,14337,14338,14339,14340,14341,14342,14343,14344,14345,14346,14347,14348,14349,14350,14351,14352,14353,14354,14355,14356,14357,14358,14359,14360,14361,14362,14363,14364,14365,14366,14367,14368,14369,14370,14371,14372,14373,14374,14375,14376,14377,14378,14379,14380,14381,14382,14383,14384,14385,14386,14387,14388,14389,14390,14391,14392,14393,14394,14395,14396,14397,14398,14399,14400,14401,14402,14403,14404,14405,14406,14407,14408,14409,14410,14411,14412,14413,14414,14415,14416,14417,14418,14419,14420,14421,14422,14423,14424,14425,14426,14427,14428,14429,14430,14431,14432,14433,14434,14435,14436,14437,14438,14439,14440,14441,14442,14443,14444,14445,14446,14447,14448,14449,14450,14451,14452,14453,14454,14455,14456,14457,14458,14459,14460,14461,14462,14463,14464,14465,14466,14467,14468,14469,14470,14471,14472,14473,14474,14475,14476,14477,14478,14479,14480,14481,14482,14483,14484,14485,14486,14487,14488,14489,14490,14491,14492,14493,14494,14495,14496,14497,14498,14499,14500,
14501,14502,14503,14504,14505,14506,14507,14508,14509,14510,14511,14512,14513,14514,14515,14516,14517,14518,14519,14520,14521,14522,14523,14524,14525,14526,14527,14528,14529,14530,14531,14532,14533,14534,14535,14536,14537,14538,14539,14540,14541,14542,14543,14544,14545,14546,14547,14548,14549,14550,14551,14552,14553,14554,14555,14556,14557,14558,14559,14560,14561,14562,14563,14564,14565,14566,14567,14568,14569,14570,14571,14572,14573,14574,14575,14576,14577,14578,14579,14580,14581,14582,14583,14584,14585,14586,14587,14588,14589,14590,14591,14592,14593,14594,14595,14596,14597,14598,14599,14600,14601,14602,14603,14604,14605,14606,14607,14608,14609,14610,14611,14612,14613,14614,14615,14616,14617,14618,14619,14620,14621,14622,14623,14624,14625,14626,14627,14628,14629,14630,14631,14632,14633,14634,14635,14636,14637,14638,14639,14640,14641,14642,14643,14644,14645,14646,14647,14648,14649,14650,14651,14652,14653,14654,14655,14656,14657,14658,14659,14660,14661,14662,14663,14664,14665,14666,14667,14668,14669,14670,14671,14672,14673,14674,14675,14676,14677,14678,14679,14680,14681,14682,14683,14684,14685,14686,14687,14688,14689,14690,14691,14692,14693,14694,14695,14696,14697,14698,14699,14700,14701,14702,14703,14704,14705,14706,14707,14708,14709,14710,14711,14712,14713,14714,14715,14716,14717,14718,14719,14720,14721,14722,14723,14724,14725,14726,14727,14728,14729,14730,14731,14732,14733,14734,14735,14736,14737,14738,14739,14740,14741,14742,14743,14744,14745,14746,14747,14748,14749,14750,14751,14752,14753,14754,14755,14756,14757,14758,14759,14760,14761,14762,14763,14764,14765,14766,14767,14768,14769,14770,14771,14772,14773,14774,14775,14776,14777,14778,14779,14780,14781,14782,14783,14784,14785,14786,14787,14788,14789,14790,14791,14792,14793,14794,14795,14796,14797,14798,14799,14800,14801,14802,14803,14804,14805,14806,14807,14808,14809,14810,14811,14812,14813,14814,14815,14816,14817,14818,14819,14820,14821,14822,14823,14824,14825,14826,14827,14828,14829,14830,14831,14832,14833,14
834,14835,14836,14837,14838,14839,14840,14841,14842,14843,14844,14845,14846,14847,14848,14849,14850,14851,14852,14853,14854,14855,14856,14857,14858,14859,14860,14861,14862,14863,14864,14865,14866,14867,14868,14869,14870,14871,14872,14873,14874,14875,14876,14877,14878,14879,14880,14881,14882,14883,14884,14885,14886,14887,14888,14889,14890,14891,14892,14893,14894,14895,14896,14897,14898,14899,14900,14901,14902,14903,14904,14905,14906,14907,14908,14909,14910,14911,14912,14913,14914,14915,14916,14917,14918,14919,14920,14921,14922,14923,14924,14925,14926,14927,14928,14929,14930,14931,14932,14933,14934,14935,14936,14937,14938,14939,14940,14941,14942,14943,14944,14945,14946,14947,14948,14949,14950,14951,14952,14953,14954,14955,14956,14957,14958,14959,14960,14961,14962,14963,14964,14965,14966,14967,14968,14969,14970,14971,14972,14973,14974,14975,14976,14977,14978,14979,14980,14981,14982,14983,14984,14985,14986,14987,14988,14989,14990,14991,14992,14993,14994,14995,14996,14997,14998,14999,15000,15001,15002,15003,15004,15005,15006,15007,15008,15009,15010,15011,15012,15013,15014,15015,15016,15017,15018,15019,15020,15021,15022,15023,15024,15025,15026,15027,15028,15029,15030,15031,15032,15033,15034,15035,15036,15037,15038,15039,15040,15041,15042,15043,15044,15045,15046,15047,15048,15049,15050,15051,15052,15053,15054,15055,15056,15057,15058,15059,15060,15061,15062,15063,15064,15065,15066,15067,15068,15069,15070,15071,15072,15073,15074,15075,15076,15077,15078,15079,15080,15081,15082,15083,15084,15085,15086,15087,15088,15089,15090,15091,15092,15093,15094,15095,15096,15097,15098,15099,15100,15101,15102,15103,15104,15105,15106,15107,15108,15109,15110,15111,15112,15113,15114,15115,15116,15117,15118,15119,15120,15121,15122,15123,15124,15125,15126,15127,15128,15129,15130,15131,15132,15133,15134,15135,15136,15137,15138,15139,15140,15141,15142,15143,15144,15145,15146,15147,15148,15149,15150,15151,15152,15153,15154,15155,15156,15157,15158,15159,15160,15161,15162,15163,15164,15165,15166,1516
7,15168,15169,15170,15171,15172,15173,15174,15175,15176,15177,15178,15179,15180,15181,15182,15183,15184,15185,15186,15187,15188,15189,15190,15191,15192,15193,15194,15195,15196,15197,15198,15199,15200,15201,15202,15203,15204,15205,15206,15207,15208,15209,15210,15211,15212,15213,15214,15215,15216,15217,15218,15219,15220,15221,15222,15223,15224,15225,15226,15227,15228,15229,15230,15231,15232,15233,15234,15235,15236,15237,15238,15239,15240,15241,15242,15243,15244,15245,15246,15247,15248,15249,15250,15251,15252,15253,15254,15255,15256,15257,15258,15259,15260,15261,15262,15263,15264,15265,15266,15267,15268,15269,15270,15271,15272,15273,15274,15275,15276,15277,15278,15279,15280,15281,15282,15283,15284,15285,15286,15287,15288,15289,15290,15291,15292,15293,15294,15295,15296,15297,15298,15299,15300,15301,15302,15303,15304,15305,15306,15307,15308,15309,15310,15311,15312,15313,15314,15315,15316,15317,15318,15319,15320,15321,15322,15323,15324,15325,15326,15327,15328,15329,15330,15331,15332,15333,15334,15335,15336,15337,15338,15339,15340,15341,15342,15343,15344,15345,15346,15347,15348,15349,15350,15351,15352,15353,15354,15355,15356,15357,15358,15359,15360,15361,15362,15363,15364,15365,15366,15367,15368,15369,15370,15371,15372,15373,15374,15375,15376,15377,15378,15379,15380,15381,15382,15383,15384,15385,15386,15387,15388,15389,15390,15391,15392,15393,15394,15395,15396,15397,15398,15399,15400,15401,15402,15403,15404,15405,15406,15407,15408,15409,15410,15411,15412,15413,15414,15415,15416,15417,15418,15419,15420,15421,15422,15423,15424,15425,15426,15427,15428,15429,15430,15431,15432,15433,15434,15435,15436,15437,15438,15439,15440,15441,15442,15443,15444,15445,15446,15447,15448,15449,15450,15451,15452,15453,15454,15455,15456,15457,15458,15459,15460,15461,15462,15463,15464,15465,15466,15467,15468,15469,15470,15471,15472,15473,15474,15475,15476,15477,15478,15479,15480,15481,15482,15483,15484,15485,15486,15487,15488,15489,15490,15491,15492,15493,15494,15495,15496,15497,15498,15499,15500,
15501,15502,15503,15504,15505,15506,15507,15508,15509,15510,15511,15512,15513,15514,15515,15516,15517,15518,15519,15520,15521,15522,15523,15524,15525,15526,15527,15528,15529,15530,15531,15532,15533,15534,15535,15536,15537,15538,15539,15540,15541,15542,15543,15544,15545,15546,15547,15548,15549,15550,15551,15552,15553,15554,15555,15556,15557,15558,15559,15560,15561,15562,15563,15564,15565,15566,15567,15568,15569,15570,15571,15572,15573,15574,15575,15576,15577,15578,15579,15580,15581,15582,15583,15584,15585,15586,15587,15588,15589,15590,15591,15592,15593,15594,15595,15596,15597,15598,15599,15600,15601,15602,15603,15604,15605,15606,15607,15608,15609,15610,15611,15612,15613,15614,15615,15616,15617,15618,15619,15620,15621,15622,15623,15624,15625,15626,15627,15628,15629,15630,15631,15632,15633,15634,15635,15636,15637,15638,15639,15640,15641,15642,15643,15644,15645,15646,15647,15648,15649,15650,15651,15652,15653,15654,15655,15656,15657,15658,15659,15660,15661,15662,15663,15664,15665,15666,15667,15668,15669,15670,15671,15672,15673,15674,15675,15676,15677,15678,15679,15680,15681,15682,15683,15684,15685,15686,15687,15688,15689,15690,15691,15692,15693,15694,15695,15696,15697,15698,15699,15700,15701,15702,15703,15704,15705,15706,15707,15708,15709,15710,15711,15712,15713,15714,15715,15716,15717,15718,15719,15720,15721,15722,15723,15724,15725,15726,15727,15728,15729,15730,15731,15732,15733,15734,15735,15736,15737,15738,15739,15740,15741,15742,15743,15744,15745,15746,15747,15748,15749,15750,15751,15752,15753,15754,15755,15756,15757,15758,15759,15760,15761,15762,15763,15764,15765,15766,15767,15768,15769,15770,15771,15772,15773,15774,15775,15776,15777,15778,15779,15780,15781,15782,15783,15784,15785,15786,15787,15788,15789,15790,15791,15792,15793,15794,15795,15796,15797,15798,15799,15800,15801,15802,15803,15804,15805,15806,15807,15808,15809,15810,15811,15812,15813,15814,15815,15816,15817,15818,15819,15820,15821,15822,15823,15824,15825,15826,15827,15828,15829,15830,15831,15832,15833,15
834,15835,15836,15837,15838,15839,15840,15841,15842,15843,15844,15845,15846,15847,15848,15849,15850,15851,15852,15853,15854,15855,15856,15857,15858,15859,15860,15861,15862,15863,15864,15865,15866,15867,15868,15869,15870,15871,15872,15873,15874,15875,15876,15877,15878,15879,15880,15881,15882,15883,15884,15885,15886,15887,15888,15889,15890,15891,15892,15893,15894,15895,15896,15897,15898,15899,15900,15901,15902,15903,15904,15905,15906,15907,15908,15909,15910,15911,15912,15913,15914,15915,15916,15917,15918,15919,15920,15921,15922,15923,15924,15925,15926,15927,15928,15929,15930,15931,15932,15933,15934,15935,15936,15937,15938,15939,15940,15941,15942,15943,15944,15945,15946,15947,15948,15949,15950,15951,15952,15953,15954,15955,15956,15957,15958,15959,15960,15961,15962,15963,15964,15965,15966,15967,15968,15969,15970,15971,15972,15973,15974,15975,15976,15977,15978,15979,15980,15981,15982,15983,15984,15985,15986,15987,15988,15989,15990,15991,15992,15993,15994,15995,15996,15997,15998,15999,16000,16001,16002,16003,16004,16005,16006,16007,16008,16009,16010,16011,16012,16013,16014,16015,16016,16017,16018,16019,16020,16021,16022,16023,16024,16025,16026,16027,16028,16029,16030,16031,16032,16033,16034,16035,16036,16037,16038,16039,16040,16041,16042,16043,16044,16045,16046,16047,16048,16049,16050,16051,16052,16053,16054,16055,16056,16057,16058,16059,16060,16061,16062,16063,16064,16065,16066,16067,16068,16069,16070,16071,16072,16073,16074,16075,16076,16077,16078,16079,16080,16081,16082,16083,16084,16085,16086,16087,16088,16089,16090,16091,16092,16093,16094,16095,16096,16097,16098,16099,16100,16101,16102,16103,16104,16105,16106,16107,16108,16109,16110,16111,16112,16113,16114,16115,16116,16117,16118,16119,16120,16121,16122,16123,16124,16125,16126,16127,16128,16129,16130,16131,16132,16133,16134,16135,16136,16137,16138,16139,16140,16141,16142,16143,16144,16145,16146,16147,16148,16149,16150,16151,16152,16153,16154,16155,16156,16157,16158,16159,16160,16161,16162,16163,16164,16165,16166,1616
7,16168,16169,16170,16171,16172,16173,16174,16175,16176,16177,16178,16179,16180,16181,16182,16183,16184,16185,16186,16187,16188,16189,16190,16191,16192,16193,16194,16195,16196,16197,16198,16199,16200,16201,16202,16203,16204,16205,16206,16207,16208,16209,16210,16211,16212,16213,16214,16215,16216,16217,16218,16219,16220,16221,16222,16223,16224,16225,16226,16227,16228,16229,16230,16231,16232,16233,16234,16235,16236,16237,16238,16239,16240,16241,16242,16243,16244,16245,16246,16247,16248,16249,16250,16251,16252,16253,16254,16255,16256,16257,16258,16259,16260,16261,16262,16263,16264,16265,16266,16267,16268,16269,16270,16271,16272,16273,16274,16275,16276,16277,16278,16279,16280,16281,16282,16283,16284,16285,16286,16287,16288,16289,16290,16291,16292,16293,16294,16295,16296,16297,16298,16299,16300,16301,16302,16303,16304,16305,16306,16307,16308,16309,16310,16311,16312,16313,16314,16315,16316,16317,16318,16319,16320,16321,16322,16323,16324,16325,16326,16327,16328,16329,16330,16331,16332,16333,16334,16335,16336,16337,16338,16339,16340,16341,16342,16343,16344,16345,16346,16347,16348,16349,16350,16351,16352,16353,16354,16355,16356,16357,16358,16359,16360,16361,16362,16363,16364,16365,16366,16367,16368,16369,16370,16371,16372,16373,16374,16375,16376,16377,16378,16379,16380,16381,16382,16383,16384,16385,16386,16387,16388,16389,16390,16391,16392,16393,16394,16395,16396,16397,16398,16399,16400,16401,16402,16403,16404,16405,16406,16407,16408,16409,16410,16411,16412,16413,16414,16415,16416,16417,16418,16419,16420,16421,16422,16423,16424,16425,16426,16427,16428,16429,16430,16431,16432,16433,16434,16435,16436,16437,16438,16439,16440,16441,16442,16443,16444,16445,16446,16447,16448,16449,16450,16451,16452,16453,16454,16455,16456,16457,16458,16459,16460,16461,16462,16463,16464,16465,16466,16467,16468,16469,16470,16471,16472,16473,16474,16475,16476,16477,16478,16479,16480,16481,16482,16483,16484,16485,16486,16487,16488,16489,16490,16491,16492,16493,16494,16495,16496,16497,16498,16499,16500,
16501,16502,16503,16504,16505,16506,16507,16508,16509,16510,16511,16512,16513,16514,16515,16516,16517,16518,16519,16520,16521,16522,16523,16524,16525,16526,16527,16528,16529,16530,16531,16532,16533,16534,16535,16536,16537,16538,16539,16540,16541,16542,16543,16544,16545,16546,16547,16548,16549,16550,16551,16552,16553,16554,16555,16556,16557,16558,16559,16560,16561,16562,16563,16564,16565,16566,16567,16568,16569,16570,16571,16572,16573,16574,16575,16576,16577,16578,16579,16580,16581,16582,16583,16584,16585,16586,16587,16588,16589,16590,16591,16592,16593,16594,16595,16596,16597,16598,16599,16600,16601,16602,16603,16604,16605,16606,16607,16608,16609,16610,16611,16612,16613,16614,16615,16616,16617,16618,16619,16620,16621,16622,16623,16624,16625,16626,16627,16628,16629,16630,16631,16632,16633,16634,16635,16636,16637,16638,16639,16640,16641,16642,16643,16644,16645,16646,16647,16648,16649,16650,16651,16652,16653,16654,16655,16656,16657,16658,16659,16660,16661,16662,16663,16664,16665,16666,16667,16668,16669,16670,16671,16672,16673,16674,16675,16676,16677,16678,16679,16680,16681,16682,16683,16684,16685,16686,16687,16688,16689,16690,16691,16692,16693,16694,16695,16696,16697,16698,16699,16700,16701,16702,16703,16704,16705,16706,16707,16708,16709,16710,16711,16712,16713,16714,16715,16716,16717,16718,16719,16720,16721,16722,16723,16724,16725,16726,16727,16728,16729,16730,16731,16732,16733,16734,16735,16736,16737,16738,16739,16740,16741,16742,16743,16744,16745,16746,16747,16748,16749,16750,16751,16752,16753,16754,16755,16756,16757,16758,16759,16760,16761,16762,16763,16764,16765,16766,16767,16768,16769,16770,16771,16772,16773,16774,16775,16776,16777,16778,16779,16780,16781,16782,16783,16784,16785,16786,16787,16788,16789,16790,16791,16792,16793,16794,16795,16796,16797,16798,16799,16800,16801,16802,16803,16804,16805,16806,16807,16808,16809,16810,16811,16812,16813,16814,16815,16816,16817,16818,16819,16820,16821,16822,16823,16824,16825,16826,16827,16828,16829,16830,16831,16832,16833,16
834,16835,16836,16837,16838,16839,16840,16841,16842,16843,16844,16845,16846,16847,16848,16849,16850,16851,16852,16853,16854,16855,16856,16857,16858,16859,16860,16861,16862,16863,16864,16865,16866,16867,16868,16869,16870,16871,16872,16873,16874,16875,16876,16877,16878,16879,16880,16881,16882,16883,16884,16885,16886,16887,16888,16889,16890,16891,16892,16893,16894,16895,16896,16897,16898,16899,16900,16901,16902,16903,16904,16905,16906,16907,16908,16909,16910,16911,16912,16913,16914,16915,16916,16917,16918,16919,16920,16921,16922,16923,16924,16925,16926,16927,16928,16929,16930,16931,16932,16933,16934,16935,16936,16937,16938,16939,16940,16941,16942,16943,16944,16945,16946,16947,16948,16949,16950,16951,16952,16953,16954,16955,16956,16957,16958,16959,16960,16961,16962,16963,16964,16965,16966,16967,16968,16969,16970,16971,16972,16973,16974,16975,16976,16977,16978,16979,16980,16981,16982,16983,16984,16985,16986,16987,16988,16989,16990,16991,16992,16993,16994,16995,16996,16997,16998,16999,17000,17001,17002,17003,17004,17005,17006,17007,17008,17009,17010,17011,17012,17013,17014,17015,17016,17017,17018,17019,17020,17021,17022,17023,17024,17025,17026,17027,17028,17029,17030,17031,17032,17033,17034,17035,17036,17037,17038,17039,17040,17041,17042,17043,17044,17045,17046,17047,17048,17049,17050,17051,17052,17053,17054,17055,17056,17057,17058,17059,17060,17061,17062,17063,17064,17065,17066,17067,17068,17069,17070,17071,17072,17073,17074,17075,17076,17077,17078,17079,17080,17081,17082,17083,17084,17085,17086,17087,17088,17089,17090,17091,17092,17093,17094,17095,17096,17097,17098,17099,17100,17101,17102,17103,17104,17105,17106,17107,17108,17109,17110,17111,17112,17113,17114,17115,17116,17117,17118,17119,17120,17121,17122,17123,17124,17125,17126,17127,17128,17129,17130,17131,17132,17133,17134,17135,17136,17137,17138,17139,17140,17141,17142,17143,17144,17145,17146,17147,17148,17149,17150,17151,17152,17153,17154,17155,17156,17157,17158,17159,17160,17161,17162,17163,17164,17165,17166,1716
7,17168,17169,17170,17171,17172,17173,17174,17175,17176,17177,17178,17179,17180,17181,17182,17183,17184,17185,17186,17187,17188,17189,17190,17191,17192,17193,17194,17195,17196,17197,17198,17199,17200,17201,17202,17203,17204,17205,17206,17207,17208,17209,17210,17211,17212,17213,17214,17215,17216,17217,17218,17219,17220,17221,17222,17223,17224,17225,17226,17227,17228,17229,17230,17231,17232,17233,17234,17235,17236,17237,17238,17239,17240,17241,17242,17243,17244,17245,17246,17247,17248,17249,17250,17251,17252,17253,17254,17255,17256,17257,17258,17259,17260,17261,17262,17263,17264,17265,17266,17267,17268,17269,17270,17271,17272,17273,17274,17275,17276,17277,17278,17279,17280,17281,17282,17283,17284,17285,17286,17287,17288,17289,17290,17291,17292,17293,17294,17295,17296,17297,17298,17299,17300,17301,17302,17303,17304,17305,17306,17307,17308,17309,17310,17311,17312,17313,17314,17315,17316,17317,17318,17319,17320,17321,17322,17323,17324,17325,17326,17327,17328,17329,17330,17331,17332,17333,17334,17335,17336,17337,17338,17339,17340,17341,17342,17343,17344,17345,17346,17347,17348,17349,17350,17351,17352,17353,17354,17355,17356,17357,17358,17359,17360,17361,17362,17363,17364,17365,17366,17367,17368,17369,17370,17371,17372,17373,17374,17375,17376,17377,17378,17379,17380,17381,17382,17383,17384,17385,17386,17387,17388,17389,17390,17391,17392,17393,17394,17395,17396,17397,17398,17399,17400,17401,17402,17403,17404,17405,17406,17407,17408,17409,17410,17411,17412,17413,17414,17415,17416,17417,17418,17419,17420,17421,17422,17423,17424,17425,17426,17427,17428,17429,17430,17431,17432,17433,17434,17435,17436,17437,17438,17439,17440,17441,17442,17443,17444,17445,17446,17447,17448,17449,17450,17451,17452,17453,17454,17455,17456,17457,17458,17459,17460,17461,17462,17463,17464,17465,17466,17467,17468,17469,17470,17471,17472,17473,17474,17475,17476,17477,17478,17479,17480,17481,17482,17483,17484,17485,17486,17487,17488,17489,17490,17491,17492,17493,17494,17495,17496,17497,17498,17499,17500,
17501,17502,17503,17504,17505,17506,17507,17508,17509,17510,17511,17512,17513,17514,17515,17516,17517,17518,17519,17520,17521,17522,17523,17524,17525,17526,17527,17528,17529,17530,17531,17532,17533,17534,17535,17536,17537,17538,17539,17540,17541,17542,17543,17544,17545,17546,17547,17548,17549,17550,17551,17552,17553,17554,17555,17556,17557,17558,17559,17560,17561,17562,17563,17564,17565,17566,17567,17568,17569,17570,17571,17572,17573,17574,17575,17576,17577,17578,17579,17580,17581,17582,17583,17584,17585,17586,17587,17588,17589,17590,17591,17592,17593,17594,17595,17596,17597,17598,17599,17600,17601,17602,17603,17604,17605,17606,17607,17608,17609,17610,17611,17612,17613,17614,17615,17616,17617,17618,17619,17620,17621,17622,17623,17624,17625,17626,17627,17628,17629,17630,17631,17632,17633,17634,17635,17636,17637,17638,17639,17640,17641,17642,17643,17644,17645,17646,17647,17648,17649,17650,17651,17652,17653,17654,17655,17656,17657,17658,17659,17660,17661,17662,17663,17664,17665,17666,17667,17668,17669,17670,17671,17672,17673,17674,17675,17676,17677,17678,17679,17680,17681,17682,17683,17684,17685,17686,17687,17688,17689,17690,17691,17692,17693,17694,17695,17696,17697,17698,17699,17700,17701,17702,17703,17704,17705,17706,17707,17708,17709,17710,17711,17712,17713,17714,17715,17716,17717,17718,17719,17720,17721,17722,17723,17724,17725,17726,17727,17728,17729,17730,17731,17732,17733,17734,17735,17736,17737,17738,17739,17740,17741,17742,17743,17744,17745,17746,17747,17748,17749,17750,17751,17752,17753,17754,17755,17756,17757,17758,17759,17760,17761,17762,17763,17764,17765,17766,17767,17768,17769,17770,17771,17772,17773,17774,17775,17776,17777,17778,17779,17780,17781,17782,17783,17784,17785,17786,17787,17788,17789,17790,17791,17792,17793,17794,17795,17796,17797,17798,17799,17800,17801,17802,17803,17804,17805,17806,17807,17808,17809,17810,17811,17812,17813,17814,17815,17816,17817,17818,17819,17820,17821,17822,17823,17824,17825,17826,17827,17828,17829,17830,17831,17832,17833,17
834,17835,17836,17837,17838,17839,17840,17841,17842,17843,17844,17845,17846,17847,17848,17849,17850,17851,17852,17853,17854,17855,17856,17857,17858,17859,17860,17861,17862,17863,17864,17865,17866,17867,17868,17869,17870,17871,17872,17873,17874,17875,17876,17877,17878,17879,17880,17881,17882,17883,17884,17885,17886,17887,17888,17889,17890,17891,17892,17893,17894,17895,17896,17897,17898,17899,17900,17901,17902,17903,17904,17905,17906,17907,17908,17909,17910,17911,17912,17913,17914,17915,17916,17917,17918,17919,17920,17921,17922,17923,17924,17925,17926,17927,17928,17929,17930,17931,17932,17933,17934,17935,17936,17937,17938,17939,17940,17941,17942,17943,17944,17945,17946,17947,17948,17949,17950,17951,17952,17953,17954,17955,17956,17957,17958,17959,17960,17961,17962,17963,17964,17965,17966,17967,17968,17969,17970,17971,17972,17973,17974,17975,17976,17977,17978,17979,17980,17981,17982,17983,17984,17985,17986,17987,17988,17989,17990,17991,17992,17993,17994,17995,17996,17997,17998,17999,18000,18001,18002,18003,18004,18005,18006,18007,18008,18009,18010,18011,18012,18013,18014,18015,18016,18017,18018,18019,18020,18021,18022,18023,18024,18025,18026,18027,18028,18029,18030,18031,18032,18033,18034,18035,18036,18037,18038,18039,18040,18041,18042,18043,18044,18045,18046,18047,18048,18049,18050,18051,18052,18053,18054,18055,18056,18057,18058,18059,18060,18061,18062,18063,18064,18065,18066,18067,18068,18069,18070,18071,18072,18073,18074,18075,18076,18077,18078,18079,18080,18081,18082,18083,18084,18085,18086,18087,18088,18089,18090,18091,18092,18093,18094,18095,18096,18097,18098,18099,18100,18101,18102,18103,18104,18105,18106,18107,18108,18109,18110,18111,18112,18113,18114,18115,18116,18117,18118,18119,18120,18121,18122,18123,18124,18125,18126,18127,18128,18129,18130,18131,18132,18133,18134,18135,18136,18137,18138,18139,18140,18141,18142,18143,18144,18145,18146,18147,18148,18149,18150,18151,18152,18153,18154,18155,18156,18157,18158,18159,18160,18161,18162,18163,18164,18165,18166,1816
7,18168,18169,18170,18171,18172,18173,18174,18175,18176,18177,18178,18179,18180,18181,18182,18183,18184,18185,18186,18187,18188,18189,18190,18191,18192,18193,18194,18195,18196,18197,18198,18199,18200,18201,18202,18203,18204,18205,18206,18207,18208,18209,18210,18211,18212,18213,18214,18215,18216,18217,18218,18219,18220,18221,18222,18223,18224,18225,18226,18227,18228,18229,18230,18231,18232,18233,18234,18235,18236,18237,18238,18239,18240,18241,18242,18243,18244,18245,18246,18247,18248,18249,18250,18251,18252,18253,18254,18255,18256,18257,18258,18259,18260,18261,18262,18263,18264,18265,18266,18267,18268,18269,18270,18271,18272,18273,18274,18275,18276,18277,18278,18279,18280,18281,18282,18283,18284,18285,18286,18287,18288,18289,18290,18291,18292,18293,18294,18295,18296,18297,18298,18299,18300,18301,18302,18303,18304,18305,18306,18307,18308,18309,18310,18311,18312,18313,18314,18315,18316,18317,18318,18319,18320,18321,18322,18323,18324,18325,18326,18327,18328,18329,18330,18331,18332,18333,18334,18335,18336,18337,18338,18339,18340,18341,18342,18343,18344,18345,18346,18347,18348,18349,18350,18351,18352,18353,18354,18355,18356,18357,18358,18359,18360,18361,18362,18363,18364,18365,18366,18367,18368,18369,18370,18371,18372,18373,18374,18375,18376,18377,18378,18379,18380,18381,18382,18383,18384,18385,18386,18387,18388,18389,18390,18391,18392,18393,18394,18395,18396,18397,18398,18399,18400,18401,18402,18403,18404,18405,18406,18407,18408,18409,18410,18411,18412,18413,18414,18415,18416,18417,18418,18419,18420,18421,18422,18423,18424,18425,18426,18427,18428,18429,18430,18431,18432,18433,18434,18435,18436,18437,18438,18439,18440,18441,18442,18443,18444,18445,18446,18447,18448,18449,18450,18451,18452,18453,18454,18455,18456,18457,18458,18459,18460,18461,18462,18463,18464,18465,18466,18467,18468,18469,18470,18471,18472,18473,18474,18475,18476,18477,18478,18479,18480,18481,18482,18483,18484,18485,18486,18487,18488,18489,18490,18491,18492,18493,18494,18495,18496,18497,18498,18499,18500,
18501,18502,18503,18504,18505,18506,18507,18508,18509,18510,18511,18512,18513,18514,18515,18516,18517,18518,18519,18520,18521,18522,18523,18524,18525,18526,18527,18528,18529,18530,18531,18532,18533,18534,18535,18536,18537,18538,18539,18540,18541,18542,18543,18544,18545,18546,18547,18548,18549,18550,18551,18552,18553,18554,18555,18556,18557,18558,18559,18560,18561,18562,18563,18564,18565,18566,18567,18568,18569,18570,18571,18572,18573,18574,18575,18576,18577,18578,18579,18580,18581,18582,18583,18584,18585,18586,18587,18588,18589,18590,18591,18592,18593,18594,18595,18596,18597,18598,18599,18600,18601,18602,18603,18604,18605,18606,18607,18608,18609,18610,18611,18612,18613,18614,18615,18616,18617,18618,18619,18620,18621,18622,18623,18624,18625,18626,18627,18628,18629,18630,18631,18632,18633,18634,18635,18636,18637,18638,18639,18640,18641,18642,18643,18644,18645,18646,18647,18648,18649,18650,18651,18652,18653,18654,18655,18656,18657,18658,18659,18660,18661,18662,18663,18664,18665,18666,18667,18668,18669,18670,18671,18672,18673,18674,18675,18676,18677,18678,18679,18680,18681,18682,18683,18684,18685,18686,18687,18688,18689,18690,18691,18692,18693,18694,18695,18696,18697,18698,18699,18700,18701,18702,18703,18704,18705,18706,18707,18708,18709,18710,18711,18712,18713,18714,18715,18716,18717,18718,18719,18720,18721,18722,18723,18724,18725,18726,18727,18728,18729,18730,18731,18732,18733,18734,18735,18736,18737,18738,18739,18740,18741,18742,18743,18744,18745,18746,18747,18748,18749,18750,18751,18752,18753,18754,18755,18756,18757,18758,18759,18760,18761,18762,18763,18764,18765,18766,18767,18768,18769,18770,18771,18772,18773,18774,18775,18776,18777,18778,18779,18780,18781,18782,18783,18784,18785,18786,18787,18788,18789,18790,18791,18792,18793,18794,18795,18796,18797,18798,18799,18800,18801,18802,18803,18804,18805,18806,18807,18808,18809,18810,18811,18812,18813,18814,18815,18816,18817,18818,18819,18820,18821,18822,18823,18824,18825,18826,18827,18828,18829,18830,18831,18832,18833,18
834,18835,18836,18837,18838,18839,18840,18841,18842,18843,18844,18845,18846,18847,18848,18849,18850,18851,18852,18853,18854,18855,18856,18857,18858,18859,18860,18861,18862,18863,18864,18865,18866,18867,18868,18869,18870,18871,18872,18873,18874,18875,18876,18877,18878,18879,18880,18881,18882,18883,18884,18885,18886,18887,18888,18889,18890,18891,18892,18893,18894,18895,18896,18897,18898,18899,18900,18901,18902,18903,18904,18905,18906,18907,18908,18909,18910,18911,18912,18913,18914,18915,18916,18917,18918,18919,18920,18921,18922,18923,18924,18925,18926,18927,18928,18929,18930,18931,18932,18933,18934,18935,18936,18937,18938,18939,18940,18941,18942,18943,18944,18945,18946,18947,18948,18949,18950,18951,18952,18953,18954,18955,18956,18957,18958,18959,18960,18961,18962,18963,18964,18965,18966,18967,18968,18969,18970,18971,18972,18973,18974,18975,18976,18977,18978,18979,18980,18981,18982,18983,18984,18985,18986,18987,18988,18989,18990,18991,18992,18993,18994,18995,18996,18997,18998,18999,19000,19001,19002,19003,19004,19005,19006,19007,19008,19009,19010,19011,19012,19013,19014,19015,19016,19017,19018,19019,19020,19021,19022,19023,19024,19025,19026,19027,19028,19029,19030,19031,19032,19033,19034,19035,19036,19037,19038,19039,19040,19041,19042,19043,19044,19045,19046,19047,19048,19049,19050,19051,19052,19053,19054,19055,19056,19057,19058,19059,19060,19061,19062,19063,19064,19065,19066,19067,19068,19069,19070,19071,19072,19073,19074,19075,19076,19077,19078,19079,19080,19081,19082,19083,19084,19085,19086,19087,19088,19089,19090,19091,19092,19093,19094,19095,19096,19097,19098,19099,19100,19101,19102,19103,19104,19105,19106,19107,19108,19109,19110,19111,19112,19113,19114,19115,19116,19117,19118,19119,19120,19121,19122,19123,19124,19125,19126,19127,19128,19129,19130,19131,19132,19133,19134,19135,19136,19137,19138,19139,19140,19141,19142,19143,19144,19145,19146,19147,19148,19149,19150,19151,19152,19153,19154,19155,19156,19157,19158,19159,19160,19161,19162,19163,19164,19165,19166,1916
7,19168,19169,19170,19171,19172,19173,19174,19175,19176,19177,19178,19179,19180,19181,19182,19183,19184,19185,19186,19187,19188,19189,19190,19191,19192,19193,19194,19195,19196,19197,19198,19199,19200,19201,19202,19203,19204,19205,19206,19207,19208,19209,19210,19211,19212,19213,19214,19215,19216,19217,19218,19219,19220,19221,19222,19223,19224,19225,19226,19227,19228,19229,19230,19231,19232,19233,19234,19235,19236,19237,19238,19239,19240,19241,19242,19243,19244,19245,19246,19247,19248,19249,19250,19251,19252,19253,19254,19255,19256,19257,19258,19259,19260,19261,19262,19263,19264,19265,19266,19267,19268,19269,19270,19271,19272,19273,19274,19275,19276,19277,19278,19279,19280,19281,19282,19283,19284,19285,19286,19287,19288,19289,19290,19291,19292,19293,19294,19295,19296,19297,19298,19299,19300,19301,19302,19303,19304,19305,19306,19307,19308,19309,19310,19311,19312,19313,19314,19315,19316,19317,19318,19319,19320,19321,19322,19323,19324,19325,19326,19327,19328,19329,19330,19331,19332,19333,19334,19335,19336,19337,19338,19339,19340,19341,19342,19343,19344,19345,19346,19347,19348,19349,19350,19351,19352,19353,19354,19355,19356,19357,19358,19359,19360,19361,19362,19363,19364,19365,19366,19367,19368,19369,19370,19371,19372,19373,19374,19375,19376,19377,19378,19379,19380,19381,19382,19383,19384,19385,19386,19387,19388,19389,19390,19391,19392,19393,19394,19395,19396,19397,19398,19399,19400,19401,19402,19403,19404,19405,19406,19407,19408,19409,19410,19411,19412,19413,19414,19415,19416,19417,19418,19419,19420,19421,19422,19423,19424,19425,19426,19427,19428,19429,19430,19431,19432,19433,19434,19435,19436,19437,19438,19439,19440,19441,19442,19443,19444,19445,19446,19447,19448,19449,19450,19451,19452,19453,19454,19455,19456,19457,19458,19459,19460,19461,19462,19463,19464,19465,19466,19467,19468,19469,19470,19471,19472,19473,19474,19475,19476,19477,19478,19479,19480,19481,19482,19483,19484,19485,19486,19487,19488,19489,19490,19491,19492,19493,19494,19495,19496,19497,19498,19499,19500,
19501,19502,19503,19504,19505,19506,19507,19508,19509,19510,19511,19512,19513,19514,19515,19516,19517,19518,19519,19520,19521,19522,19523,19524,19525,19526,19527,19528,19529,19530,19531,19532,19533,19534,19535,19536,19537,19538,19539,19540,19541,19542,19543,19544,19545,19546,19547,19548,19549,19550,19551,19552,19553,19554,19555,19556,19557,19558,19559,19560,19561,19562,19563,19564,19565,19566,19567,19568,19569,19570,19571,19572,19573,19574,19575,19576,19577,19578,19579,19580,19581,19582,19583,19584,19585,19586,19587,19588,19589,19590,19591,19592,19593,19594,19595,19596,19597,19598,19599,19600,19601,19602,19603,19604,19605,19606,19607,19608,19609,19610,19611,19612,19613,19614,19615,19616,19617,19618,19619,19620,19621,19622,19623,19624,19625,19626,19627,19628,19629,19630,19631,19632,19633,19634,19635,19636,19637,19638,19639,19640,19641,19642,19643,19644,19645,19646,19647,19648,19649,19650,19651,19652,19653,19654,19655,19656,19657,19658,19659,19660,19661,19662,19663,19664,19665,19666,19667,19668,19669,19670,19671,19672,19673,19674,19675,19676,19677,19678,19679,19680,19681,19682,19683,19684,19685,19686,19687,19688,19689,19690,19691,19692,19693,19694,19695,19696,19697,19698,19699,19700,19701,19702,19703,19704,19705,19706,19707,19708,19709,19710,19711,19712,19713,19714,19715,19716,19717,19718,19719,19720,19721,19722,19723,19724,19725,19726,19727,19728,19729,19730,19731,19732,19733,19734,19735,19736,19737,19738,19739,19740,19741,19742,19743,19744,19745,19746,19747,19748,19749,19750,19751,19752,19753,19754,19755,19756,19757,19758,19759,19760,19761,19762,19763,19764,19765,19766,19767,19768,19769,19770,19771,19772,19773,19774,19775,19776,19777,19778,19779,19780,19781,19782,19783,19784,19785,19786,19787,19788,19789,19790,19791,19792,19793,19794,19795,19796,19797,19798,19799,19800,19801,19802,19803,19804,19805,19806,19807,19808,19809,19810,19811,19812,19813,19814,19815,19816,19817,19818,19819,19820,19821,19822,19823,19824,19825,19826,19827,19828,19829,19830,19831,19832,19833,19
834,19835,19836,19837,19838,19839,19840,19841,19842,19843,19844,19845,19846,19847,19848,19849,19850,19851,19852,19853,19854,19855,19856,19857,19858,19859,19860,19861,19862,19863,19864,19865,19866,19867,19868,19869,19870,19871,19872,19873,19874,19875,19876,19877,19878,19879,19880,19881,19882,19883,19884,19885,19886,19887,19888,19889,19890,19891,19892,19893,19894,19895,19896,19897,19898,19899,19900,19901,19902,19903,19904,19905,19906,19907,19908,19909,19910,19911,19912,19913,19914,19915,19916,19917,19918,19919,19920,19921,19922,19923,19924,19925,19926,19927,19928,19929,19930,19931,19932,19933,19934,19935,19936,19937,19938,19939,19940,19941,19942,19943,19944,19945,19946,19947,19948,19949,19950,19951,19952,19953,19954,19955,19956,19957,19958,19959,19960,19961,19962,19963,19964,19965,19966,19967,19968,19969,19970,19971,19972,19973,19974,19975,19976,19977,19978,19979,19980,19981,19982,19983,19984,19985,19986,19987,19988,19989,19990,19991,19992,19993,19994,19995,19996,19997,19998,19999,20000,20001,20002,20003,20004,20005,20006,20007,20008,20009,20010,20011,20012,20013,20014,20015,20016,20017,20018,20019,20020,20021,20022,20023,20024,20025,20026,20027,20028,20029,20030,20031,20032,20033,20034,20035,20036,20037,20038,20039,20040,20041,20042,20043,20044,20045,20046,20047,20048,20049,20050,20051,20052,20053,20054,20055,20056,20057,20058,20059,20060,20061,20062,20063,20064,20065,20066,20067,20068,20069,20070,20071,20072,20073,20074,20075,20076,20077,20078,20079,20080,20081,20082,20083,20084,20085,20086,20087,20088,20089,20090,20091,20092,20093,20094,20095,20096,20097,20098,20099,20100,20101,20102,20103,20104,20105,20106,20107,20108,20109,20110,20111,20112,20113,20114,20115,20116,20117,20118,20119,20120,20121,20122,20123,20124,20125,20126,20127,20128,20129,20130,20131,20132,20133,20134,20135,20136,20137,20138,20139,20140,20141,20142,20143,20144,20145,20146,20147,20148,20149,20150,20151,20152,20153,20154,20155,20156,20157,20158,20159,20160,20161,20162,20163,20164,20165,20166,2016
7,20168,20169,20170,20171,20172,20173,20174,20175,20176,20177,20178,20179,20180,20181,20182,20183,20184,20185,20186,20187,20188,20189,20190,20191,20192,20193,20194,20195,20196,20197,20198,20199,20200,20201,20202,20203,20204,20205,20206,20207,20208,20209,20210,20211,20212,20213,20214,20215,20216,20217,20218,20219,20220,20221,20222,20223,20224,20225,20226,20227,20228,20229,20230,20231,20232,20233,20234,20235,20236,20237,20238,20239,20240,20241,20242,20243,20244,20245,20246,20247,20248,20249,20250,20251,20252,20253,20254,20255,20256,20257,20258,20259,20260,20261,20262,20263,20264,20265,20266,20267,20268,20269,20270,20271,20272,20273,20274,20275,20276,20277,20278,20279,20280,20281,20282,20283,20284,20285,20286,20287,20288,20289,20290,20291,20292,20293,20294,20295,20296,20297,20298,20299,20300,20301,20302,20303,20304,20305,20306,20307,20308,20309,20310,20311,20312,20313,20314,20315,20316,20317,20318,20319,20320,20321,20322,20323,20324,20325,20326,20327,20328,20329,20330,20331,20332,20333,20334,20335,20336,20337,20338,20339,20340,20341,20342,20343,20344,20345,20346,20347,20348,20349,20350,20351,20352,20353,20354,20355,20356,20357,20358,20359,20360,20361,20362,20363,20364,20365,20366,20367,20368,20369,20370,20371,20372,20373,20374,20375,20376,20377,20378,20379,20380,20381,20382,20383,20384,20385,20386,20387,20388,20389,20390,20391,20392,20393,20394,20395,20396,20397,20398,20399,20400,20401,20402,20403,20404,20405,20406,20407,20408,20409,20410,20411,20412,20413,20414,20415,20416,20417,20418,20419,20420,20421,20422,20423,20424,20425,20426,20427,20428,20429,20430,20431,20432,20433,20434,20435,20436,20437,20438,20439,20440,20441,20442,20443,20444,20445,20446,20447,20448,20449,20450,20451,20452,20453,20454,20455,20456,20457,20458,20459,20460,20461,20462,20463,20464,20465,20466,20467,20468,20469,20470,20471,20472,20473,20474,20475,20476,20477,20478,20479,20480,20481,20482,20483,20484,20485,20486,20487,20488,20489,20490,20491,20492,20493,20494,20495,20496,20497,20498,20499,20500,
20501,20502,20503,20504,20505,20506,20507,20508,20509,20510,20511,20512,20513,20514,20515,20516,20517,20518,20519,20520,20521,20522,20523,20524,20525,20526,20527,20528,20529,20530,20531,20532,20533,20534,20535,20536,20537,20538,20539,20540,20541,20542,20543,20544,20545,20546,20547,20548,20549,20550,20551,20552,20553,20554,20555,20556,20557,20558,20559,20560,20561,20562,20563,20564,20565,20566,20567,20568,20569,20570,20571,20572,20573,20574,20575,20576,20577,20578,20579,20580,20581,20582,20583,20584,20585,20586,20587,20588,20589,20590,20591,20592,20593,20594,20595,20596,20597,20598,20599,20600,20601,20602,20603,20604,20605,20606,20607,20608,20609,20610,20611,20612,20613,20614,20615,20616,20617,20618,20619,20620,20621,20622,20623,20624,20625,20626,20627,20628,20629,20630,20631,20632,20633,20634,20635,20636,20637,20638,20639,20640,20641,20642,20643,20644,20645,20646,20647,20648,20649,20650,20651,20652,20653,20654,20655,20656,20657,20658,20659,20660,20661,20662,20663,20664,20665,20666,20667,20668,20669,20670,20671,20672,20673,20674,20675,20676,20677,20678,20679,20680,20681,20682,20683,20684,20685,20686,20687,20688,20689,20690,20691,20692,20693,20694,20695,20696,20697,20698,20699,20700,20701,20702,20703,20704,20705,20706,20707,20708,20709,20710,20711,20712,20713,20714,20715,20716,20717,20718,20719,20720,20721,20722,20723,20724,20725,20726,20727,20728,20729,20730,20731,20732,20733,20734,20735,20736,20737,20738,20739,20740,20741,20742,20743,20744,20745,20746,20747,20748,20749,20750,20751,20752,20753,20754,20755,20756,20757,20758,20759,20760,20761,20762,20763,20764,20765,20766,20767,20768,20769,20770,20771,20772,20773,20774,20775,20776,20777,20778,20779,20780,20781,20782,20783,20784,20785,20786,20787,20788,20789,20790,20791,20792,20793,20794,20795,20796,20797,20798,20799,20800,20801,20802,20803,20804,20805,20806,20807,20808,20809,20810,20811,20812,20813,20814,20815,20816,20817,20818,20819,20820,20821,20822,20823,20824,20825,20826,20827,20828,20829,20830,20831,20832,20833,20
834,20835,20836,20837,20838,20839,20840,20841,20842,20843,20844,20845,20846,20847,20848,20849,20850,20851,20852,20853,20854,20855,20856,20857,20858,20859,20860,20861,20862,20863,20864,20865,20866,20867,20868,20869,20870,20871,20872,20873,20874,20875,20876,20877,20878,20879,20880,20881,20882,20883,20884,20885,20886,20887,20888,20889,20890,20891,20892,20893,20894,20895,20896,20897,20898,20899,20900,20901,20902,20903,20904,20905,20906,20907,20908,20909,20910,20911,20912,20913,20914,20915,20916,20917,20918,20919,20920,20921,20922,20923,20924,20925,20926,20927,20928,20929,20930,20931,20932,20933,20934,20935,20936,20937,20938,20939,20940,20941,20942,20943,20944,20945,20946,20947,20948,20949,20950,20951,20952,20953,20954,20955,20956,20957,20958,20959,20960,20961,20962,20963,20964,20965,20966,20967,20968,20969,20970,20971,20972,20973,20974,20975,20976,20977,20978,20979,20980,20981,20982,20983,20984,20985,20986,20987,20988,20989,20990,20991,20992,20993,20994,20995,20996,20997,20998,20999,21000,21001,21002,21003,21004,21005,21006,21007,21008,21009,21010,21011,21012,21013,21014,21015,21016,21017,21018,21019,21020,21021,21022,21023,21024,21025,21026,21027,21028,21029,21030,21031,21032,21033,21034,21035,21036,21037,21038,21039,21040,21041,21042,21043,21044,21045,21046,21047,21048,21049,21050,21051,21052,21053,21054,21055,21056,21057,21058,21059,21060,21061,21062,21063,21064,21065,21066,21067,21068,21069,21070,21071,21072,21073,21074,21075,21076,21077,21078,21079,21080,21081,21082,21083,21084,21085,21086,21087,21088,21089,21090,21091,21092,21093,21094,21095,21096,21097,21098,21099,21100,21101,21102,21103,21104,21105,21106,21107,21108,21109,21110,21111,21112,21113,21114,21115,21116,21117,21118,21119,21120,21121,21122,21123,21124,21125,21126,21127,21128,21129,21130,21131,21132,21133,21134,21135,21136,21137,21138,21139,21140,21141,21142,21143,21144,21145,21146,21147,21148,21149,21150,21151,21152,21153,21154,21155,21156,21157,21158,21159,21160,21161,21162,21163,21164,21165,21166,2116
7,21168,21169,21170,21171,21172,21173,21174,21175,21176,21177,21178,21179,21180,21181,21182,21183,21184,21185,21186,21187,21188,21189,21190,21191,21192,21193,21194,21195,21196,21197,21198,21199,21200,21201,21202,21203,21204,21205,21206,21207,21208,21209,21210,21211,21212,21213,21214,21215,21216,21217,21218,21219,21220,21221,21222,21223,21224,21225,21226,21227,21228,21229,21230,21231,21232,21233,21234,21235,21236,21237,21238,21239,21240,21241,21242,21243,21244,21245,21246,21247,21248,21249,21250,21251,21252,21253,21254,21255,21256,21257,21258,21259,21260,21261,21262,21263,21264,21265,21266,21267,21268,21269,21270,21271,21272,21273,21274,21275,21276,21277,21278,21279,21280,21281,21282,21283,21284,21285,21286,21287,21288,21289,21290,21291,21292,21293,21294,21295,21296,21297,21298,21299,21300,21301,21302,21303,21304,21305,21306,21307,21308,21309,21310,21311,21312,21313,21314,21315,21316,21317,21318,21319,21320,21321,21322,21323,21324,21325,21326,21327,21328,21329,21330,21331,21332,21333,21334,21335,21336,21337,21338,21339,21340,21341,21342,21343,21344,21345,21346,21347,21348,21349,21350,21351,21352,21353,21354,21355,21356,21357,21358,21359,21360,21361,21362,21363,21364,21365,21366,21367,21368,21369,21370,21371,21372,21373,21374,21375,21376,21377,21378,21379,21380,21381,21382,21383,21384,21385,21386,21387,21388,21389,21390,21391,21392,21393,21394,21395,21396,21397,21398,21399,21400,21401,21402,21403,21404,21405,21406,21407,21408,21409,21410,21411,21412,21413,21414,21415,21416,21417,21418,21419,21420,21421,21422,21423,21424,21425,21426,21427,21428,21429,21430,21431,21432,21433,21434,21435,21436,21437,21438,21439,21440,21441,21442,21443,21444,21445,21446,21447,21448,21449,21450,21451,21452,21453,21454,21455,21456,21457,21458,21459,21460,21461,21462,21463,21464,21465,21466,21467,21468,21469,21470,21471,21472,21473,21474,21475,21476,21477,21478,21479,21480,21481,21482,21483,21484,21485,21486,21487,21488,21489,21490,21491,21492,21493,21494,21495,21496,21497,21498,21499,21500,
21501,21502,21503,21504,21505,21506,21507,21508,21509,21510,21511,21512,21513,21514,21515,21516,21517,21518,21519,21520,21521,21522,21523,21524,21525,21526,21527,21528,21529,21530,21531,21532,21533,21534,21535,21536,21537,21538,21539,21540,21541,21542,21543,21544,21545,21546,21547,21548,21549,21550,21551,21552,21553,21554,21555,21556,21557,21558,21559,21560,21561,21562,21563,21564,21565,21566,21567,21568,21569,21570,21571,21572,21573,21574,21575,21576,21577,21578,21579,21580,21581,21582,21583,21584,21585,21586,21587,21588,21589,21590,21591,21592,21593,21594,21595,21596,21597,21598,21599,21600,21601,21602,21603,21604,21605,21606,21607,21608,21609,21610,21611,21612,21613,21614,21615,21616,21617,21618,21619,21620,21621,21622,21623,21624,21625,21626,21627,21628,21629,21630,21631,21632,21633,21634,21635,21636,21637,21638,21639,21640,21641,21642,21643,21644,21645,21646,21647,21648,21649,21650,21651,21652,21653,21654,21655,21656,21657,21658,21659,21660,21661,21662,21663,21664,21665,21666,21667,21668,21669,21670,21671,21672,21673,21674,21675,21676,21677,21678,21679,21680,21681,21682,21683,21684,21685,21686,21687,21688,21689,21690,21691,21692,21693,21694,21695,21696,21697,21698,21699,21700,21701,21702,21703,21704,21705,21706,21707,21708,21709,21710,21711,21712,21713,21714,21715,21716,21717,21718,21719,21720,21721,21722,21723,21724,21725,21726,21727,21728,21729,21730,21731,21732,21733,21734,21735,21736,21737,21738,21739,21740,21741,21742,21743,21744,21745,21746,21747,21748,21749,21750,21751,21752,21753,21754,21755,21756,21757,21758,21759,21760,21761,21762,21763,21764,21765,21766,21767,21768,21769,21770,21771,21772,21773,21774,21775,21776,21777,21778,21779,21780,21781,21782,21783,21784,21785,21786,21787,21788,21789,21790,21791,21792,21793,21794,21795,21796,21797,21798,21799,21800,21801,21802,21803,21804,21805,21806,21807,21808,21809,21810,21811,21812,21813,21814,21815,21816,21817,21818,21819,21820,21821,21822,21823,21824,21825,21826,21827,21828,21829,21830,21831,21832,21833,21
834,21835,21836,21837,21838,21839,21840,21841,21842,21843,21844,21845,21846,21847,21848,21849,21850,21851,21852,21853,21854,21855,21856,21857,21858,21859,21860,21861,21862,21863,21864,21865,21866,21867,21868,21869,21870,21871,21872,21873,21874,21875,21876,21877,21878,21879,21880,21881,21882,21883,21884,21885,21886,21887,21888,21889,21890,21891,21892,21893,21894,21895,21896,21897,21898,21899,21900,21901,21902,21903,21904,21905,21906,21907,21908,21909,21910,21911,21912,21913,21914,21915,21916,21917,21918,21919,21920,21921,21922,21923,21924,21925,21926,21927,21928,21929,21930,21931,21932,21933,21934,21935,21936,21937,21938,21939,21940,21941,21942,21943,21944,21945,21946,21947,21948,21949,21950,21951,21952,21953,21954,21955,21956,21957,21958,21959,21960,21961,21962,21963,21964,21965,21966,21967,21968,21969,21970,21971,21972,21973,21974,21975,21976,21977,21978,21979,21980,21981,21982,21983,21984,21985,21986,21987,21988,21989,21990,21991,21992,21993,21994,21995,21996,21997,21998,21999,22000,22001,22002,22003,22004,22005,22006,22007,22008,22009,22010,22011,22012,22013,22014,22015,22016,22017,22018,22019,22020,22021,22022,22023,22024,22025,22026,22027,22028,22029,22030,22031,22032,22033,22034,22035,22036,22037,22038,22039,22040,22041,22042,22043,22044,22045,22046,22047,22048,22049,22050,22051,22052,22053,22054,22055,22056,22057,22058,22059,22060,22061,22062,22063,22064,22065,22066,22067,22068,22069,22070,22071,22072,22073,22074,22075,22076,22077,22078,22079,22080,22081,22082,22083,22084,22085,22086,22087,22088,22089,22090,22091,22092,22093,22094,22095,22096,22097,22098,22099,22100,22101,22102,22103,22104,22105,22106,22107,22108,22109,22110,22111,22112,22113,22114,22115,22116,22117,22118,22119,22120,22121,22122,22123,22124,22125,22126,22127,22128,22129,22130,22131,22132,22133,22134,22135,22136,22137,22138,22139,22140,22141,22142,22143,22144,22145,22146,22147,22148,22149,22150,22151,22152,22153,22154,22155,22156,22157,22158,22159,22160,22161,22162,22163,22164,22165,22166,2216
7,22168,22169,22170,22171,22172,22173,22174,22175,22176,22177,22178,22179,22180,22181,22182,22183,22184,22185,22186,22187,22188,22189,22190,22191,22192,22193,22194,22195,22196,22197,22198,22199,22200,22201,22202,22203,22204,22205,22206,22207,22208,22209,22210,22211,22212,22213,22214,22215,22216,22217,22218,22219,22220,22221,22222,22223,22224,22225,22226,22227,22228,22229,22230,22231,22232,22233,22234,22235,22236,22237,22238,22239,22240,22241,22242,22243,22244,22245,22246,22247,22248,22249,22250,22251,22252,22253,22254,22255,22256,22257,22258,22259,22260,22261,22262,22263,22264,22265,22266,22267,22268,22269,22270,22271,22272,22273,22274,22275,22276,22277,22278,22279,22280,22281,22282,22283,22284,22285,22286,22287,22288,22289,22290,22291,22292,22293,22294,22295,22296,22297,22298,22299,22300,22301,22302,22303,22304,22305,22306,22307,22308,22309,22310,22311,22312,22313,22314,22315,22316,22317,22318,22319,22320,22321,22322,22323,22324,22325,22326,22327,22328,22329,22330,22331,22332,22333,22334,22335,22336,22337,22338,22339,22340,22341,22342,22343,22344,22345,22346,22347,22348,22349,22350,22351,22352,22353,22354,22355,22356,22357,22358,22359,22360,22361,22362,22363,22364,22365,22366,22367,22368,22369,22370,22371,22372,22373,22374,22375,22376,22377,22378,22379,22380,22381,22382,22383,22384,22385,22386,22387,22388,22389,22390,22391,22392,22393,22394,22395,22396,22397,22398,22399,22400,22401,22402,22403,22404,22405,22406,22407,22408,22409,22410,22411,22412,22413,22414,22415,22416,22417,22418,22419,22420,22421,22422,22423,22424,22425,22426,22427,22428,22429,22430,22431,22432,22433,22434,22435,22436,22437,22438,22439,22440,22441,22442,22443,22444,22445,22446,22447,22448,22449,22450,22451,22452,22453,22454,22455,22456,22457,22458,22459,22460,22461,22462,22463,22464,22465,22466,22467,22468,22469,22470,22471,22472,22473,22474,22475,22476,22477,22478,22479,22480,22481,22482,22483,22484,22485,22486,22487,22488,22489,22490,22491,22492,22493,22494,22495,22496,22497,22498,22499,22500,
22501,22502,22503,22504,22505,22506,22507,22508,22509,22510,22511,22512,22513,22514,22515,22516,22517,22518,22519,22520,22521,22522,22523,22524,22525,22526,22527,22528,22529,22530,22531,22532,22533,22534,22535,22536,22537,22538,22539,22540,22541,22542,22543,22544,22545,22546,22547,22548,22549,22550,22551,22552,22553,22554,22555,22556,22557,22558,22559,22560,22561,22562,22563,22564,22565,22566,22567,22568,22569,22570,22571,22572,22573,22574,22575,22576,22577,22578,22579,22580,22581,22582,22583,22584,22585,22586,22587,22588,22589,22590,22591,22592,22593,22594,22595,22596,22597,22598,22599,22600,22601,22602,22603,22604,22605,22606,22607,22608,22609,22610,22611,22612,22613,22614,22615,22616,22617,22618,22619,22620,22621,22622,22623,22624,22625,22626,22627,22628,22629,22630,22631,22632,22633,22634,22635,22636,22637,22638,22639,22640,22641,22642,22643,22644,22645,22646,22647,22648,22649,22650,22651,22652,22653,22654,22655,22656,22657,22658,22659,22660,22661,22662,22663,22664,22665,22666,22667,22668,22669,22670,22671,22672,22673,22674,22675,22676,22677,22678,22679,22680,22681,22682,22683,22684,22685,22686,22687,22688,22689,22690,22691,22692,22693,22694,22695,22696,22697,22698,22699,22700,22701,22702,22703,22704,22705,22706,22707,22708,22709,22710,22711,22712,22713,22714,22715,22716,22717,22718,22719,22720,22721,22722,22723,22724,22725,22726,22727,22728,22729,22730,22731,22732,22733,22734,22735,22736,22737,22738,22739,22740,22741,22742,22743,22744,22745,22746,22747,22748,22749,22750,22751,22752,22753,22754,22755,22756,22757,22758,22759,22760,22761,22762,22763,22764,22765,22766,22767,22768,22769,22770,22771,22772,22773,22774,22775,22776,22777,22778,22779,22780,22781,22782,22783,22784,22785,22786,22787,22788,22789,22790,22791,22792,22793,22794,22795,22796,22797,22798,22799,22800,22801,22802,22803,22804,22805,22806,22807,22808,22809,22810,22811,22812,22813,22814,22815,22816,22817,22818,22819,22820,22821,22822,22823,22824,22825,22826,22827,22828,22829,22830,22831,22832,22833,22
834,22835,22836,22837,22838,22839,22840,22841,22842,22843,22844,22845,22846,22847,22848,22849,22850,22851,22852,22853,22854,22855,22856,22857,22858,22859,22860,22861,22862,22863,22864,22865,22866,22867,22868,22869,22870,22871,22872,22873,22874,22875,22876,22877,22878,22879,22880,22881,22882,22883,22884,22885,22886,22887,22888,22889,22890,22891,22892,22893,22894,22895,22896,22897,22898,22899,22900,22901,22902,22903,22904,22905,22906,22907,22908,22909,22910,22911,22912,22913,22914,22915,22916,22917,22918,22919,22920,22921,22922,22923,22924,22925,22926,22927,22928,22929,22930,22931,22932,22933,22934,22935,22936,22937,22938,22939,22940,22941,22942,22943,22944,22945,22946,22947,22948,22949,22950,22951,22952,22953,22954,22955,22956,22957,22958,22959,22960,22961,22962,22963,22964,22965,22966,22967,22968,22969,22970,22971,22972,22973,22974,22975,22976,22977,22978,22979,22980,22981,22982,22983,22984,22985,22986,22987,22988,22989,22990,22991,22992,22993,22994,22995,22996,22997,22998,22999,23000,23001,23002,23003,23004,23005,23006,23007,23008,23009,23010,23011,23012,23013,23014,23015,23016,23017,23018,23019,23020,23021,23022,23023,23024,23025,23026,23027,23028,23029,23030,23031,23032,23033,23034,23035,23036,23037,23038,23039,23040,23041,23042,23043,23044,23045,23046,23047,23048,23049,23050,23051,23052,23053,23054,23055,23056,23057,23058,23059,23060,23061,23062,23063,23064,23065,23066,23067,23068,23069,23070,23071,23072,23073,23074,23075,23076,23077,23078,23079,23080,23081,23082,23083,23084,23085,23086,23087,23088,23089,23090,23091,23092,23093,23094,23095,23096,23097,23098,23099,23100,23101,23102,23103,23104,23105,23106,23107,23108,23109,23110,23111,23112,23113,23114,23115,23116,23117,23118,23119,23120,23121,23122,23123,23124,23125,23126,23127,23128,23129,23130,23131,23132,23133,23134,23135,23136,23137,23138,23139,23140,23141,23142,23143,23144,23145,23146,23147,23148,23149,23150,23151,23152,23153,23154,23155,23156,23157,23158,23159,23160,23161,23162,23163,23164,23165,23166,2316
7,23168,23169,23170,23171,23172,23173,23174,23175,23176,23177,23178,23179,23180,23181,23182,23183,23184,23185,23186,23187,23188,23189,23190,23191,23192,23193,23194,23195,23196,23197,23198,23199,23200,23201,23202,23203,23204,23205,23206,23207,23208,23209,23210,23211,23212,23213,23214,23215,23216,23217,23218,23219,23220,23221,23222,23223,23224,23225,23226,23227,23228,23229,23230,23231,23232,23233,23234,23235,23236,23237,23238,23239,23240,23241,23242,23243,23244,23245,23246,23247,23248,23249,23250,23251,23252,23253,23254,23255,23256,23257,23258,23259,23260,23261,23262,23263,23264,23265,23266,23267,23268,23269,23270,23271,23272,23273,23274,23275,23276,23277,23278,23279,23280,23281,23282,23283,23284,23285,23286,23287,23288,23289,23290,23291,23292,23293,23294,23295,23296,23297,23298,23299,23300,23301,23302,23303,23304,23305,23306,23307,23308,23309,23310,23311,23312,23313,23314,23315,23316,23317,23318,23319,23320,23321,23322,23323,23324,23325,23326,23327,23328,23329,23330,23331,23332,23333,23334,23335,23336,23337,23338,23339,23340,23341,23342,23343,23344,23345,23346,23347,23348,23349,23350,23351,23352,23353,23354,23355,23356,23357,23358,23359,23360,23361,23362,23363,23364,23365,23366,23367,23368,23369,23370,23371,23372,23373,23374,23375,23376,23377,23378,23379,23380,23381,23382,23383,23384,23385,23386,23387,23388,23389,23390,23391,23392,23393,23394,23395,23396,23397,23398,23399,23400,23401,23402,23403,23404,23405,23406,23407,23408,23409,23410,23411,23412,23413,23414,23415,23416,23417,23418,23419,23420,23421,23422,23423,23424,23425,23426,23427,23428,23429,23430,23431,23432,23433,23434,23435,23436,23437,23438,23439,23440,23441,23442,23443,23444,23445,23446,23447,23448,23449,23450,23451,23452,23453,23454,23455,23456,23457,23458,23459,23460,23461,23462,23463,23464,23465,23466,23467,23468,23469,23470,23471,23472,23473,23474,23475,23476,23477,23478,23479,23480,23481,23482,23483,23484,23485,23486,23487,23488,23489,23490,23491,23492,23493,23494,23495,23496,23497,23498,23499,23500,
23501,23502,23503,23504,23505,23506,23507,23508,23509,23510,23511,23512,23513,23514,23515,23516,23517,23518,23519,23520,23521,23522,23523,23524,23525,23526,23527,23528,23529,23530,23531,23532,23533,23534,23535,23536,23537,23538,23539,23540,23541,23542,23543,23544,23545,23546,23547,23548,23549,23550,23551,23552,23553,23554,23555,23556,23557,23558,23559,23560,23561,23562,23563,23564,23565,23566,23567,23568,23569,23570,23571,23572,23573,23574,23575,23576,23577,23578,23579,23580,23581,23582,23583,23584,23585,23586,23587,23588,23589,23590,23591,23592,23593,23594,23595,23596,23597,23598,23599,23600,23601,23602,23603,23604,23605,23606,23607,23608,23609,23610,23611,23612,23613,23614,23615,23616,23617,23618,23619,23620,23621,23622,23623,23624,23625,23626,23627,23628,23629,23630,23631,23632,23633,23634,23635,23636,23637,23638,23639,23640,23641,23642,23643,23644,23645,23646,23647,23648,23649,23650,23651,23652,23653,23654,23655,23656,23657,23658,23659,23660,23661,23662,23663,23664,23665,23666,23667,23668,23669,23670,23671,23672,23673,23674,23675,23676,23677,23678,23679,23680,23681,23682,23683,23684,23685,23686,23687,23688,23689,23690,23691,23692,23693,23694,23695,23696,23697,23698,23699,23700,23701,23702,23703,23704,23705,23706,23707,23708,23709,23710,23711,23712,23713,23714,23715,23716,23717,23718,23719,23720,23721,23722,23723,23724,23725,23726,23727,23728,23729,23730,23731,23732,23733,23734,23735,23736,23737,23738,23739,23740,23741,23742,23743,23744,23745,23746,23747,23748,23749,23750,23751,23752,23753,23754,23755,23756,23757,23758,23759,23760,23761,23762,23763,23764,23765,23766,23767,23768,23769,23770,23771,23772,23773,23774,23775,23776,23777,23778,23779,23780,23781,23782,23783,23784,23785,23786,23787,23788,23789,23790,23791,23792,23793,23794,23795,23796,23797,23798,23799,23800,23801,23802,23803,23804,23805,23806,23807,23808,23809,23810,23811,23812,23813,23814,23815,23816,23817,23818,23819,23820,23821,23822,23823,23824,23825,23826,23827,23828,23829,23830,23831,23832,23833,23
834,23835,23836,23837,23838,23839,23840,23841,23842,23843,23844,23845,23846,23847,23848,23849,23850,23851,23852,23853,23854,23855,23856,23857,23858,23859,23860,23861,23862,23863,23864,23865,23866,23867,23868,23869,23870,23871,23872,23873,23874,23875,23876,23877,23878,23879,23880,23881,23882,23883,23884,23885,23886,23887,23888,23889,23890,23891,23892,23893,23894,23895,23896,23897,23898,23899,23900,23901,23902,23903,23904,23905,23906,23907,23908,23909,23910,23911,23912,23913,23914,23915,23916,23917,23918,23919,23920,23921,23922,23923,23924,23925,23926,23927,23928,23929,23930,23931,23932,23933,23934,23935,23936,23937,23938,23939,23940,23941,23942,23943,23944,23945,23946,23947,23948,23949,23950,23951,23952,23953,23954,23955,23956,23957,23958,23959,23960,23961,23962,23963,23964,23965,23966,23967,23968,23969,23970,23971,23972,23973,23974,23975,23976,23977,23978,23979,23980,23981,23982,23983,23984,23985,23986,23987,23988,23989,23990,23991,23992,23993,23994,23995,23996,23997,23998,23999,24000,24001,24002,24003,24004,24005,24006,24007,24008,24009,24010,24011,24012,24013,24014,24015,24016,24017,24018,24019,24020,24021,24022,24023,24024,24025,24026,24027,24028,24029,24030,24031,24032,24033,24034,24035,24036,24037,24038,24039,24040,24041,24042,24043,24044,24045,24046,24047,24048,24049,24050,24051,24052,24053,24054,24055,24056,24057,24058,24059,24060,24061,24062,24063,24064,24065,24066,24067,24068,24069,24070,24071,24072,24073,24074,24075,24076,24077,24078,24079,24080,24081,24082,24083,24084,24085,24086,24087,24088,24089,24090,24091,24092,24093,24094,24095,24096,24097,24098,24099,24100,24101,24102,24103,24104,24105,24106,24107,24108,24109,24110,24111,24112,24113,24114,24115,24116,24117,24118,24119,24120,24121,24122,24123,24124,24125,24126,24127,24128,24129,24130,24131,24132,24133,24134,24135,24136,24137,24138,24139,24140,24141,24142,24143,24144,24145,24146,24147,24148,24149,24150,24151,24152,24153,24154,24155,24156,24157,24158,24159,24160,24161,24162,24163,24164,24165,24166,2416
7,24168,24169,24170,24171,24172,24173,24174,24175,24176,24177,24178,24179,24180,24181,24182,24183,24184,24185,24186,24187,24188,24189,24190,24191,24192,24193,24194,24195,24196,24197,24198,24199,24200,24201,24202,24203,24204,24205,24206,24207,24208,24209,24210,24211,24212,24213,24214,24215,24216,24217,24218,24219,24220,24221,24222,24223,24224,24225,24226,24227,24228,24229,24230,24231,24232,24233,24234,24235,24236,24237,24238,24239,24240,24241,24242,24243,24244,24245,24246,24247,24248,24249,24250,24251,24252,24253,24254,24255,24256,24257,24258,24259,24260,24261,24262,24263,24264,24265,24266,24267,24268,24269,24270,24271,24272,24273,24274,24275,24276,24277,24278,24279,24280,24281,24282,24283,24284,24285,24286,24287,24288,24289,24290,24291,24292,24293,24294,24295,24296,24297,24298,24299,24300,24301,24302,24303,24304,24305,24306,24307,24308,24309,24310,24311,24312,24313,24314,24315,24316,24317,24318,24319,24320,24321,24322,24323,24324,24325,24326,24327,24328,24329,24330,24331,24332,24333,24334,24335,24336,24337,24338,24339,24340,24341,24342,24343,24344,24345,24346,24347,24348,24349,24350,24351,24352,24353,24354,24355,24356,24357,24358,24359,24360,24361,24362,24363,24364,24365,24366,24367,24368,24369,24370,24371,24372,24373,24374,24375,24376,24377,24378,24379,24380,24381,24382,24383,24384,24385,24386,24387,24388,24389,24390,24391,24392,24393,24394,24395,24396,24397,24398,24399,24400,24401,24402,24403,24404,24405,24406,24407,24408,24409,24410,24411,24412,24413,24414,24415,24416,24417,24418,24419,24420,24421,24422,24423,24424,24425,24426,24427,24428,24429,24430,24431,24432,24433,24434,24435,24436,24437,24438,24439,24440,24441,24442,24443,24444,24445,24446,24447,24448,24449,24450,24451,24452,24453,24454,24455,24456,24457,24458,24459,24460,24461,24462,24463,24464,24465,24466,24467,24468,24469,24470,24471,24472,24473,24474,24475,24476,24477,24478,24479,24480,24481,24482,24483,24484,24485,24486,24487,24488,24489,24490,24491,24492,24493,24494,24495,24496,24497,24498,24499,24500,
24501,24502,24503,24504,24505,24506,24507,24508,24509,24510,24511,24512,24513,24514,24515,24516,24517,24518,24519,24520,24521,24522,24523,24524,24525,24526,24527,24528,24529,24530,24531,24532,24533,24534,24535,24536,24537,24538,24539,24540,24541,24542,24543,24544,24545,24546,24547,24548,24549,24550,24551,24552,24553,24554,24555,24556,24557,24558,24559,24560,24561,24562,24563,24564,24565,24566,24567,24568,24569,24570,24571,24572,24573,24574,24575,24576,24577,24578,24579,24580,24581,24582,24583,24584,24585,24586,24587,24588,24589,24590,24591,24592,24593,24594,24595,24596,24597,24598,24599,24600,24601,24602,24603,24604,24605,24606,24607,24608,24609,24610,24611,24612,24613,24614,24615,24616,24617,24618,24619,24620,24621,24622,24623,24624,24625,24626,24627,24628,24629,24630,24631,24632,24633,24634,24635,24636,24637,24638,24639,24640,24641,24642,24643,24644,24645,24646,24647,24648,24649,24650,24651,24652,24653,24654,24655,24656,24657,24658,24659,24660,24661,24662,24663,24664,24665,24666,24667,24668,24669,24670,24671,24672,24673,24674,24675,24676,24677,24678,24679,24680,24681,24682,24683,24684,24685,24686,24687,24688,24689,24690,24691,24692,24693,24694,24695,24696,24697,24698,24699,24700,24701,24702,24703,24704,24705,24706,24707,24708,24709,24710,24711,24712,24713,24714,24715,24716,24717,24718,24719,24720,24721,24722,24723,24724,24725,24726,24727,24728,24729,24730,24731,24732,24733,24734,24735,24736,24737,24738,24739,24740,24741,24742,24743,24744,24745,24746,24747,24748,24749,24750,24751,24752,24753,24754,24755,24756,24757,24758,24759,24760,24761,24762,24763,24764,24765,24766,24767,24768,24769,24770,24771,24772,24773,24774,24775,24776,24777,24778,24779,24780,24781,24782,24783,24784,24785,24786,24787,24788,24789,24790,24791,24792,24793,24794,24795,24796,24797,24798,24799,24800,24801,24802,24803,24804,24805,24806,24807,24808,24809,24810,24811,24812,24813,24814,24815,24816,24817,24818,24819,24820,24821,24822,24823,24824,24825,24826,24827,24828,24829,24830,24831,24832,24833,24
834,24835,24836,24837,24838,24839,24840,24841,24842,24843,24844,24845,24846,24847,24848,24849,24850,24851,24852,24853,24854,24855,24856,24857,24858,24859,24860,24861,24862,24863,24864,24865,24866,24867,24868,24869,24870,24871,24872,24873,24874,24875,24876,24877,24878,24879,24880,24881,24882,24883,24884,24885,24886,24887,24888,24889,24890,24891,24892,24893,24894,24895,24896,24897,24898,24899,24900,24901,24902,24903,24904,24905,24906,24907,24908,24909,24910,24911,24912,24913,24914,24915,24916,24917,24918,24919,24920,24921,24922,24923,24924,24925,24926,24927,24928,24929,24930,24931,24932,24933,24934,24935,24936,24937,24938,24939,24940,24941,24942,24943,24944,24945,24946,24947,24948,24949,24950,24951,24952,24953,24954,24955,24956,24957,24958,24959,24960,24961,24962,24963,24964,24965,24966,24967,24968,24969,24970,24971,24972,24973,24974,24975,24976,24977,24978,24979,24980,24981,24982,24983,24984,24985,24986,24987,24988,24989,24990,24991,24992,24993,24994,24995,24996,24997,24998,24999,25000,25001,25002,25003,25004,25005,25006,25007,25008,25009,25010,25011,25012,25013,25014,25015,25016,25017,25018,25019,25020,25021,25022,25023,25024,25025,25026,25027,25028,25029,25030,25031,25032,25033,25034,25035,25036,25037,25038,25039,25040,25041,25042,25043,25044,25045,25046,25047,25048,25049,25050,25051,25052,25053,25054,25055,25056,25057,25058,25059,25060,25061,25062,25063,25064,25065,25066,25067,25068,25069,25070,25071,25072,25073,25074,25075,25076,25077,25078,25079,25080,25081,25082,25083,25084,25085,25086,25087,25088,25089,25090,25091,25092,25093,25094,25095,25096,25097,25098,25099,25100,25101,25102,25103,25104,25105,25106,25107,25108,25109,25110,25111,25112,25113,25114,25115,25116,25117,25118,25119,25120,25121,25122,25123,25124,25125,25126,25127,25128,25129,25130,25131,25132,25133,25134,25135,25136,25137,25138,25139,25140,25141,25142,25143,25144,25145,25146,25147,25148,25149,25150,25151,25152,25153,25154,25155,25156,25157,25158,25159,25160,25161,25162,25163,25164,25165,25166,2516
7,25168,25169,25170,25171,25172,25173,25174,25175,25176,25177,25178,25179,25180,25181,25182,25183,25184,25185,25186,25187,25188,25189,25190,25191,25192,25193,25194,25195,25196,25197,25198,25199,25200,25201,25202,25203,25204,25205,25206,25207,25208,25209,25210,25211,25212,25213,25214,25215,25216,25217,25218,25219,25220,25221,25222,25223,25224,25225,25226,25227,25228,25229,25230,25231,25232,25233,25234,25235,25236,25237,25238,25239,25240,25241,25242,25243,25244,25245,25246,25247,25248,25249,25250,25251,25252,25253,25254,25255,25256,25257,25258,25259,25260,25261,25262,25263,25264,25265,25266,25267,25268,25269,25270,25271,25272,25273,25274,25275,25276,25277,25278,25279,25280,25281,25282,25283,25284,25285,25286,25287,25288,25289,25290,25291,25292,25293,25294,25295,25296,25297,25298,25299,25300,25301,25302,25303,25304,25305,25306,25307,25308,25309,25310,25311,25312,25313,25314,25315,25316,25317,25318,25319,25320,25321,25322,25323,25324,25325,25326,25327,25328,25329,25330,25331,25332,25333,25334,25335,25336,25337,25338,25339,25340,25341,25342,25343,25344,25345,25346,25347,25348,25349,25350,25351,25352,25353,25354,25355,25356,25357,25358,25359,25360,25361,25362,25363,25364,25365,25366,25367,25368,25369,25370,25371,25372,25373,25374,25375,25376,25377,25378,25379,25380,25381,25382,25383,25384,25385,25386,25387,25388,25389,25390,25391,25392,25393,25394,25395,25396,25397,25398,25399,25400,25401,25402,25403,25404,25405,25406,25407,25408,25409,25410,25411,25412,25413,25414,25415,25416,25417,25418,25419,25420,25421,25422,25423,25424,25425,25426,25427,25428,25429,25430,25431,25432,25433,25434,25435,25436,25437,25438,25439,25440,25441,25442,25443,25444,25445,25446,25447,25448,25449,25450,25451,25452,25453,25454,25455,25456,25457,25458,25459,25460,25461,25462,25463,25464,25465,25466,25467,25468,25469,25470,25471,25472,25473,25474,25475,25476,25477,25478,25479,25480,25481,25482,25483,25484,25485,25486,25487,25488,25489,25490,25491,25492,25493,25494,25495,25496,25497,25498,25499,25500,
25501,25502,25503,25504,25505,25506,25507,25508,25509,25510,25511,25512,25513,25514,25515,25516,25517,25518,25519,25520,25521,25522,25523,25524,25525,25526,25527,25528,25529,25530,25531,25532,25533,25534,25535,25536,25537,25538,25539,25540,25541,25542,25543,25544,25545,25546,25547,25548,25549,25550,25551,25552,25553,25554,25555,25556,25557,25558,25559,25560,25561,25562,25563,25564,25565,25566,25567,25568,25569,25570,25571,25572,25573,25574,25575,25576,25577,25578,25579,25580,25581,25582,25583,25584,25585,25586,25587,25588,25589,25590,25591,25592,25593,25594,25595,25596,25597,25598,25599,25600,25601,25602,25603,25604,25605,25606,25607,25608,25609,25610,25611,25612,25613,25614,25615,25616,25617,25618,25619,25620,25621,25622,25623,25624,25625,25626,25627,25628,25629,25630,25631,25632,25633,25634,25635,25636,25637,25638,25639,25640,25641,25642,25643,25644,25645,25646,25647,25648,25649,25650,25651,25652,25653,25654,25655,25656,25657,25658,25659,25660,25661,25662,25663,25664,25665,25666,25667,25668,25669,25670,25671,25672,25673,25674,25675,25676,25677,25678,25679,25680,25681,25682,25683,25684,25685,25686,25687,25688,25689,25690,25691,25692,25693,25694,25695,25696,25697,25698,25699,25700,25701,25702,25703,25704,25705,25706,25707,25708,25709,25710,25711,25712,25713,25714,25715,25716,25717,25718,25719,25720,25721,25722,25723,25724,25725,25726,25727,25728,25729,25730,25731,25732,25733,25734,25735,25736,25737,25738,25739,25740,25741,25742,25743,25744,25745,25746,25747,25748,25749,25750,25751,25752,25753,25754,25755,25756,25757,25758,25759,25760,25761,25762,25763,25764,25765,25766,25767,25768,25769,25770,25771,25772,25773,25774,25775,25776,25777,25778,25779,25780,25781,25782,25783,25784,25785,25786,25787,25788,25789,25790,25791,25792,25793,25794,25795,25796,25797,25798,25799,25800,25801,25802,25803,25804,25805,25806,25807,25808,25809,25810,25811,25812,25813,25814,25815,25816,25817,25818,25819,25820,25821,25822,25823,25824,25825,25826,25827,25828,25829,25830,25831,25832,25833,25
834,25835,25836,25837,25838,25839,25840,25841,25842,25843,25844,25845,25846,25847,25848,25849,25850,25851,25852,25853,25854,25855,25856,25857,25858,25859,25860,25861,25862,25863,25864,25865,25866,25867,25868,25869,25870,25871,25872,25873,25874,25875,25876,25877,25878,25879,25880,25881,25882,25883,25884,25885,25886,25887,25888,25889,25890,25891,25892,25893,25894,25895,25896,25897,25898,25899,25900,25901,25902,25903,25904,25905,25906,25907,25908,25909,25910,25911,25912,25913,25914,25915,25916,25917,25918,25919,25920,25921,25922,25923,25924,25925,25926,25927,25928,25929,25930,25931,25932,25933,25934,25935,25936,25937,25938,25939,25940,25941,25942,25943,25944,25945,25946,25947,25948,25949,25950,25951,25952,25953,25954,25955,25956,25957,25958,25959,25960,25961,25962,25963,25964,25965,25966,25967,25968,25969,25970,25971,25972,25973,25974,25975,25976,25977,25978,25979,25980,25981,25982,25983,25984,25985,25986,25987,25988,25989,25990,25991,25992,25993,25994,25995,25996,25997,25998,25999,26000,26001,26002,26003,26004,26005,26006,26007,26008,26009,26010,26011,26012,26013,26014,26015,26016,26017,26018,26019,26020,26021,26022,26023,26024,26025,26026,26027,26028,26029,26030,26031,26032,26033,26034,26035,26036,26037,26038,26039,26040,26041,26042,26043,26044,26045,26046,26047,26048,26049,26050,26051,26052,26053,26054,26055,26056,26057,26058,26059,26060,26061,26062,26063,26064,26065,26066,26067,26068,26069,26070,26071,26072,26073,26074,26075,26076,26077,26078,26079,26080,26081,26082,26083,26084,26085,26086,26087,26088,26089,26090,26091,26092,26093,26094,26095,26096,26097,26098,26099,26100,26101,26102,26103,26104,26105,26106,26107,26108,26109,26110,26111,26112,26113,26114,26115,26116,26117,26118,26119,26120,26121,26122,26123,26124,26125,26126,26127,26128,26129,26130,26131,26132,26133,26134,26135,26136,26137,26138,26139,26140,26141,26142,26143,26144,26145,26146,26147,26148,26149,26150,26151,26152,26153,26154,26155,26156,26157,26158,26159,26160,26161,26162,26163,26164,26165,26166,2616
7,26168,26169,26170,26171,26172,26173,26174,26175,26176,26177,26178,26179,26180,26181,26182,26183,26184,26185,26186,26187,26188,26189,26190,26191,26192,26193,26194,26195,26196,26197,26198,26199,26200,26201,26202,26203,26204,26205,26206,26207,26208,26209,26210,26211,26212,26213,26214,26215,26216,26217,26218,26219,26220,26221,26222,26223,26224,26225,26226,26227,26228,26229,26230,26231,26232,26233,26234,26235,26236,26237,26238,26239,26240,26241,26242,26243,26244,26245,26246,26247,26248,26249,26250,26251,26252,26253,26254,26255,26256,26257,26258,26259,26260,26261,26262,26263,26264,26265,26266,26267,26268,26269,26270,26271,26272,26273,26274,26275,26276,26277,26278,26279,26280,26281,26282,26283,26284,26285,26286,26287,26288,26289,26290,26291,26292,26293,26294,26295,26296,26297,26298,26299,26300,26301,26302,26303,26304,26305,26306,26307,26308,26309,26310,26311,26312,26313,26314,26315,26316,26317,26318,26319,26320,26321,26322,26323,26324,26325,26326,26327,26328,26329,26330,26331,26332,26333,26334,26335,26336,26337,26338,26339,26340,26341,26342,26343,26344,26345,26346,26347,26348,26349,26350,26351,26352,26353,26354,26355,26356,26357,26358,26359,26360,26361,26362,26363,26364,26365,26366,26367,26368,26369,26370,26371,26372,26373,26374,26375,26376,26377,26378,26379,26380,26381,26382,26383,26384,26385,26386,26387,26388,26389,26390,26391,26392,26393,26394,26395,26396,26397,26398,26399,26400,26401,26402,26403,26404,26405,26406,26407,26408,26409,26410,26411,26412,26413,26414,26415,26416,26417,26418,26419,26420,26421,26422,26423,26424,26425,26426,26427,26428,26429,26430,26431,26432,26433,26434,26435,26436,26437,26438,26439,26440,26441,26442,26443,26444,26445,26446,26447,26448,26449,26450,26451,26452,26453,26454,26455,26456,26457,26458,26459,26460,26461,26462,26463,26464,26465,26466,26467,26468,26469,26470,26471,26472,26473,26474,26475,26476,26477,26478,26479,26480,26481,26482,26483,26484,26485,26486,26487,26488,26489,26490,26491,26492,26493,26494,26495,26496,26497,26498,26499,26500,
26501,26502,26503,26504,26505,26506,26507,26508,26509,26510,26511,26512,26513,26514,26515,26516,26517,26518,26519,26520,26521,26522,26523,26524,26525,26526,26527,26528,26529,26530,26531,26532,26533,26534,26535,26536,26537,26538,26539,26540,26541,26542,26543,26544,26545,26546,26547,26548,26549,26550,26551,26552,26553,26554,26555,26556,26557,26558,26559,26560,26561,26562,26563,26564,26565,26566,26567,26568,26569,26570,26571,26572,26573,26574,26575,26576,26577,26578,26579,26580,26581,26582,26583,26584,26585,26586,26587,26588,26589,26590,26591,26592,26593,26594,26595,26596,26597,26598,26599,26600,26601,26602,26603,26604,26605,26606,26607,26608,26609,26610,26611,26612,26613,26614,26615,26616,26617,26618,26619,26620,26621,26622,26623,26624,26625,26626,26627,26628,26629,26630,26631,26632,26633,26634,26635,26636,26637,26638,26639,26640,26641,26642,26643,26644,26645,26646,26647,26648,26649,26650,26651,26652,26653,26654,26655,26656,26657,26658,26659,26660,26661,26662,26663,26664,26665,26666,26667,26668,26669,26670,26671,26672,26673,26674,26675,26676,26677,26678,26679,26680,26681,26682,26683,26684,26685,26686,26687,26688,26689,26690,26691,26692,26693,26694,26695,26696,26697,26698,26699,26700,26701,26702,26703,26704,26705,26706,26707,26708,26709,26710,26711,26712,26713,26714,26715,26716,26717,26718,26719,26720,26721,26722,26723,26724,26725,26726,26727,26728,26729,26730,26731,26732,26733,26734,26735,26736,26737,26738,26739,26740,26741,26742,26743,26744,26745,26746,26747,26748,26749,26750,26751,26752,26753,26754,26755,26756,26757,26758,26759,26760,26761,26762,26763,26764,26765,26766,26767,26768,26769,26770,26771,26772,26773,26774,26775,26776,26777,26778,26779,26780,26781,26782,26783,26784,26785,26786,26787,26788,26789,26790,26791,26792,26793,26794,26795,26796,26797,26798,26799,26800,26801,26802,26803,26804,26805,26806,26807,26808,26809,26810,26811,26812,26813,26814,26815,26816,26817,26818,26819,26820,26821,26822,26823,26824,26825,26826,26827,26828,26829,26830,26831,26832,26833,26
834,26835,26836,26837,26838,26839,26840,26841,26842,26843,26844,26845,26846,26847,26848,26849,26850,26851,26852,26853,26854,26855,26856,26857,26858,26859,26860,26861,26862,26863,26864,26865,26866,26867,26868,26869,26870,26871,26872,26873,26874,26875,26876,26877,26878,26879,26880,26881,26882,26883,26884,26885,26886,26887,26888,26889,26890,26891,26892,26893,26894,26895,26896,26897,26898,26899,26900,26901,26902,26903,26904,26905,26906,26907,26908,26909,26910,26911,26912,26913,26914,26915,26916,26917,26918,26919,26920,26921,26922,26923,26924,26925,26926,26927,26928,26929,26930,26931,26932,26933,26934,26935,26936,26937,26938,26939,26940,26941,26942,26943,26944,26945,26946,26947,26948,26949,26950,26951,26952,26953,26954,26955,26956,26957,26958,26959,26960,26961,26962,26963,26964,26965,26966,26967,26968,26969,26970,26971,26972,26973,26974,26975,26976,26977,26978,26979,26980,26981,26982,26983,26984,26985,26986,26987,26988,26989,26990,26991,26992,26993,26994,26995,26996,26997,26998,26999,27000,27001,27002,27003,27004,27005,27006,27007,27008,27009,27010,27011,27012,27013,27014,27015,27016,27017,27018,27019,27020,27021,27022,27023,27024,27025,27026,27027,27028,27029,27030,27031,27032,27033,27034,27035,27036,27037,27038,27039,27040,27041,27042,27043,27044,27045,27046,27047,27048,27049,27050,27051,27052,27053,27054,27055,27056,27057,27058,27059,27060,27061,27062,27063,27064,27065,27066,27067,27068,27069,27070,27071,27072,27073,27074,27075,27076,27077,27078,27079,27080,27081,27082,27083,27084,27085,27086,27087,27088,27089,27090,27091,27092,27093,27094,27095,27096,27097,27098,27099,27100,27101,27102,27103,27104,27105,27106,27107,27108,27109,27110,27111,27112,27113,27114,27115,27116,27117,27118,27119,27120,27121,27122,27123,27124,27125,27126,27127,27128,27129,27130,27131,27132,27133,27134,27135,27136,27137,27138,27139,27140,27141,27142,27143,27144) have mixed types.Specify dtype option on import or set low_memory=False.\n if (await self.run_code(code, result, async_=asy)):\n"
],
[
"crossval = RepeatedKFold(n_repeats=3, n_splits=10)\n#r_train_data = robjects.conversion.py2rpy(train_data.transpose())",
"_____no_output_____"
],
[
"#TODO: Modify to get lables later\ndef get_labels(data_for_labels):\n labels_age = data_for_labels.index.get_level_values('age').values\n #print(len(data_for_labels))\n #display(train_labels_age)\n\n labels = []\n #ages = labels_age[train]\n for age in labels_age:\n if age>=1 and age<20:\n labels.append('age_1_20')\n elif age>=20 and age<40:\n labels.append('age_20_40')\n elif age>=40 and age<60:\n labels.append('age_40_60')\n elif age>=60 and age<80:\n labels.append('age_60_80')\n elif age>=80 and age<100:\n labels.append('age_80_100')\n\n #display(train_labels)\n labels = np.array(labels)\n print(len(labels))\n return labels\n\n #r_train_labels = robjects.conversion.py2rpy(train_labels)",
"_____no_output_____"
],
[
"def get_age(results):\n pred = []\n for result in results:\n pred.append(result[5])\n \n return pred",
"_____no_output_____"
],
[
"#From stackoverflow\ndef plot_confusion_matrix(true, pred):\n #Convert to a method\n cm = confusion_matrix(true, pred)\n labels = ['age_1_20', 'age_20_40', 'age_40_60', 'age_60_80', 'age_80_100']\n\n fig = plt.figure(figsize=(8, 7))\n ax= plt.subplot()\n sns.heatmap(cm, annot=True, ax = ax, fmt = 'g'); \n # labels, title and ticks\n ax.set_xlabel('Predicted', fontsize=20)\n ax.xaxis.set_label_position('bottom')\n plt.xticks(rotation=90)\n ax.xaxis.set_ticklabels(labels, fontsize = 10)\n ax.xaxis.tick_bottom()\n\n ax.set_ylabel('True', fontsize=20)\n ax.yaxis.set_ticklabels(labels, fontsize = 10)\n plt.yticks(rotation=0)\n\n plt.title('Refined Confusion Matrix', fontsize=20)\n\n plt.savefig('ConMat24.png')\n plt.show()",
"_____no_output_____"
]
],
[
[
"# Part I: kTSP classifier on entire data\nThis segment of code implements an entire kTSP classifier. It trains and predicts on the entire dataset by 10-fold CV repeated 3 times. It generates a graph that demonstrate the confusion matrix of the predicted and true classes. \n\nNotice that kTSP is a classification algorithm, so quantative predictions of age is not generated. The sample ages are divided into 5 classes: 0-20, 20-40, 40-60, 60-80, and 80-100 year old. The predictions fall into the 5 classes listed. ",
"_____no_output_____"
]
],
[
[
"#Initialize empty data structure to store predicted and true age classes\ntrue_age = []\npred_age = []\n\n#10-fold CV repeated 3 times\nfor train, test in crossval.split(data):\n \n #Get the labels for training\n train_labels = get_labels(data.iloc[train,:])\n #Save true age classes\n true_age.append(get_labels(data.iloc[test,:]))\n #Get test data\n test_data = data.iloc[test,:]\n \n #Create object for training kTSP classifier\n object = r['ReadData']((data.iloc[train,:]).transpose(), train_labels)\n \n #Filter genes and train kTSP classifier to get representative gene pairs\n filtered_genes = r['filter_genes_TSP'](data_object = object, filter = \"one_vs_rest\", \n platform_wise = False, featureNo = 1000, UpDown = True)\n \n classifier = r['train_one_vs_rest_TSP'](data_object = object, filtered_genes = filtered_genes,\n include_pivot = False,one_vs_one_scores = False, \n platform_wise_scores = False,seed = 1234, verbose = False)\n \n #Use one-vs-rest scheme to do multiclassification using kTSP\n raw_results_test = r['predict_one_vs_rest_TSP'](classifier = classifier, Data = test_data.transpose(), \n tolerate_missed_genes = False, weighted_votes = True)\n \n #Unravel R data structue to get the prediction of age class\n results_test = get_age(raw_results_test)\n pred_age.append(results_test)\n",
"119\n14\n"
],
[
"#Plot the confusion matrix\npred = flatten(pred_age)\ntrue = flatten(true_age)\nplot_confusion_matrix(true, pred)",
"_____no_output_____"
]
],
[
[
"# Part II: Feature selection using kTSP algorithm\nThis segment of code completes feature selection for the ensemble using the filter method. 33 samples are chosen randomly from the entire sample. kTSP algorithm is performed on the 33 sample to select representative gene expression pairs. \n\nA new csv file is generated based on the features selected. Each pair is a column of the csv file in the form \"geneA>geneB\". For each person in the reserved 100 samples, if the expression of geneA is greater than geneB, 1 is put in the \"geneA>geneB\" column. Otherwise, 0 is put in the \"geneA>geneB\" column. Following this rule, a binary expression matrix is generated. \n\nThe csv file containing the binary expression matrix will be put into the ensemble. (However, the binary expression matrix does not work well with LDA ensemble. Possible reason is that the binary expression matrix does not keep the Gaussian distribution of the data. Another possible reason is that too few samples used in feature selection, so gene pairs that are not representative of age are chosen.)",
"_____no_output_____"
]
],
[
[
"#Prepare data for filter method\ndata = shuffle(data)\n\n#Get train data and feature selection set\ntrain = range(33,133)\nFS = range(0,33)\nFS_labels = get_labels(data.iloc[FS,:])\n#FS_age = (get_labels(data.iloc[FS,:]))\nFS_data = data.iloc[FS,:]\n\n#Get reserved data\nU_data = data.iloc[train, :]\ndisplay(FS_data)",
"33\n"
],
[
"np.unique(U_data.dtypes, return_counts=True)",
"_____no_output_____"
],
[
"U_data.index.get_level_values('meta')",
"_____no_output_____"
],
[
"#Train the kTSP classifier\nobject = r['ReadData'](FS_data.transpose(), FS_labels)\nfiltered_genes = r['filter_genes_TSP'](data_object = object, filter = \"one_vs_rest\", platform_wise = False, featureNo = 1000, UpDown = True)\n \nclassifier = r['train_one_vs_rest_TSP'](data_object = object, filtered_genes = filtered_genes,\n include_pivot = False,one_vs_one_scores = False, \n platform_wise_scores = False,seed = 1234, verbose = False)\n",
"R[write to console]: Creating Data object...\n\nR[write to console]: Number of samples: 33\n\nR[write to console]: Number of genes/features: 27142\n\nR[write to console]: Classes: age_20_40 age_60_80 age_80_100 age_40_60 age_1_20\n\nR[write to console]: Platforms/studies: NULL\n\nR[write to console]: Gene names in the data have '-' symbol! This may generate errors during the training process of random forest! It is recommended to change these '-' to '_' or '.'\n\nR[write to console]: Creating new filtered genes object for one-vs-rest Scheme\n\nR[write to console]: filtering...\n\nR[write to console]: Class: age_20_40\n\nR[write to console]: Class: age_60_80\n\nR[write to console]: Class: age_80_100\n\nR[write to console]: Class: age_40_60\n\nR[write to console]: Class: age_1_20\n\nR[write to console]: DONE!\n\n"
],
[
"#From Stackoverflow: \n#This method unravels the kTSP classifier that is stored as convoluted R vector.\ndef r_list_to_py_dict(r_list):\n converted = {}\n for name in r_list.names:\n val = r_list.rx(name)[0]\n if isinstance(val, robjects.vectors.DataFrame):\n converted[name] = pandas2ri.ri2py_dataframe(val)\n elif isinstance(val, robjects.vectors.ListVector):\n converted[name] = r_list_to_py_dict(val)\n elif isinstance(val, robjects.vectors.FloatVector) or isinstance(val, robjects.vectors.StrVector):\n if len(val) == 1:\n converted[name] = val[0]\n else:\n converted[name] = list(val)\n else: # single value\n converted[name] = val\n return converted",
"_____no_output_____"
],
[
"#R to python data structure\npy_classifier = r_list_to_py_dict(classifier)\ndisplay(py_classifier) #TODO: Check on how the genes are paired",
"_____no_output_____"
],
[
"#Unravel python data structure to list\nlist_pairs = list(py_classifier.values())\ndisplay(list_pairs)",
"_____no_output_____"
],
[
"#Unravel python list\ntype(list_pairs)\ndisplay(list_pairs[0])",
"_____no_output_____"
],
[
"#Unravel and cast python list to panda dataFrame\ndf = pd.DataFrame(list_pairs[0])\ndisplay(df.columns)",
"_____no_output_____"
],
[
"#Unravel panda dataFrame\ndisplay(df['age_20_40'].TSPs)",
"_____no_output_____"
],
[
"#This method unravels the kTSP classifier and gets all the representative gene pairs\n#Notice that still need to read the source code of switchBox and multiclassPairs to\n#ensure that the pairs are parsed in the correct way\n\n#Each gene pair is stored in a tuple\n#All the tuples are stored in a list\ndef get_pairs (classifier):\n \n #Unravel the R classifier into a python data structure\n py_classifier = r_list_to_py_dict(classifier)\n list_pairs = list(py_classifier.values())\n df = pd.DataFrame(list_pairs[0])\n\n pairs = []\n for age_class in df.columns:\n kTSP = df[age_class].TSPs\n \n i = 0\n while (i < len(kTSP)):\n pair = (kTSP[i], kTSP[i+1])\n pairs.append(pair)\n \n i += 2\n \n #Remove possible duplicates and cast back to list\n pairs = list(set(pairs)) \n return pairs",
"_____no_output_____"
],
[
"#Get the gene pairs\npairs = get_pairs(classifier)\ndisplay(pairs)\ndisplay(len(pairs))",
"_____no_output_____"
],
[
"#This method gets the names of the gene pairs\ndef get_pairs_name(pairs):\n names = []\n \n for pair in pairs:\n name = \">\".join([str(pair[0]), str(pair[1])])\n names.append(name)\n \n return names",
"_____no_output_____"
],
[
"#Get names of all the pairs\npair_names = get_pairs_name(pairs)\ndisplay(pair_names)",
"_____no_output_____"
],
[
"#Generate a dummy data frame that contains 0 for all entries\n#The columns are the names of each gene pair\ndummy_binary = [0] * len(U_data)\ndf_bi = {}\n\nfor pair in pair_names:\n df_bi[pair] = dummy_binary\n\nbinary_xdata = pd.DataFrame(df_bi) \ndisplay(binary_xdata)",
"_____no_output_____"
],
[
"#For each person compare the expression level of the genes in each gene pair\n#If geneA>geneB of a person as specified in the column, 1 is put into the according entry\n\n#Iterate through each person\nfor i in range(0, len(U_data)):\n \n #Iterate through each pair for each person\n for j in range(0, len(pairs)):\n #Get gene1 and gene2\n gene1 = pairs[j][0]\n gene2 = pairs[j][1]\n \n #Compare expression level of gene1 and gene2\n if U_data.iloc[i][gene1] > U_data.iloc[i][gene2]:\n binary_xdata.at[i, pair_names[j]] = 1\ndisplay(binary_xdata)",
"_____no_output_____"
],
[
"#Fiddle with the indices to align with the requirements for csv file in ensemble\ncols = list(binary_xdata.columns)\nage = U_data.index.get_level_values('age').values\nuid = U_data.index.get_level_values('uid').values\nmeta = U_data.index.get_level_values('meta').values\nbinary_xdata['age'] = age\nbinary_xdata['uid'] = uid\nbinary_xdata['meta'] = meta\nbinary_xdata = binary_xdata.reindex(columns=(['uid','age','meta'] + cols))\ndisplay(binary_xdata)",
"_____no_output_____"
],
[
"binary_xdata.reset_index()\nbinary_xdata.set_index(['uid'])",
"_____no_output_____"
],
[
"#Export the csv file containing the binary matrix\nbinary_xdata.to_csv('binary_gene_labels.csv', index=False)",
"_____no_output_____"
]
],
[
[
"# Part 3: kTSP classifier with filter method feature selection\n\nThis segment of code will use kTSP classifier to predict on the reserved 100 samples from the last part to serve as the baseline comparision. ",
"_____no_output_____"
]
],
[
[
"display(classifier)",
"_____no_output_____"
],
[
"#Try removing indices, which are causing type errors\nraw_ktsp_results = r['predict_one_vs_rest_TSP'](classifier = classifier, Data = U_data.transpose(), \n tolerate_missed_genes = False, weighted_votes = True)",
"R[write to console]: Get scores/votes from class: age_20_40\n\nR[write to console]: Get scores/votes from class: age_60_80\n\nR[write to console]: Get scores/votes from class: age_80_100\n\nR[write to console]: Get scores/votes from class: age_40_60\n\nR[write to console]: Get scores/votes from class: age_1_20\n\nR[write to console]: Checking the ties\n\nR[write to console]: No ties found\n\n"
],
[
"%debug",
"> \u001b[0;32m/opt/anaconda3/lib/python3.8/site-packages/rpy2/rinterface_lib/conversion.py\u001b[0m(121)\u001b[0;36m_str_to_cchar\u001b[0;34m()\u001b[0m\n\u001b[0;32m 119 \u001b[0;31m\u001b[0;32mdef\u001b[0m \u001b[0m_str_to_cchar\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0ms\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mstr\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mencoding\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mstr\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m'utf-8'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 120 \u001b[0;31m \u001b[0;31m# TODO: use isString and installTrChar\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m--> 121 \u001b[0;31m \u001b[0mb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0ms\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mencode\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mencoding\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 122 \u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mffi\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnew\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'char[]'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mb\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 123 \u001b[0;31m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\nipdb> up\n> \u001b[0;32m/opt/anaconda3/lib/python3.8/site-packages/rpy2/rinterface_lib/conversion.py\u001b[0m(142)\u001b[0;36m_str_to_charsxp\u001b[0;34m()\u001b[0m\n\u001b[0;32m 140 \u001b[0;31m \u001b[0ms\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mrlib\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mR_NaString\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 141 \u001b[0;31m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m--> 142 \u001b[0;31m \u001b[0mcchar\u001b[0m \u001b[0;34m=\u001b[0m 
\u001b[0m_str_to_cchar\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mval\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mencoding\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'utf-8'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 143 \u001b[0;31m \u001b[0ms\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mrlib\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mRf_mkCharCE\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcchar\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mopenrlib\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrlib\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mCE_UTF8\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 144 \u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0ms\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\nipdb> up\n> \u001b[0;32m/opt/anaconda3/lib/python3.8/site-packages/rpy2/rinterface_lib/sexp.py\u001b[0m(677)\u001b[0;36m_as_charsxp_cdata\u001b[0;34m()\u001b[0m\n\u001b[0;32m 675 \u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__sexp__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_cdata\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 676 \u001b[0;31m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m--> 677 \u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mconversion\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_str_to_charsxp\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 678 \u001b[0;31m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 679 \u001b[0;31m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\nipdb> up\n> \u001b[0;32m/opt/anaconda3/lib/python3.8/site-packages/rpy2/rinterface_lib/sexp.py\u001b[0m(474)\u001b[0;36m_populate_r_vector\u001b[0;34m()\u001b[0m\n\u001b[0;32m 472 \u001b[0;31m\u001b[0;32mdef\u001b[0m 
\u001b[0m_populate_r_vector\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0miterable\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mr_vector\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mset_elt\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcast_value\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 473 \u001b[0;31m \u001b[0;32mfor\u001b[0m \u001b[0mi\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mv\u001b[0m \u001b[0;32min\u001b[0m \u001b[0menumerate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0miterable\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m--> 474 \u001b[0;31m \u001b[0mset_elt\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mr_vector\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mi\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcast_value\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mv\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 475 \u001b[0;31m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 476 \u001b[0;31m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\nipdb> up\n> \u001b[0;32m/opt/anaconda3/lib/python3.8/site-packages/rpy2/rinterface_lib/sexp.py\u001b[0m(552)\u001b[0;36mfrom_iterable\u001b[0;34m()\u001b[0m\n\u001b[0;32m 550 \u001b[0;31m cls._R_TYPE, n)\n\u001b[0m\u001b[0;32m 551 \u001b[0;31m )\n\u001b[0m\u001b[0;32m--> 552 \u001b[0;31m \u001b[0mpopulate_func\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0miterable\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mr_vector\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mset_elt\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcast_value\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 553 \u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mr_vector\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 554 \u001b[0;31m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\nipdb> up\n> 
\u001b[0;32m/opt/anaconda3/lib/python3.8/site-packages/rpy2/rinterface_lib/conversion.py\u001b[0m(45)\u001b[0;36m_\u001b[0;34m()\u001b[0m\n\u001b[0;32m 43 \u001b[0;31m\u001b[0;32mdef\u001b[0m \u001b[0m_cdata_res_to_rinterface\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfunction\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 44 \u001b[0;31m \u001b[0;32mdef\u001b[0m \u001b[0m_\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m---> 45 \u001b[0;31m \u001b[0mcdata\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mfunction\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 46 \u001b[0;31m \u001b[0;31m# TODO: test cdata is of the expected CType\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 47 \u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0m_cdata_to_rinterface\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcdata\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\nipdb> up\n> \u001b[0;32m/opt/anaconda3/lib/python3.8/site-packages/rpy2/rinterface_lib/sexp.py\u001b[0m(614)\u001b[0;36mfrom_object\u001b[0;34m()\u001b[0m\n\u001b[0;32m 612 \u001b[0;31m \u001b[0;32mexcept\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mTypeError\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mValueError\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 613 \u001b[0;31m 
\u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m--> 614 \u001b[0;31m \u001b[0mres\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcls\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfrom_iterable\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mobj\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 615 \u001b[0;31m \u001b[0;32mexcept\u001b[0m \u001b[0mValueError\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 616 \u001b[0;31m msg = ('The class methods from_memoryview() and '\n\u001b[0m\nipdb> up\n> \u001b[0;32m/opt/anaconda3/lib/python3.8/site-packages/rpy2/rinterface_lib/sexp.py\u001b[0m(523)\u001b[0;36m__init__\u001b[0;34m()\u001b[0m\n\u001b[0;32m 521 \u001b[0;31m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__init__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mobj\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 522 \u001b[0;31m \u001b[0;32melif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mobj\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcollections\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mabc\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mSized\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m--> 523 \u001b[0;31m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__init__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfrom_object\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mobj\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__sexp__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 524 \u001b[0;31m 
\u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 525 \u001b[0;31m raise TypeError('The constructor must be called '\n\u001b[0m\nipdb> up\n> \u001b[0;32m/opt/anaconda3/lib/python3.8/site-packages/rpy2/robjects/vectors.py\u001b[0m(385)\u001b[0;36m__init__\u001b[0;34m()\u001b[0m\n\u001b[0;32m 383 \u001b[0;31m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 384 \u001b[0;31m \u001b[0;32mdef\u001b[0m \u001b[0m__init__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mobj\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m--> 385 \u001b[0;31m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__init__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mobj\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 386 \u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_add_rops\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 387 \u001b[0;31m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\nipdb> up\n> \u001b[0;32m/opt/anaconda3/lib/python3.8/site-packages/rpy2/robjects/pandas2ri.py\u001b[0m(63)\u001b[0;36mpy2rpy_pandasdataframe\u001b[0;34m()\u001b[0m\n\u001b[0;32m 61 \u001b[0;31m \u001b[0;34m'The error is: %s'\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 62 \u001b[0;31m % (name, str(e)))\n\u001b[0m\u001b[0;32m---> 63 \u001b[0;31m \u001b[0mod\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mStrVector\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mvalues\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 64 \u001b[0;31m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 65 \u001b[0;31m 
\u001b[0;32mreturn\u001b[0m \u001b[0mDataFrame\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mod\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\nipdb> up\n> \u001b[0;32m/opt/anaconda3/lib/python3.8/functools.py\u001b[0m(875)\u001b[0;36mwrapper\u001b[0;34m()\u001b[0m\n\u001b[0;32m 873 \u001b[0;31m '1 positional argument')\n\u001b[0m\u001b[0;32m 874 \u001b[0;31m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m--> 875 \u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mdispatch\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__class__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkw\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 876 \u001b[0;31m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 877 \u001b[0;31m \u001b[0mfuncname\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mgetattr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfunc\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'__name__'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'singledispatch function'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\n"
],
[
"#Visualize kTSP baseline performance\nktsp_results = get_age(raw_ktsp_results)\ntrue_class = get_labels(U_data)\nplot_confusion_matrix(true_class, ktsp_results)",
"100\n"
]
],
[
[
"# Note:\nPart 2 and part 3 are subject to high variability because of the random sampling process in filter method. The features chosen in part 2 are not always reproducible. Part 3 is subject to unstable runtime errors that do not occur every time because of the unstable selection of features in part 2. Some features will cause runtime error, but others will not. ",
"_____no_output_____"
],
[
"# Future plan: \nTo counter the variability in part 2 and 3, following plan will be implemented in the futre to join the two part together:\nfor i in range(k):\n shuffle data\n train-test split\n calc features on train\n calc performance of kTSP on test set\n 3x10 cross validate ensemble classifier on test\n for the future save everything:\n the feature set, the kTSP performance, the ensemble votes\n and the ensemble performance (R2, MAE, MED)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
4a2061b69b45971da67b8860141000fbf852b093
| 8,147 |
ipynb
|
Jupyter Notebook
|
position-opening2vec/location_vectors.ipynb
|
johan-andries/data-experiments
|
126f6e38fafd40d4507cf9b694469eaa25d78dfe
|
[
"Apache-2.0"
] | 1 |
2021-06-24T02:58:21.000Z
|
2021-06-24T02:58:21.000Z
|
position-opening2vec/location_vectors.ipynb
|
johan-andries/data-experiments
|
126f6e38fafd40d4507cf9b694469eaa25d78dfe
|
[
"Apache-2.0"
] | null | null | null |
position-opening2vec/location_vectors.ipynb
|
johan-andries/data-experiments
|
126f6e38fafd40d4507cf9b694469eaa25d78dfe
|
[
"Apache-2.0"
] | null | null | null | 30.62782 | 126 | 0.595188 |
[
[
[
"import gzip\nimport itertools\nimport collections\nimport numpy as np\nimport item2vec",
"_____no_output_____"
],
[
"np.random.seed(101)",
"_____no_output_____"
],
[
"list_of_sessions_with_position_opening_ids = []\nwith gzip.open('data/view_position_opening_sessions.txt.gz', 'rb') as f:\n for line in f:\n list_of_sessions_with_position_opening_ids.append(list(set(line.strip().split())))",
"_____no_output_____"
],
[
"postion_opening_id_2_location_name = {}\nwith gzip.open('data/postion_opening_categorical_variables.csv.gz', 'rb') as f:\n for line in f:\n vac_id, _, location_name = line.strip().split(\",\")\n postion_opening_id_2_location_name[vac_id] = location_name",
"_____no_output_____"
],
[
"list_of_sessions_with_location_names_unfiltered = \\\n [set(map(lambda pos_opening_id: postion_opening_id_2_location_name[pos_opening_id], session) )\n for session in list_of_sessions_with_position_opening_ids]",
"_____no_output_____"
],
[
"list_of_sessions_with_location_names_flattened = \\\n itertools.chain.from_iterable(list_of_sessions_with_location_names_unfiltered)",
"_____no_output_____"
],
[
"frequently_viewed_locations = [location_name for location_name, count in \\\n collections.Counter(list_of_sessions_with_location_names_flattened).items() if count > 200]",
"_____no_output_____"
],
[
"location_name_to_index = dict(zip(frequently_viewed_locations, range(len(frequently_viewed_locations))))",
"_____no_output_____"
],
[
"list_of_sessions_with_location_indices = []\nfor session in list_of_sessions_with_location_names_unfiltered:\n only_frequently_viewed_in_session = session.intersection(frequently_viewed_locations)\n if len(only_frequently_viewed_in_session) > 1:\n list_of_sessions_with_location_indices.append(\n map(lambda location_name: location_name_to_index[location_name], list(only_frequently_viewed_in_session))\n )",
"_____no_output_____"
],
[
"batch_size = 128\nnext_batch = item2vec.create_batch_generator(list_of_sessions_with_location_indices, batch_size)\nx = item2vec.run(750001, next_batch, 0.5, batch_size, 100, len(frequently_viewed_locations), 15)",
"2016-09-09 16:03:41.279313 Average loss at step 0: 191.926345825\n\n2016-09-09 16:06:02.716560 Average loss at step 20000: 4.12253415415\n\n2016-09-09 16:08:22.346069 Average loss at step 40000: 3.55853750117\n\n2016-09-09 16:10:47.872898 Average loss at step 60000: 3.54658460959\n\n2016-09-09 16:13:13.622302 Average loss at step 80000: 3.53928644782\n\n2016-09-09 16:15:38.641889 Average loss at step 100000: 3.53617465142\n\n2016-09-09 16:18:04.482675 Average loss at step 120000: 3.53224128134\n\n2016-09-09 16:20:30.840844 Average loss at step 140000: 3.53035159198\n\n2016-09-09 16:22:55.308304 Average loss at step 160000: 3.528927601\n\n2016-09-09 16:25:14.637997 Average loss at step 180000: 3.52931641765\n\n2016-09-09 16:27:37.995939 Average loss at step 200000: 3.52876839567\n\n2016-09-09 16:29:59.223364 Average loss at step 220000: 3.52790833496\n\n2016-09-09 16:32:19.822820 Average loss at step 240000: 3.52827636063\n\n2016-09-09 16:34:39.572953 Average loss at step 260000: 3.52589493797\n\n2016-09-09 16:36:58.068257 Average loss at step 280000: 3.52449259816\n\n2016-09-09 16:39:16.189434 Average loss at step 300000: 3.52427948246\n\n2016-09-09 16:41:39.095324 Average loss at step 320000: 3.52277514919\n\n2016-09-09 16:43:57.337412 Average loss at step 340000: 3.52372467991\n\n2016-09-09 16:46:21.262179 Average loss at step 360000: 3.5211983761\n\n2016-09-09 16:48:44.276361 Average loss at step 380000: 3.52407722836\n\n2016-09-09 16:51:03.703099 Average loss at step 400000: 3.52409390768\n\n2016-09-09 16:53:22.281034 Average loss at step 420000: 3.52118962266\n\n2016-09-09 16:55:46.174832 Average loss at step 440000: 3.52297553306\n\n2016-09-09 16:58:20.695974 Average loss at step 460000: 3.52335663141\n\n2016-09-09 17:00:40.182529 Average loss at step 480000: 3.52207386893\n\n2016-09-09 17:03:01.261622 Average loss at step 500000: 3.5231700241\n\n2016-09-09 17:05:20.034693 Average loss at step 520000: 3.52015685856\n\n2016-09-09 17:07:38.857609 Average loss 
at step 540000: 3.5238054087\n\n2016-09-09 17:10:00.347238 Average loss at step 560000: 3.52356249758\n\n2016-09-09 17:12:17.607117 Average loss at step 580000: 3.52332293549\n\n2016-09-09 17:14:40.959069 Average loss at step 600000: 3.51994599702\n\n2016-09-09 17:17:04.448210 Average loss at step 620000: 3.51866870985\n\n2016-09-09 17:19:26.482411 Average loss at step 640000: 3.5190994827\n\n2016-09-09 17:21:44.067725 Average loss at step 660000: 3.52046821408\n\n2016-09-09 17:24:01.356786 Average loss at step 680000: 3.52033028643\n\n2016-09-09 17:26:23.026995 Average loss at step 700000: 3.52289807901\n\n2016-09-09 17:28:46.269321 Average loss at step 720000: 3.51911323829\n\n2016-09-09 17:31:09.684438 Average loss at step 740000: 3.51843560983\n\n"
],
[
"with open(\"work/location_vectors.csv\",\"w\") as f:\n for i in range(x.shape[0]):\n f.write(str(frequently_viewed_locations[i]) + \",\")\n f.write(\",\".join(map(str,x[i])))\n f.write(\"\\n\")",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a206a92747714744d487409793d248b6d52bb82
| 6,796 |
ipynb
|
Jupyter Notebook
|
nbs/05_layers.loss.ipynb
|
qAp/kgl_humanprotein
|
05dc0a493c7545b59c4a20547f885b13d9ea1a6a
|
[
"Apache-2.0"
] | null | null | null |
nbs/05_layers.loss.ipynb
|
qAp/kgl_humanprotein
|
05dc0a493c7545b59c4a20547f885b13d9ea1a6a
|
[
"Apache-2.0"
] | null | null | null |
nbs/05_layers.loss.ipynb
|
qAp/kgl_humanprotein
|
05dc0a493c7545b59c4a20547f885b13d9ea1a6a
|
[
"Apache-2.0"
] | null | null | null | 33.477833 | 96 | 0.508976 |
[
[
[
"# `layers.loss`",
"_____no_output_____"
]
],
[
[
"%reload_ext autoreload\n%autoreload 2",
"_____no_output_____"
],
[
"# %load ../../HPA-competition-solutions/bestfitting/src/layers/loss.py",
"_____no_output_____"
],
[
"#default_exp layers.loss",
"_____no_output_____"
],
[
"#export\n\nimport math\n\nfrom torch import nn\nimport torch.nn.functional as F\nfrom kgl_humanprotein.config.config import *\nfrom kgl_humanprotein.layers.hard_example import *\nfrom kgl_humanprotein.layers.lovasz_losses import *",
"run on collie.local\n"
],
[
"#export\n\nclass FocalLoss(nn.Module):\n def __init__(self, gamma=2):\n super().__init__()\n self.gamma = gamma\n\n def forward(self, logit, target, epoch=0):\n target = target.float()\n max_val = (-logit).clamp(min=0)\n loss = logit - logit * target + max_val + \\\n ((-max_val).exp() + (-logit - max_val).exp()).log()\n\n invprobs = F.logsigmoid(-logit * (target * 2.0 - 1.0))\n loss = (invprobs * self.gamma).exp() * loss\n if len(loss.size())==2:\n loss = loss.sum(dim=1)\n return loss.mean()\n\nclass HardLogLoss(nn.Module):\n def __init__(self):\n super(HardLogLoss, self).__init__()\n self.bce_loss = nn.BCEWithLogitsLoss()\n self.__classes_num = NUM_CLASSES\n\n def forward(self, logits, labels,epoch=0):\n labels = labels.float()\n loss=0\n for i in range(NUM_CLASSES):\n logit_ac=logits[:,i]\n label_ac=labels[:,i]\n logit_ac, label_ac=get_hard_samples(logit_ac,label_ac)\n loss+=self.bce_loss(logit_ac,label_ac)\n loss = loss/NUM_CLASSES\n return loss\n\n# https://github.com/bermanmaxim/LovaszSoftmax/tree/master/pytorch\ndef lovasz_hinge(logits, labels, ignore=None, per_class=True):\n \"\"\"\n Binary Lovasz hinge loss\n logits: [B, C] Variable, logits at each pixel (between -\\infty and +\\infty)\n labels: [B, C] Tensor, binary ground truth masks (0 or 1)\n per_image: compute the loss per image instead of per batch\n ignore: void class id\n \"\"\"\n if per_class:\n loss = 0\n for i in range(NUM_CLASSES):\n logit_ac = logits[:, i]\n label_ac = labels[:, i]\n loss += lovasz_hinge_flat(logit_ac, label_ac)\n loss = loss / NUM_CLASSES\n else:\n logits = logits.view(-1)\n labels = labels.view(-1)\n loss = lovasz_hinge_flat(logits, labels)\n return loss\n\n# https://www.kaggle.com/c/tgs-salt-identification-challenge/discussion/69053\nclass SymmetricLovaszLoss(nn.Module):\n def __init__(self):\n super(SymmetricLovaszLoss, self).__init__()\n self.__classes_num = NUM_CLASSES\n\n def forward(self, logits, labels,epoch=0):\n labels = labels.float()\n loss=((lovasz_hinge(logits, 
labels)) + (lovasz_hinge(-logits, 1 - labels))) / 2\n return loss\n\nclass FocalSymmetricLovaszHardLogLoss(nn.Module):\n def __init__(self):\n super(FocalSymmetricLovaszHardLogLoss, self).__init__()\n self.focal_loss = FocalLoss()\n self.slov_loss = SymmetricLovaszLoss()\n self.log_loss = HardLogLoss()\n def forward(self, logit, labels,epoch=0):\n labels = labels.float()\n focal_loss = self.focal_loss.forward(logit, labels, epoch)\n slov_loss = self.slov_loss.forward(logit, labels, epoch)\n log_loss = self.log_loss.forward(logit, labels, epoch)\n loss = focal_loss*0.5 + slov_loss*0.5 +log_loss * 0.5\n return loss\n\n# https://github.com/ronghuaiyang/arcface-pytorch\nclass ArcFaceLoss(nn.modules.Module):\n def __init__(self,s=30.0,m=0.5):\n super(ArcFaceLoss, self).__init__()\n self.classify_loss = nn.CrossEntropyLoss()\n self.s = s\n self.easy_margin = False\n self.cos_m = math.cos(m)\n self.sin_m = math.sin(m)\n self.th = math.cos(math.pi - m)\n self.mm = math.sin(math.pi - m) * m\n\n def forward(self, logits, labels, epoch=0):\n cosine = logits\n sine = torch.sqrt(1.0 - torch.pow(cosine, 2))\n phi = cosine * self.cos_m - sine * self.sin_m\n if self.easy_margin:\n phi = torch.where(cosine > 0, phi, cosine)\n else:\n phi = torch.where(cosine > self.th, phi, cosine - self.mm)\n\n one_hot = torch.zeros(cosine.size(), device='cuda')\n one_hot.scatter_(1, labels.view(-1, 1).long(), 1)\n # -------------torch.where(out_i = {x_i if condition_i else y_i) -------------\n output = (one_hot * phi) + ((1.0 - one_hot) * cosine)\n output *= self.s\n loss1 = self.classify_loss(output, labels)\n loss2 = self.classify_loss(cosine, labels)\n gamma=1\n loss=(loss1+gamma*loss2)/(1+gamma)\n return loss",
"_____no_output_____"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
4a207543f1a7e943a5a9819b56ffb525b98e856d
| 139,592 |
ipynb
|
Jupyter Notebook
|
10_Recommendation System/BookRecomSolution.ipynb
|
kunalk3/eR_task
|
4d717680576bc62793c42840e772a28fb3f6c223
|
[
"MIT"
] | 1 |
2021-02-25T13:58:57.000Z
|
2021-02-25T13:58:57.000Z
|
10_Recommendation System/BookRecomSolution.ipynb
|
kunalk3/eR_task
|
4d717680576bc62793c42840e772a28fb3f6c223
|
[
"MIT"
] | null | null | null |
10_Recommendation System/BookRecomSolution.ipynb
|
kunalk3/eR_task
|
4d717680576bc62793c42840e772a28fb3f6c223
|
[
"MIT"
] | null | null | null | 40.828312 | 13,592 | 0.339625 |
[
[
[
"## Load Library And Data",
"_____no_output_____"
]
],
[
[
"# importing the library\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns",
"_____no_output_____"
],
[
"# to know the ecoding type\nimport chardet\nwith open('E:\\\\Recommendation System\\\\book.csv', 'rb') as rawdata:\n result = chardet.detect(rawdata.read(100000))\nresult",
"_____no_output_____"
]
],
[
[
"- The encoding standard used in the input file is ISO-8859-1\n- Hence, to minimize the error while loading the input data, we are passing this encoding standard",
"_____no_output_____"
]
],
[
[
"# load the dataset 1\nbooks_data = pd.read_csv('E:\\\\1_ExcelR_data\\\\0_assignmentsData\\\\10_Recommendation System\\\\book.csv', encoding='ISO-8859-1')\nbooks_data",
"_____no_output_____"
]
],
[
[
"## Data Cleaning And EDA",
"_____no_output_____"
]
],
[
[
"# drop unnecessary column\nbooks_data.drop(['Unnamed: 0'], axis = 1, inplace=True)\nbooks_data.head()",
"_____no_output_____"
],
[
"books_data.sort_values(by=['User.ID'])",
"_____no_output_____"
],
[
"# data dimenssion\nbooks_data.shape",
"_____no_output_____"
],
[
"# data description\nbooks_data.describe().T",
"_____no_output_____"
],
[
"# dataframes types\nbooks_data.dtypes",
"_____no_output_____"
],
[
"# informartion of the data\nbooks_data.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 10000 entries, 0 to 9999\nData columns (total 3 columns):\nUser.ID 10000 non-null int64\nBook.Title 10000 non-null object\nBook.Rating 10000 non-null int64\ndtypes: int64(2), object(1)\nmemory usage: 234.5+ KB\n"
]
],
[
[
"- No null values\n- two features are numeric\n- one feature is categorical",
"_____no_output_____"
]
],
[
[
"books_data.describe()['Book.Rating']",
"_____no_output_____"
]
],
[
[
"- max rating = 10\n- min rating = 1\n- average rating = 7.5",
"_____no_output_____"
]
],
[
[
"# find the minimum and maximum ratings\nprint('Minimum rating is:', (books_data['Book.Rating'].min()))\nprint('Maximum rating is:', (books_data['Book.Rating'].max()))",
"Minimum rating is: 1\nMaximum rating is: 10\n"
]
],
[
[
"- Most of the books are getting max ratings as 8\n- Minimum ratings as 1 are very few books",
"_____no_output_____"
]
],
[
[
"# Unique Users and ratings\nprint(\"Total data \\n\")\nprint(\"Total no of ratings :\",books_data.shape[0])\nprint(\"Total No of Users :\", len(np.unique(books_data['User.ID'])))\nprint(\"Total No of products :\", len(np.unique(books_data['Book.Rating'])))",
"Total data \n\nTotal no of ratings : 10000\nTotal No of Users : 2182\nTotal No of products : 10\n"
],
[
"# find out the average rating for each and every books\nAverage_ratings = pd.DataFrame(books_data.groupby('Book.Title')['Book.Rating'].mean())\nAverage_ratings.head(3)",
"_____no_output_____"
]
],
[
[
"- Average ratings received by readers as,\n\n1) 8.0 - Jason, Madison &\n\n\n2) 6.0 - Other Stories;Merril;1985;McClelland &\n\n\n3) 4.0 - Repairing PC Drives &\t",
"_____no_output_____"
],
[
"## Visualize The Data",
"_____no_output_____"
]
],
[
[
"# Check the distribution of the rating\nplt.figure(figsize=(10, 5))\nsns.countplot(\"Book.Rating\", data = books_data)\nplt.title('Rating distrubutions', fontsize = 20)\nplt.xlabel(\"Book ratings\", fontsize = 15)\nplt.ylabel(\"Total counts\", fontsize = 15)\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Building The Recommender",
"_____no_output_____"
]
],
[
[
"# make pivot table\nbook_users = books_data.pivot_table( index='User.ID', columns = books_data['Book.Title'], values='Book.Rating')\nbook_users",
"_____no_output_____"
],
[
"# find correlation between \"10 Commandments Of Dating\" and other books\nbook_read = book_users[\"10 Commandments Of Dating\"]\nsimilarity_with_other_books = book_users.corrwith(book_read) \nsimilarity_with_other_books = similarity_with_other_books.sort_values(ascending=False)\nsimilarity_with_other_books.head(10)",
"_____no_output_____"
],
[
"# imputer NaN with 0\nbook_users.fillna(0, inplace = True)\nbook_users",
"_____no_output_____"
],
[
"# collecting unique user id\nbook_users.index = books_data['User.ID'].unique()\nbook_users.head()",
"_____no_output_____"
]
],
[
[
"## Computation with Cosine Distance",
"_____no_output_____"
]
],
[
[
"# calculating Cosine Similarities between Users\nfrom sklearn.metrics import pairwise_distances\nfrom scipy.spatial.distance import cosine, correlation",
"_____no_output_____"
],
[
"# Cosine similarities values (using distance matrics)\nuser_sim = 1 - pairwise_distances(book_users.values, metric = 'cosine')\nuser_sim",
"_____no_output_____"
],
[
"# store the result (Cosine Similarities values) in a dataframe\nuser_similarity_df = pd.DataFrame(user_sim)",
"_____no_output_____"
],
[
"user_similarity_df",
"_____no_output_____"
],
[
"# set the index and columns to userId\nuser_similarity_df.index = books_data['User.ID'].unique()\nuser_similarity_df.columns = books_data['User.ID'].unique()",
"_____no_output_____"
],
[
"books_data",
"_____no_output_____"
],
[
"user_similarity_df.iloc[0:7, 0:7]",
"_____no_output_____"
],
[
"np.fill_diagonal(user_sim, 0)\nuser_similarity_df.iloc[0:7, 0:7]",
"_____no_output_____"
]
],
[
[
"## Most Similarity",
"_____no_output_____"
]
],
[
[
"# Most similar readers\nuser_similarity_df.idxmax(axis = 1)[0:20]",
"_____no_output_____"
],
[
"# find out book read by two users 276780 and 276726\nbooks_data[(books_data['User.ID'] == 276780) | (books_data['User.ID'] == 276726)]",
"_____no_output_____"
],
[
"# user 276780 books\nuser_276780 = books_data[books_data['User.ID'] == 276780]\nuser_276780",
"_____no_output_____"
],
[
"# user 276726 books\nuser_276726 = books_data[books_data['User.ID'] == 276726]\nuser_276726",
"_____no_output_____"
]
],
[
[
"## Recommendations",
"_____no_output_____"
]
],
[
[
"# meging two user book data into single one\npd.merge(user_276780, user_276726, on = 'Book.Title', how = 'outer')",
"_____no_output_____"
]
],
[
[
"- User __176780__ read two books titled __'Wild Animus'__ and __'Airframe'__ which is rated as __7.0__\n- User __276726__ read only one book titled __'Classical Mythology'__ which is rated as __5.0__\n- So based on ratings given by readers, _the book 'Classical Mythology' is recommended to User 176780 and the books 'Wild Animus' and 'Airframe' are recommended to User 276726_",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4a207dba210b6d43fb6056ad769abc7c79cc5f1a
| 124,018 |
ipynb
|
Jupyter Notebook
|
notebooks/query-examples.ipynb
|
4dn-dcic/higlass-multicontact
|
39b62880b99e69ac70015ee29e9fb3a6446ec0ed
|
[
"MIT"
] | null | null | null |
notebooks/query-examples.ipynb
|
4dn-dcic/higlass-multicontact
|
39b62880b99e69ac70015ee29e9fb3a6446ec0ed
|
[
"MIT"
] | null | null | null |
notebooks/query-examples.ipynb
|
4dn-dcic/higlass-multicontact
|
39b62880b99e69ac70015ee29e9fb3a6446ec0ed
|
[
"MIT"
] | null | null | null | 184.550595 | 21,664 | 0.905417 |
[
[
[
"# Example Feature-Based Cluster Queries",
"_____no_output_____"
]
],
[
[
"%load_ext autoreload\n%autoreload 2\n%matplotlib inline\n\nimport os\nimport sys\n\nmodule_path = os.path.abspath(os.path.join('..'))\nif module_path not in sys.path:\n sys.path.append(module_path)",
"The autoreload extension is already loaded. To reload it, use:\n %reload_ext autoreload\n"
],
[
"import h5py\nimport math\nimport numpy as np",
"_____no_output_____"
],
[
"chrom = 'chr7'\nbin_size = 100000\n\ncluster_tsv_file = '../data/hg19/gm12878_triplets_chr7_100kb_pooled.tsv.gz'\ncluster_h5_file = '../data/hg19/gm12878_triplets_chr7_100kb_pooled.h5'\n\nchrom_sizes_file = '../data/hg19/hg19.chrom.sizes'\n\ntads_arrowhead_bed_file = '../data/hg19/Rao_RepH_GM12878_Arrowhead.sorted.bed'\ntads_arrowhead_sqlite_file = '../data/hg19/Rao_RepH_GM12878_Arrowhead.sorted.sqlite'\n\nchromhmm_bed_file = '../data/hg19/wgEncodeBroadHmmGm12878HMM.bed.gz'\nchromhmm_sqlite_file = '../data/hg19/wgEncodeBroadHmmGm12878HMM.sqlite'\n\nsubcompartments_bed_file = '../data/hg19/GSE63525_GM12878_subcompartments.bed.gz'\nsubcompartments_sqlite_file = '../data/hg19/GSE63525_GM12878_subcompartments.sqlite'\n\nloop_extents_bed_file = '../data/hg19/GSE63525_GM12878_replicate_HiCCUPS_loop_extent_list.bed.gz'\nloop_extents_sqlite_file = '../data/hg19/GSE63525_GM12878_replicate_HiCCUPS_loop_extent_list.sqlite'",
"_____no_output_____"
],
[
"from hgmc.utils import get_chrom_sizes\n\nchrom_size = get_chrom_sizes(chrom_sizes_file).get(chrom)\nnum_bins = math.ceil(chrom_size / bin_size)",
"_____no_output_____"
]
],
[
[
"# Load Features",
"_____no_output_____"
]
],
[
[
"from hgmc.bed import sql_features\nfrom utils import natural_sort\n\nchromhmm_features = natural_sort(sql_features(chromhmm_sqlite_file))\nchromhmm_features",
"_____no_output_____"
],
[
"from hgmc.bed import sql_coverage\n\ntad_coverage_100kb = sql_coverage(\n tads_arrowhead_sqlite_file,\n chrom=chrom,\n bin_size=bin_size,\n # At least 80% of the TAD needs to be in the bin to count\n count_at_feat_cov=0.8,\n rel_count_at_bin_cov=True,\n timeit=True\n)\nprint(f'{tad_coverage_100kb.astype(bool).sum()} bins contain TADs with at most {tad_coverage_100kb.max()} TADs per bin')\n\nactive_promoter_coverage_100kb = sql_coverage(\n chromhmm_sqlite_file,\n chrom=chrom,\n bin_size=bin_size,\n features='1_Active_Promoter',\n # The entire promoter needs to be in the bin to count\n count_at_feat_cov=1.0,\n rel_count_at_bin_cov=True,\n timeit=True\n)\nprint(f'{active_promoter_coverage_100kb.astype(bool).sum()} bins contain active promoters with at most {active_promoter_coverage_100kb.max()} active promoters per bin')\n\nstrong_enhancer_coverage_100kb = sql_coverage(\n chromhmm_sqlite_file,\n chrom=chrom,\n bin_size=bin_size,\n features=['4_Strong_Enhancer', '5_Strong_Enhancer'],\n # The entire enhancer needs to be in the bin to count\n count_at_feat_cov=1.0,\n rel_count_at_bin_cov=True,\n timeit=True\n)\nprint(f'{strong_enhancer_coverage_100kb.astype(bool).sum()} bins contain enhancers with at most {strong_enhancer_coverage_100kb.max()} enhancers per bin')\n\na_compartment_coverage_100kb = sql_coverage(\n subcompartments_sqlite_file,\n chrom=chrom,\n bin_size=bin_size,\n features=['A1', 'A2'],\n # At least 80% of the bin need to be an A compartment to count\n count_at_bin_cov=0.8,\n rel_count_at_bin_cov=True,\n timeit=True\n)\nprint(f'{a_compartment_coverage_100kb.astype(bool).sum()} bins are A compartment')\n\nb_compartment_coverage_100kb = sql_coverage(\n subcompartments_sqlite_file,\n chrom=chrom,\n bin_size=bin_size,\n features=['B1', 'B2', 'B3', 'B4'],\n # At least 80% of the bin need to be an A compartment to count\n count_at_bin_cov=0.8,\n rel_count_at_bin_cov=True,\n timeit=True\n)\nprint(f'{b_compartment_coverage_100kb.astype(bool).sum()} bins are B 
compartment')\n\nloop_extent_coverage_100kb = sql_coverage(\n loop_extents_sqlite_file,\n chrom=chrom,\n bin_size=bin_size,\n # Only count if the entire loop extent is in the bin\n count_at_feat_cov=1.0,\n rel_count_at_bin_cov=True,\n timeit=True\n)\nprint(f'{loop_extent_coverage_100kb.astype(bool).sum()} bins contain loops with at most {loop_extent_coverage_100kb.max()} loops per bin')",
"Took 0.054 sec\n883 bins contain TADs with at most 3 TADs per bin\nTook 3.931 sec\n400 bins contain active promoters with at most 8 active promoters per bin\nTook 4.632 sec\n517 bins contain enhancers with at most 38 enhancers per bin\nTook 0.082 sec\n576 bins are A compartment\nTook 0.084 sec\n936 bins are B compartment\nTook 0.097 sec\n1107 bins contain loops with at most 7 loops per bin\n"
],
[
"all_features = [\n ('TADs', tad_coverage_100kb),\n ('Active Promoters', active_promoter_coverage_100kb),\n ('Strong Enhancers', strong_enhancer_coverage_100kb),\n ('A Compartments', a_compartment_coverage_100kb),\n ('B Compartments', b_compartment_coverage_100kb),\n ('Loops', loop_extent_coverage_100kb),\n]",
"_____no_output_____"
]
],
[
[
"# Queries\n## Find all triplets that span A-only compartments",
"_____no_output_____"
]
],
[
[
"from hgmc.clusters import clusters_to_bins, query_by_features, verify_queried_clusters\nfrom hgmc.plots import plot_cluster_feature_distribution\n\nwith h5py.File(cluster_h5_file, 'r') as h5:\n #####\n query = [(a_compartment_coverage_100kb.astype(bool), 3)]\n #####\n \n a_cluster_ids = query_by_features(h5, query, verbose=True, verify=True, timeit=True)\n print(f'Found {ab_cluster_ids.size} clusters')",
"Mask creation took 0.54 sec\nbin_to_cluster and cluster_to_bin extraction took 6.56 sec\nBin starts and ends extraction took 0.00 sec\nGetting the unique cluster IDs took 1.14 sec\nTotal query took 8.24 sec\nVerify results...\nMask generation took 0.4 sec\nCluster start/stop extraction took 2.5 sec\nVrange calculation took 0.3 sec\nCluster-to-bin extraction took 3.4 sec\nBin extraction took 0.0 sec\nTotal took 6.6 sec\nHooray! The clusters conform to the query.\nFound 22481057 clusters\n"
],
[
"with h5py.File(cluster_h5_file, 'r') as h5:\n a_cluster_bins = clusters_to_bins(h5, a_cluster_ids)\n unique_a_cluster_bins = np.unique(a_cluster_bins[:, 1])\n print(f'Found {a_cluster_bins.shape[0]} bins ({unique_a_cluster_bins.size} bins)')",
"Found 9740508 bins (576 bins)\n"
],
[
"with h5py.File(cluster_h5_file, 'r') as h5:\n plot_cluster_feature_distribution(h5, a_cluster_bins, all_features, figsize=(12, 6))",
"_____no_output_____"
]
],
[
[
"## Find all triplets that span A&B compartments",
"_____no_output_____"
]
],
[
[
"with h5py.File(cluster_h5_file, 'r') as h5:\n #####\n query = [(a_compartment_coverage_100kb.astype(bool), 1), (b_compartment_coverage_100kb.astype(bool), 1)]\n #####\n \n ab_cluster_ids = query_by_features(h5, query, verbose=True, verify=True)\n print(f'Found {ab_cluster_ids.size} clusters')",
"Verify results...\nHooray! The clusters conform to the query.\nFound 22481057 clusters\n"
],
[
"with h5py.File(cluster_h5_file, 'r') as h5:\n ab_cluster_bins = clusters_to_bins(h5, ab_cluster_ids)\n unique_ab_cluster_bins = np.unique(ab_cluster_bins[:, 1])\n print(f'Found {ab_cluster_bins.shape[0]} bins ({unique_ab_cluster_bins.size} bins)')",
"Found 67443171 bins (1562 bins)\n"
],
[
"with h5py.File(cluster_h5_file, 'r') as h5:\n plot_cluster_feature_distribution(h5, ab_cluster_bins, all_features, figsize=(12, 6))",
"_____no_output_____"
]
],
[
[
"## Find all triplets that anchor in 3 peaks/loops",
"_____no_output_____"
]
],
[
[
"print(f'There are {loop_extent_coverage_100kb.astype(bool).sum()} bins with peaks')\n\nwith h5py.File(cluster_h5_file, 'r') as h5:\n #####\n query = [(loop_extent_coverage_100kb.astype(bool), 3)]\n #####\n \n loop_cluster_ids = query_by_features(h5, query, verbose=True, verify=True)\n print(f'Found {loop_cluster_ids.size} clusters')",
"There are 1107 bins with peaks\nVerify results...\nHooray! The clusters conform to the query.\nFound 15403326 clusters\n"
],
[
"with h5py.File(cluster_h5_file, 'r') as h5:\n loop_cluster_bins = clusters_to_bins(h5, loop_cluster_ids)\n unique_loop_cluster_bins = np.unique(loop_cluster_bins[:, 1])\n print(f'Found {loop_cluster_bins.shape[0]} bins ({unique_loop_cluster_bins.size} bins)')",
"Found 46209978 bins (1107 bins)\n"
],
[
"with h5py.File(cluster_h5_file, 'r') as h5:\n plot_cluster_feature_distribution(h5, loop_cluster_bins, all_features, figsize=(12,6))",
"_____no_output_____"
]
],
[
[
"## Find all triplets that anchor in 3 TADs",
"_____no_output_____"
]
],
[
[
"print(f'There are {tad_coverage_100kb.astype(bool).sum()} bins with TADs')\n\nwith h5py.File(cluster_h5_file, 'r') as h5:\n #####\n query = [(tad_coverage_100kb.astype(bool), 3)]\n #####\n \n tad_cluster_ids = query_by_features(h5, query, verbose=True, verify=True)\n print(f'Found {tad_cluster_ids.size} clusters')",
"There are 883 bins with TADs\nVerify results...\nHooray! The clusters conform to the query.\nFound 9052583 clusters\n"
],
[
"with h5py.File(cluster_h5_file, 'r') as h5:\n tad_cluster_bins = clusters_to_bins(h5, tad_cluster_ids)\n unique_tad_cluster_bins = np.unique(tad_cluster_bins[:, 1])\n print(f'Found {tad_cluster_bins.shape[0]} bins ({unique_tad_cluster_bins.size} bins)')",
"Found 27157749 bins (882 bins)\n"
],
[
"with h5py.File(cluster_h5_file, 'r') as h5:\n plot_cluster_feature_distribution(h5, tad_cluster_bins, all_features, figsize=(12,6))",
"_____no_output_____"
]
],
[
[
"## Find all triplets that anchor in at least 1 promoter and at least 2 active enhancers",
"_____no_output_____"
]
],
[
[
"with h5py.File(cluster_h5_file, 'r') as h5:\n query = [\n (active_promoter_coverage_100kb.astype(bool), 1),\n (strong_enhancer_coverage_100kb.astype(bool), 2)\n ]\n promoter_enhancer_cluster_ids = query_by_features(h5, query, verbose=True, verify=True)\n print(f'Found {promoter_enhancer_cluster_ids.size} clusters')",
"Verify results...\nHooray! The clusters conform to the query.\nFound 1040286 clusters\n"
],
[
"with h5py.File(cluster_h5_file, 'r') as h5:\n promoter_enhancer_cluster_bins = clusters_to_bins(h5, promoter_enhancer_cluster_ids)\n unique_promoter_enhancer_cluster_bins = np.unique(promoter_enhancer_cluster_bins[:, 1])\n print(f'Found {promoter_enhancer_cluster_bins.shape[0]} bins ({unique_promoter_enhancer_cluster_bins.size} bins)')",
"Found 3120858 bins (617 bins)\n"
],
[
"with h5py.File(cluster_h5_file, 'r') as h5:\n plot_cluster_feature_distribution(h5, promoter_enhancer_cluster_bins, all_features, figsize=(12,6))",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
4a20817f0998abd21e8ad473bfcea8e28c22e827
| 318,028 |
ipynb
|
Jupyter Notebook
|
fumanelli-ez/ew-tWin11-inf.ipynb
|
rljack2002/infExampleCovidEW
|
351e0605c80a51a2cd285136d7a05d969ac6c6fd
|
[
"MIT"
] | 2 |
2020-10-28T17:01:05.000Z
|
2020-10-30T11:07:20.000Z
|
fumanelli-ez/ew-tWin11-inf.ipynb
|
rljack2002/infExampleCovidEW
|
351e0605c80a51a2cd285136d7a05d969ac6c6fd
|
[
"MIT"
] | null | null | null |
fumanelli-ez/ew-tWin11-inf.ipynb
|
rljack2002/infExampleCovidEW
|
351e0605c80a51a2cd285136d7a05d969ac6c6fd
|
[
"MIT"
] | null | null | null | 435.654795 | 130,388 | 0.931251 |
[
[
[
"## Eng+Wales well-mixed example model \n\nThis is the inference notebook with increased inference window. There are various model variants as encoded by `expt_params_local` and `model_local`, which are shared by the notebooks in a given directory.\n\nOutputs of this notebook:\n(same as `inf` notebook with added `tWin` label in filename)\n\nNOTE carefully : `Im` compartment is cumulative deaths, this is called `D` elsewhere",
"_____no_output_____"
],
[
"### Start notebook\n(the following line is for efficient parallel processing)",
"_____no_output_____"
]
],
[
[
"%env OMP_NUM_THREADS=1",
"env: OMP_NUM_THREADS=1\n"
],
[
"%matplotlib inline\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport pyross\nimport time \nimport pandas as pd\nimport matplotlib.image as mpimg\nimport pickle\nimport os\nimport pprint\nimport scipy.stats",
"_____no_output_____"
],
[
"# comment these before commit\n#print(pyross.__file__)\n#print(os.getcwd())",
"_____no_output_____"
],
[
"from ew_fns import *\nimport expt_params_local\nimport model_local",
"_____no_output_____"
]
],
[
[
"### switches etc",
"_____no_output_____"
]
],
[
[
"verboseMod=False ## print ancillary info about the model? (would usually be False, for brevity)\n\n## Calculate things, or load from files ?\ndoInf = False ## do inference, or load it ?\ndoHes = True ## Hessian may take a few minutes !! does this get removed? what to do?\n\n## time unit is one week\ndaysPerWeek = 7.0\n\n## these are params that might be varied in different expts\nexptParams = expt_params_local.getLocalParams() \n\n## over-ride params for inference window\nexptParams['timeLast'] = 11\nexptParams['forecastTime'] = 11-exptParams['timeLast']\nexptParams['pikFileRoot'] += '-tWin11'\n\npprint.pprint(exptParams)\n\n## this is used for filename handling throughout\npikFileRoot = exptParams['pikFileRoot']",
"{'careFile': '../data/CareHomes.csv',\n 'chooseCM': 'fumanelliEtAl',\n 'dataFile': '../data/OnsData.csv',\n 'estimatorTol': 1e-08,\n 'exCare': True,\n 'forecastTime': 0,\n 'freeInitPriors': ['E', 'A', 'Is1', 'Is2', 'Is3'],\n 'infOptions': {'cma_population': 32,\n 'cma_processes': None,\n 'ftol': 5e-05,\n 'global_atol': 1.0,\n 'global_max_iter': 1500,\n 'local_max_iter': 400},\n 'inferBetaNotAi': True,\n 'numCohorts': 16,\n 'numCohortsPopData': 19,\n 'pikFileRoot': 'ewMod-tWin11',\n 'popFile': '../data/EWAgeDistributedNew.csv',\n 'timeLast': 11,\n 'timeZero': 0}\n"
]
],
[
[
"### convenient settings",
"_____no_output_____"
]
],
[
[
"np.set_printoptions(precision=3) \npltAuto = True\nplt.rcParams.update({'figure.autolayout': pltAuto})\nplt.rcParams.update({'font.size': 14})",
"_____no_output_____"
]
],
[
[
"## LOAD MODEL",
"_____no_output_____"
]
],
[
[
"loadModel = model_local.loadModel(exptParams,daysPerWeek,verboseMod) \n\n## should use a dictionary but...\n[ numCohorts, fi, N, Ni, model_spec, estimator, contactBasis, interventionFn,\n modParams, priorsAll, initPriorsLinMode, obsDeath, fltrDeath, \n simTime, deathCumulativeDat ] = loadModel",
"** model\n\n{'A': {'infection': [], 'linear': [['E', 'gammaE'], ['A', '-gammaA']]},\n 'E': {'infection': [['A', 'beta'],\n ['Is1', 'beta'],\n ['Is2', 'betaLate'],\n ['Is3', 'betaLate']],\n 'linear': [['E', '-gammaE']]},\n 'Im': {'infection': [], 'linear': [['Is3', 'cfr*gammaIs3']]},\n 'Is1': {'infection': [],\n 'linear': [['A', 'gammaA'],\n ['Is1', '-alphabar*gammaIs1'],\n ['Is1', '-alpha*gammaIs1']]},\n 'Is2': {'infection': [],\n 'linear': [['Is1', 'alphabar*gammaIs1'], ['Is2', '-gammaIs2']]},\n 'Is3': {'infection': [],\n 'linear': [['Is2', 'gammaIs2'],\n ['Is3', '-cfrbar*gammaIs3'],\n ['Is3', '-cfr*gammaIs3']]},\n 'S': {'infection': [['A', '-beta'],\n ['Is1', '-beta'],\n ['Is2', '-betaLate'],\n ['Is3', '-betaLate']],\n 'linear': []},\n 'classes': ['S', 'E', 'A', 'Is1', 'Is2', 'Is3', 'Im']}\n\n** using getPriorsControlGMob\n"
]
],
[
[
"### Inspect most likely trajectory for model with prior mean params",
"_____no_output_____"
]
],
[
[
"x0_lin = estimator.get_mean_inits(initPriorsLinMode, obsDeath[0], fltrDeath)\nguessTraj = estimator.integrate( x0_lin, exptParams['timeZero'], simTime, simTime+1)\n\n## plots\n\nyesPlot = model_spec['classes'].copy()\nyesPlot.remove('S')\nplt.yscale('log')\nfor lab in yesPlot :\n indClass = model_spec['classes'].index(lab) \n totClass = np.sum(guessTraj[:,indClass*numCohorts:(indClass+1)*numCohorts],axis=1)\n plt.plot( N * totClass,'-',lw=3,label=lab)\nplt.plot(N*np.sum(obsDeath,axis=1),'X',label='data')\nplt.legend(fontsize=14,bbox_to_anchor=(1, 1.0))\nplt.xlabel('time in weeks')\nplt.ylabel('class population')\nplt.show() ; plt.close()\n\nindClass = model_spec['classes'].index('Im')\nplt.yscale('log')\nfor coh in range(numCohorts):\n plt.plot( N*guessTraj[:,coh+indClass*numCohorts],label='m{c:d}'.format(c=coh) )\nplt.xlabel('time in weeks')\nplt.ylabel('cumul deaths by age cohort')\nplt.legend(fontsize=8,bbox_to_anchor=(1, 1.0))\nplt.show() ; plt.close()",
"_____no_output_____"
]
],
[
[
"## INFERENCE\n\nparameter count\n\n* 32 for age-dependent Ai and Af (or beta and Af)\n* 2 (step-like) or 3 (NPI-with-easing) for lockdown time and width (+easing param)\n* 1 for projection of initial condition along mode\n* 5 for initial condition in oldest cohort\n* 5 for the gammas\n* 1 for beta in late stage\n\ntotal: 46 (step-like) or 47 (with-easing)\n\nThe following computation with CMA-ES takes some minutes depending on compute power, it should use multiple CPUs efficiently, if available. The result will vary (slightly) according to the random seed, can be controlled by passing `cma_random_seed` to `latent_infer`",
"_____no_output_____"
]
],
[
[
"def runInf() : \n\n infResult = estimator.latent_infer(obsDeath, fltrDeath, simTime, \n priorsAll, \n initPriorsLinMode, \n generator=contactBasis, \n intervention_fun=interventionFn, \n tangent=False, \n verbose=True, \n enable_global=True,\n enable_local =True,\n **exptParams['infOptions'],\n )\n \n return infResult \n\nif doInf:\n ## do the computation\n elapsedInf = time.time() \n\n infResult = runInf() \n\n elapsedInf = time.time() - elapsedInf\n print('** elapsed time',elapsedInf/60.0,'mins')\n\n # save the answer\n opFile = pikFileRoot + \"-inf.pik\"\n print('opf',opFile)\n with open(opFile, 'wb') as f: \n pickle.dump([infResult,elapsedInf],f)\n\nelse:\n ## load a saved computation\n print(' Load data')\n \n# here we load the data \n# (this may be the file that we just saved, it is deliberately outside the if: else:)\nipFile = pikFileRoot + \"-inf.pik\"\nprint('ipf',ipFile)\nwith open(ipFile, 'rb') as f: \n [infResult,elapsedInf] = pickle.load(f)",
" Load data\nipf ewMod-tWin11-inf.pik\n"
]
],
[
[
"#### unpack results",
"_____no_output_____"
]
],
[
[
"epiParamsMAP = infResult['params_dict']\nconParamsMAP = infResult['control_params_dict']\nx0_MAP = infResult['x0']\n\nCM_MAP = contactBasis.intervention_custom_temporal( interventionFn, \n **conParamsMAP)\n\nlogPinf = -estimator.minus_logp_red(epiParamsMAP, x0_MAP, obsDeath, fltrDeath, simTime, \n CM_MAP, tangent=False)\nprint('** measuredLikelihood',logPinf)\nprint('** logPosterior ',infResult['log_posterior'])\nprint('** logLikelihood',infResult['log_likelihood'])",
"** measuredLikelihood -369.75118298386315\n** logPosterior -284.01393909105116\n** logLikelihood -369.75118298386315\n"
]
],
[
[
"#### MAP dominant trajectory",
"_____no_output_____"
]
],
[
[
"estimator.set_params(epiParamsMAP)\nestimator.set_contact_matrix(CM_MAP)\ntrajMAP = estimator.integrate( x0_MAP, exptParams['timeZero'], simTime, simTime+1)\n\nyesPlot = model_spec['classes'].copy()\nyesPlot.remove('S')\nplt.yscale('log')\nfor lab in yesPlot :\n indClass = model_spec['classes'].index(lab) \n totClass = np.sum(trajMAP[:,indClass*numCohorts:(indClass+1)*numCohorts],axis=1)\n plt.plot( N * totClass,'-',lw=3,label=lab)\nplt.plot(N*np.sum(obsDeath,axis=1),'X',label='data')\nplt.xlabel('time in weeks')\nplt.ylabel('class population')\nplt.legend(fontsize=14,bbox_to_anchor=(1, 1.0))\nplt.show() ; plt.close()\n\nfig,axs = plt.subplots(1,2,figsize=(10,4.5))\n\ncohRanges = [ [x,x+4] for x in range(0,75,5) ]\n#print(cohRanges)\ncohLabs = [\"{l:d}-{u:d}\".format(l=low,u=up) for [low,up] in cohRanges ]\ncohLabs.append(\"75+\")\n\nax = axs[0]\nax.set_title('MAP (average dynamics)')\nmSize = 3\nminY = 0.12\nmaxY = 1.0\nindClass = model_spec['classes'].index('Im')\nax.set_yscale('log')\nax.set_ylabel('cumulative M (by cohort)')\nax.set_xlabel('time/weeks')\nfor coh in reversed(list(range(numCohorts))) :\n ax.plot( N*trajMAP[:,coh+indClass*numCohorts],'o-',label=cohLabs[coh],ms=mSize )\n maxY = np.maximum( maxY, np.max(N*trajMAP[:,coh+indClass*numCohorts]))\n#ax.legend(fontsize=8,bbox_to_anchor=(1, 1.0))\nmaxY *= 1.6\nax.set_ylim(bottom=minY,top=maxY)\n#plt.show() ; plt.close()\n\nax = axs[1]\nax.set_title('data')\nax.set_xlabel('time/weeks')\nindClass = model_spec['classes'].index('Im')\nax.set_yscale('log')\nfor coh in reversed(list(range(numCohorts))) :\n ax.plot( N*obsDeath[:,coh],'o-',label=cohLabs[coh],ms=mSize )\n## keep the same as other panel\nax.set_ylim(bottom=minY,top=maxY)\n\nax.legend(fontsize=10,bbox_to_anchor=(1, 1.0))\n#plt.show() ; plt.close()\n#plt.savefig('ageMAPandData.png')\nplt.show(fig)",
"_____no_output_____"
]
],
[
[
"#### sanity check : plot the prior and inf value for one or two params",
"_____no_output_____"
]
],
[
[
"(likFun,priFun,dim) = pyross.evidence.latent_get_parameters(estimator,\n obsDeath, fltrDeath, simTime, \n priorsAll, \n initPriorsLinMode, \n generator=contactBasis, \n intervention_fun=interventionFn, \n tangent=False, \n )\n\ndef showInfPrior(xLab) :\n fig = plt.figure(figsize=(4,4))\n dimFlat = np.size(infResult['flat_params'])\n ## magic to work out the index of this param in flat_params\n jj = infResult['param_keys'].index(xLab)\n xInd = infResult['param_guess_range'][jj] \n\n ## get the range\n xVals = np.linspace( *priorsAll[xLab]['bounds'], 100 )\n\n #print(infResult['flat_params'][xInd])\n pVals = []\n checkVals = []\n for xx in xVals :\n flatP = np.zeros( dimFlat )\n flatP[xInd] = xx\n pdfAll = np.exp( priFun.logpdf(flatP) )\n pVals.append( pdfAll[xInd] )\n #checkVals.append( scipy.stats.norm.pdf(xx,loc=0.2,scale=0.1) )\n\n plt.plot(xVals,pVals,'-',label='prior')\n infVal = infResult['flat_params'][xInd] \n infPdf = np.exp( priFun.logpdf(infResult['flat_params']) )[xInd]\n plt.plot([infVal],[infPdf],'ro',label='inf')\n plt.xlabel(xLab)\n upperLim = 1.05*np.max(pVals)\n plt.ylim(0,upperLim)\n #plt.plot(xVals,checkVals)\n plt.legend()\n plt.show(fig) ; plt.close()\n\n#print('**params\\n',infResult['flat_params'])\n#print('**logPrior\\n',priFun.logpdf(infResult['flat_params']))\n \nshowInfPrior('gammaE')",
"_____no_output_____"
]
],
[
[
"## Hessian matrix of log-posterior\n(this can take a few minutes, it does not make use of multiple cores)",
"_____no_output_____"
]
],
[
[
"if doHes:\n \n ## this eps amounts to a perturbation of approx 1% on each param\n ## (1/4) power of machine epsilon is standard for second deriv\n xx = infResult['flat_params']\n eps = 100 * xx*( np.spacing(xx)/xx )**(0.25) \n \n #print('**params\\n',infResult['flat_params'])\n #print('** rel eps\\n',eps/infResult['flat_params']) \n \n CM_MAP = contactBasis.intervention_custom_temporal( interventionFn, \n **conParamsMAP)\n estimator.set_params(epiParamsMAP)\n estimator.set_contact_matrix(CM_MAP)\n \n start = time.time()\n hessian = estimator.latent_hessian(obs=obsDeath, fltr=fltrDeath,\n Tf=simTime, generator=contactBasis, \n infer_result=infResult, \n intervention_fun=interventionFn,\n eps=eps, tangent=False, fd_method=\"central\",\n inter_steps=0)\n end = time.time()\n print('time',(end-start)/60,'mins')\n \n opFile = pikFileRoot + \"-hess.npy\"\n print('opf',opFile)\n with open(opFile, 'wb') as f: \n np.save(f,hessian)\n\nelse : \n print('Load hessian')\n\n# reload in all cases (even if we just saved it)\nipFile = pikFileRoot + \"-hess.npy\"\ntry:\n print('ipf',ipFile)\n with open(ipFile, 'rb') as f: \n hessian = np.load(f)\nexcept (OSError, IOError) : \n print('... error loading hessian')\n hessian = None\n ",
"epsilon used for differentiation: [1.539e-03 6.026e-04 9.339e-04 2.437e-03 3.779e-03 7.915e-03 7.016e-03\n 5.029e-03 6.237e-03 7.772e-03 9.351e-03 1.017e-02 1.278e-02 1.460e-02\n 1.853e-02 3.264e-02 1.189e-03 2.512e-02 3.251e-02 2.842e-02 1.241e-02\n 1.250e-02 1.630e-03 1.247e-03 1.279e-03 1.571e-03 1.882e-03 1.758e-03\n 1.661e-03 1.921e-03 2.450e-03 2.113e-03 2.501e-03 3.252e-03 2.527e-03\n 3.583e-03 3.687e-03 3.179e-03 2.655e-02 3.903e-03 1.884e-02 6.590e-06\n 7.715e-07 3.007e-07 5.534e-08 7.331e-09 6.458e-09]\ntime 8.305729162693023 mins\nopf ewMod-tWin11-hess.npy\nipf ewMod-tWin11-hess.npy\n"
],
[
"#print(hessian)\nprint(\"** param vals\")\nprint(infResult['flat_params'],'\\n')\nif np.all(hessian) != None : \n print(\"** naive uncertainty v1 : reciprocal sqrt diagonal elements (x2)\")\n print( 2/np.sqrt(np.diagonal(hessian)) ,'\\n')\n print(\"** naive uncertainty v2 : sqrt diagonal elements of inverse (x2)\")\n print( 2*np.sqrt(np.diagonal(np.linalg.inv(hessian))) ,'\\n')",
"** param vals\n[1.264e-01 5.750e-02 8.184e-02 2.333e-01 3.324e-01 7.071e-01 6.021e-01\n 4.866e-01 5.146e-01 6.901e-01 8.831e-01 9.882e-01 1.063e+00 1.270e+00\n 1.745e+00 2.946e+00 1.129e-01 2.077e+00 2.930e+00 2.449e+00 1.022e+00\n 1.032e+00 1.365e-01 1.203e-01 1.245e-01 1.300e-01 1.653e-01 1.509e-01\n 1.400e-01 1.700e-01 2.350e-01 1.929e-01 2.416e-01 2.721e-01 2.449e-01\n 3.096e-01 3.217e-01 2.639e-01 2.237e+00 3.471e-01 1.784e+00 5.582e-04\n 6.394e-05 2.889e-05 4.802e-06 6.485e-07 5.477e-07] \n\n** naive uncertainty v1 : reciprocal sqrt diagonal elements (x2)\n[7.910e-02 4.843e-02 5.106e-02 3.129e-02 3.252e-02 4.287e-02 4.837e-02\n 3.428e-02 2.533e-02 3.069e-02 3.552e-02 3.204e-02 3.168e-02 4.010e-02\n 4.747e-02 3.700e-02 5.698e-03 1.849e-02 4.638e-02 3.370e-02 2.895e-02\n 2.944e-02 1.256e-01 1.064e-01 1.111e-01 9.987e-02 9.363e-02 6.075e-02\n 6.869e-02 6.440e-02 4.657e-02 4.001e-02 3.408e-02 2.624e-02 2.269e-02\n 2.335e-02 2.009e-02 7.642e-03 8.212e-03 1.718e-02 8.257e-02 1.146e-05\n 7.577e-06 4.279e-06 3.841e-06 5.798e-07 5.463e-07] \n\n** naive uncertainty v2 : sqrt diagonal elements of inverse (x2)\n[8.612e-02 4.966e-02 5.786e-02 9.097e-02 1.237e-01 1.906e-01 1.576e-01\n 1.247e-01 1.226e-01 1.331e-01 1.636e-01 1.654e-01 1.647e-01 1.878e-01\n 2.413e-01 3.852e-01 5.582e-02 3.599e-01 5.065e-01 4.171e-01 1.711e-01\n 1.714e-01 1.269e-01 1.066e-01 1.116e-01 1.136e-01 1.483e-01 1.248e-01\n 1.128e-01 1.401e-01 1.612e-01 1.148e-01 1.181e-01 1.039e-01 8.814e-02\n 8.523e-02 7.445e-02 5.192e-02 1.054e-01 1.010e-01 2.665e-01 1.476e-04\n 7.649e-05 4.034e-05 5.281e-06 5.814e-07 5.502e-07] \n\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
4a209354e8020536844a92a49550882bdd0aae57
| 6,839 |
ipynb
|
Jupyter Notebook
|
notebook/procs-tool-langs.ipynb
|
samlet/stack
|
47db17fd4fdab264032f224dca31a4bb1d19b754
|
[
"Apache-2.0"
] | 3 |
2020-01-11T13:55:38.000Z
|
2020-08-25T22:34:15.000Z
|
notebook/procs-tool-langs.ipynb
|
samlet/stack
|
47db17fd4fdab264032f224dca31a4bb1d19b754
|
[
"Apache-2.0"
] | null | null | null |
notebook/procs-tool-langs.ipynb
|
samlet/stack
|
47db17fd4fdab264032f224dca31a4bb1d19b754
|
[
"Apache-2.0"
] | 1 |
2021-01-01T05:21:44.000Z
|
2021-01-01T05:21:44.000Z
| 29.226496 | 166 | 0.489253 |
[
[
[
"%load_ext autoreload\n%autoreload 2",
"The autoreload extension is already loaded. To reload it, use:\n %reload_ext autoreload\n"
],
[
"from sagas.nlu.ruleset_procs import cached_chunks, get_main_domains\nget_main_domains('彼のパソコンは便利じゃない。', 'ja', 'knp')",
"_____no_output_____"
],
[
"from sagas.tool.dynamic_rules import dynamic_rule\ndata = {'lang': 'ja', \"sents\": '彼のパソコンは便利じゃない。'}\ndynamic_rule(data, \"\"\"subj('adj',ガ=kindof('artifact', 'n'))\"\"\", \n engine='knp')",
"predicate 便利じゃない。 便利だ ['pos', 'rel', 'lemma', 'word', 'stems', 'lang', 'sents']\n[['ガ', 1, 'パソコンは', 'パソコン', ['パソコン'], ['c_noun', 'x_n']]]\n\u001b[31m✔ (_none_) subj with pos is ('adj',): True, ガ is kind_of(artifact,n): True\u001b[0m\n\u001b[32m.. results 1\u001b[0m\n\u001b[33m{'kind_of/default/ガ'}\u001b[0m\n"
],
[
"from sagas.tool.dynamic_rules import dynamic_rule\n\ndata = {'lang': 'en', \"sents\": 'what will be the weather in three days?'}\ndynamic_rule(data, \"\"\"root(predict_aux(\n ud.__text('will') >> [ud.nsubj('what'), ud.dc_cat('weather')]))\"\"\")",
"aux_domains will will ['pos', 'head', 'lemma', 'word', 'stems', 'lang', 'sents']\n[('nsubj', '1', 'what', 'what', ['what'], ['c_pron', 'x_wp']),\n ('aux', '2', 'will', 'will', ['will'], ['c_aux', 'x_md']),\n ('cop', '3', 'be', 'be', ['be'], ['c_aux', 'x_vb']),\n ('det', '4', 'the', 'the', ['the'], ['c_det', 'x_dt']),\n ('nmod', '8', 'days', 'day', ['in', 'three', 'days'], ['c_noun', 'x_nns']),\n ('punct', '9', '?', '?', ['?'], ['c_punct', 'x_.'])]\n\u001b[31m✔ (_none_) root with pos is predicts: True\u001b[0m\naux_domains be be ['pos', 'head', 'lemma', 'word', 'stems', 'lang', 'sents']\n[('nsubj', '1', 'what', 'what', ['what'], ['c_pron', 'x_wp']),\n ('aux', '2', 'will', 'will', ['will'], ['c_aux', 'x_md']),\n ('cop', '3', 'be', 'be', ['be'], ['c_aux', 'x_vb']),\n ('det', '4', 'the', 'the', ['the'], ['c_det', 'x_dt']),\n ('nmod', '8', 'days', 'day', ['in', 'three', 'days'], ['c_noun', 'x_nns']),\n ('punct', '9', '?', '?', ['?'], ['c_punct', 'x_.'])]\n\u001b[31m✔ (_none_) root with pos is predicts: True\u001b[0m\n"
],
[
"from sagas.tool.dynamic_rules import dynamic_rule\n# \"大象是保护动物。\"\ndata = {'lang': 'id', \"sents\": 'Gajah adalah hewan yang dilindungi.'}\ndynamic_rule(data, \"\"\"verb(behaveof('protect', 'v'), head_acl=kindof('animal', 'n'), nsubj_pass=matchins('yang'))\"\"\")",
"verb_domains dilindungi dilindungi ['rel', 'lemma', 'word', 'stems', 'lang', 'sents']\n[('nsubj:pass', '4', 'yang', 'yang', ['yang'], ['c_pron', 'x_s--']),\n ('head_acl', '3', 'hewan', 'hewan', ['hewan'], ['c_noun', 'x_nsd'])]\n\u001b[31m✔ (_none_) verb with pos is behave_of(protect,v): True, head_acl is kind_of(animal,n): True, nsubj:pass is ins_match(equals: yang): True\u001b[0m\n\u001b[32m.. results 2\u001b[0m\n\u001b[33m{'kind_of/default/head_acl', 'behave_of/default/predicate'}\u001b[0m\n"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code"
]
] |
4a20a2d47cd912964632d5aeeb346a3d27a2e5a0
| 8,411 |
ipynb
|
Jupyter Notebook
|
floydhub/HTML/HTML_preloaded_weights.ipynb
|
mmCAtE/ss
|
62a4e1aad7df25da4865637616abdb2886288724
|
[
"MIT"
] | 2 |
2018-04-27T06:02:48.000Z
|
2018-05-02T11:59:41.000Z
|
floydhub/HTML/HTML_preloaded_weights.ipynb
|
mmCAtE/ss
|
62a4e1aad7df25da4865637616abdb2886288724
|
[
"MIT"
] | null | null | null |
floydhub/HTML/HTML_preloaded_weights.ipynb
|
mmCAtE/ss
|
62a4e1aad7df25da4865637616abdb2886288724
|
[
"MIT"
] | 1 |
2018-04-01T19:19:40.000Z
|
2018-04-01T19:19:40.000Z
| 36.411255 | 195 | 0.601355 |
[
[
[
"#Use this command to run it on floydhub: floyd run --gpu --env tensorflow-1.4 --data emilwallner/datasets/imagetocode/2:data --data emilwallner/datasets/html_models/1:weights --mode jupyter",
"_____no_output_____"
],
[
"from os import listdir\nfrom numpy import array\nfrom keras.preprocessing.text import Tokenizer, one_hot\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.models import Model\nfrom keras.utils import to_categorical\nfrom keras.layers import Embedding, TimeDistributed, RepeatVector, LSTM, concatenate , Input, Reshape, Dense, Flatten\nfrom keras.preprocessing.image import array_to_img, img_to_array, load_img\nfrom keras.applications.inception_resnet_v2 import InceptionResNetV2, preprocess_input\nimport numpy as np",
"_____no_output_____"
],
[
"# Load the images and preprocess them for inception-resnet\nimages = []\nall_filenames = listdir('resources/images/')\nall_filenames.sort()\nfor filename in all_filenames:\n images.append(img_to_array(load_img('resources/images/'+filename, target_size=(299, 299))))\nimages = np.array(images, dtype=float)\nimages = preprocess_input(images)\n\n# Run the images through inception-resnet and extract the features without the classification layer\nIR2 = InceptionResNetV2(weights=None, include_top=False, pooling='avg')\nIR2.load_weights('/data/models/inception_resnet_v2_weights_tf_dim_ordering_tf_kernels_notop.h5')\nfeatures = IR2.predict(images)",
"_____no_output_____"
],
[
"# We will cap each input sequence to 100 tokens\nmax_caption_len = 100\n# Initialize the function that will create our vocabulary \ntokenizer = Tokenizer(filters='', split=\" \", lower=False)\n\n# Read a document and return a string\ndef load_doc(filename):\n file = open(filename, 'r')\n text = file.read()\n file.close()\n return text\n\n# Load all the HTML files\nX = []\nall_filenames = listdir('resources/html/')\nall_filenames.sort()\nfor filename in all_filenames:\n X.append(load_doc('resources/html/'+filename))\n\n# Create the vocabulary from the html files\ntokenizer.fit_on_texts(X)\n\n# Add +1 to leave space for empty words\nvocab_size = len(tokenizer.word_index) + 1\n# Translate each word in text file to the matching vocabulary index\nsequences = tokenizer.texts_to_sequences(X)\n# The longest HTML file\nmax_length = max(len(s) for s in sequences)\n\n# Intialize our final input to the model\nX, y, image_data = list(), list(), list()\nfor img_no, seq in enumerate(sequences):\n for i in range(1, len(seq)):\n # Add the entire sequence to the input and only keep the next word for the output\n in_seq, out_seq = seq[:i], seq[i]\n # If the sentence is shorter than max_length, fill it up with empty words\n in_seq = pad_sequences([in_seq], maxlen=max_length)[0]\n # Map the output to one-hot encoding\n out_seq = to_categorical([out_seq], num_classes=vocab_size)[0]\n # Add and image corresponding to the HTML file\n image_data.append(features[img_no])\n # Cut the input sentence to 100 tokens, and add it to the input data\n X.append(in_seq[-100:])\n y.append(out_seq)\n\nX, y, image_data = np.array(X), np.array(y), np.array(image_data)",
"_____no_output_____"
],
[
"# Create the encoder\nimage_features = Input(shape=(1536,))\nimage_flat = Dense(128, activation='relu')(image_features)\nir2_out = RepeatVector(max_caption_len)(image_flat)\n\n# Create the decoder\nlanguage_input = Input(shape=(max_caption_len,))\nlanguage_model = Embedding(vocab_size, 200, input_length=max_caption_len)(language_input)\nlanguage_model = LSTM(256, return_sequences=True)(language_model)\nlanguage_model = LSTM(256, return_sequences=True)(language_model)\nlanguage_model = TimeDistributed(Dense(128, activation='relu'))(language_model)\n\n# Create the decoder\ndecoder = concatenate([ir2_out, language_model])\ndecoder = LSTM(512, return_sequences=True)(decoder)\ndecoder = LSTM(512, return_sequences=False)(decoder)\ndecoder_output = Dense(vocab_size, activation='softmax')(decoder)\n\n# Compile the model\nmodel = Model(inputs=[image_features, language_input], outputs=decoder_output)\n#model.compile(loss='categorical_crossentropy', optimizer='rmsprop')\nmodel.load_weights(\"/weights/org-weights-epoch-0900---loss-0.0000.hdf5\")",
"_____no_output_____"
],
[
"# Train the neural network\n#model.fit([image_data, X], y, batch_size=64, shuffle=False, epochs=2)",
"_____no_output_____"
],
[
"# map an integer to a word\ndef word_for_id(integer, tokenizer):\n for word, index in tokenizer.word_index.items():\n if index == integer:\n return word\n return None",
"_____no_output_____"
],
[
"# generate a description for an image\ndef generate_desc(model, tokenizer, photo, max_length):\n # seed the generation process\n in_text = 'START'\n # iterate over the whole length of the sequence\n for i in range(900):\n # integer encode input sequence\n sequence = tokenizer.texts_to_sequences([in_text])[0][-100:]\n # pad input\n sequence = pad_sequences([sequence], maxlen=max_length)\n # predict next word\n yhat = model.predict([photo,sequence], verbose=0)\n # convert probability to integer\n yhat = np.argmax(yhat)\n # map integer to word\n word = word_for_id(yhat, tokenizer)\n # stop if we cannot map the word\n if word is None:\n break\n # append as input for generating the next word\n in_text += ' ' + word\n # Print the prediction\n print(' ' + word, end='')\n # stop if we predict the end of the sequence\n if word == 'END':\n break\n return",
"_____no_output_____"
],
[
"# Load and image, preprocess it for IR2, extract features and generate the HTML\ntest_image = img_to_array(load_img('resources/images/86.jpg', target_size=(299, 299)))\ntest_image = np.array(test_image, dtype=float)\ntest_image = preprocess_input(test_image)\ntest_features = IR2.predict(np.array([test_image]))\ngenerate_desc(model, tokenizer, np.array(test_features), 100)",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a20a63cdbcd344bebc167a7b89ce4f5ab748441
| 312,498 |
ipynb
|
Jupyter Notebook
|
Time Series Analysis/Weather Forecasting using SRIMAX Model/weather prediction.ipynb
|
shreejitverma/Data-Scientist
|
03c06936e957f93182bb18362b01383e5775ffb1
|
[
"MIT"
] | 2 |
2022-03-12T04:53:03.000Z
|
2022-03-27T12:39:21.000Z
|
Time Series Analysis/Weather Forecasting using SRIMAX Model/weather prediction.ipynb
|
shreejitverma/Data-Scientist
|
03c06936e957f93182bb18362b01383e5775ffb1
|
[
"MIT"
] | null | null | null |
Time Series Analysis/Weather Forecasting using SRIMAX Model/weather prediction.ipynb
|
shreejitverma/Data-Scientist
|
03c06936e957f93182bb18362b01383e5775ffb1
|
[
"MIT"
] | 2 |
2022-03-12T04:52:21.000Z
|
2022-03-27T12:45:32.000Z
| 180.114121 | 70,392 | 0.872537 |
[
[
[
"# import required library",
"_____no_output_____"
]
],
[
[
"# Import numpy, pandas for data manipulation\nimport numpy as np\nimport pandas as pd\n\n# Import matplotlib, seaborn for visualization\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport warnings\nwarnings.filterwarnings('ignore')",
"_____no_output_____"
],
[
"# Import the data\nweather_data = pd.read_csv('weather.csv')\nweather_data.head()",
"_____no_output_____"
],
[
"rain_df = weather_data[['Date','Rainfall']]\nrain_df.head()",
"_____no_output_____"
],
[
"rain_df.shape",
"_____no_output_____"
],
[
"rain_df.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 145460 entries, 0 to 145459\nData columns (total 2 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Date 145460 non-null object \n 1 Rainfall 142199 non-null float64\ndtypes: float64(1), object(1)\nmemory usage: 2.2+ MB\n"
]
],
[
[
"**Using 50 values**",
"_____no_output_____"
]
],
[
[
"rain_df = rain_df.loc[:49]\nrain_df.head()",
"_____no_output_____"
],
[
"rain_df.shape",
"_____no_output_____"
],
[
"# Convert the time column into datetime\nrain_df['Date'] = pd.to_datetime(rain_df['Date'])\nrain_df['Date'].head()",
"_____no_output_____"
],
[
"rain_df.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 50 entries, 0 to 49\nData columns (total 2 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Date 50 non-null datetime64[ns]\n 1 Rainfall 49 non-null float64 \ndtypes: datetime64[ns](1), float64(1)\nmemory usage: 928.0 bytes\n"
],
[
"# fill the empty row\nrain_df = rain_df.fillna(rain_df['Rainfall'].mean())\nrain_df.head()",
"_____no_output_____"
]
],
[
[
"### Dataset Explanation",
"_____no_output_____"
]
],
[
[
"rain_df.describe()",
"_____no_output_____"
],
[
"# Output the maximum and minimum rain date\nprint(rain_df.loc[rain_df[\"Rainfall\"] == rain_df[\"Rainfall\"].max()])\nprint(rain_df.loc[rain_df[\"Rainfall\"] == rain_df[\"Rainfall\"].min()])",
" Date Rainfall\n17 2008-12-18 16.8\n Date Rainfall\n1 2008-12-02 0.0\n2 2008-12-03 0.0\n3 2008-12-04 0.0\n6 2008-12-07 0.0\n7 2008-12-08 0.0\n8 2008-12-09 0.0\n10 2008-12-11 0.0\n14 2008-12-15 0.0\n16 2008-12-17 0.0\n19 2008-12-20 0.0\n20 2008-12-21 0.0\n21 2008-12-22 0.0\n22 2008-12-23 0.0\n23 2008-12-24 0.0\n24 2008-12-25 0.0\n25 2008-12-26 0.0\n26 2008-12-27 0.0\n27 2008-12-28 0.0\n28 2008-12-29 0.0\n31 2009-01-01 0.0\n32 2009-01-02 0.0\n33 2009-01-03 0.0\n34 2009-01-04 0.0\n35 2009-01-05 0.0\n36 2009-01-06 0.0\n37 2009-01-07 0.0\n38 2009-01-08 0.0\n39 2009-01-09 0.0\n40 2009-01-10 0.0\n41 2009-01-11 0.0\n42 2009-01-12 0.0\n43 2009-01-13 0.0\n44 2009-01-14 0.0\n45 2009-01-15 0.0\n46 2009-01-16 0.0\n47 2009-01-17 0.0\n48 2009-01-18 0.0\n49 2009-01-19 0.0\n"
],
[
"# Reset the index \nrain_df.set_index(\"Date\", inplace=True)",
"_____no_output_____"
]
],
[
[
"### Data Visualization",
"_____no_output_____"
]
],
[
[
"# Plot the daily temperature change \nplt.figure(figsize=(16,10), dpi=100)\nplt.plot(rain_df.index, rain_df.Rainfall, color='tab:red')\nplt.gca().set(title=\"Daily Rain\", xlabel='Date', ylabel=\"rain value\")\nplt.show()",
"_____no_output_____"
],
[
"# Apply the Moving Average function by a subset of size 10 days.\nrain_df_mean = rain_df.Rainfall.rolling(window=10).mean()\nrain_df_mean.plot(figsize=(16,10))\nplt.show()",
"_____no_output_____"
],
[
"from statsmodels.tsa.seasonal import seasonal_decompose\n\n# Additive Decomposition\nresult_add = seasonal_decompose(rain_df.Rainfall, model='additive', extrapolate_trend=0)\n\n# Plot\nplt.rcParams.update({'figure.figsize': (10,10)})\nresult_add.plot().suptitle('Additive Decomposition', fontsize=22)\nplt.show()",
"_____no_output_____"
]
],
[
[
"### Baseline Model",
"_____no_output_____"
]
],
[
[
"# Shift the current rain to the next day. \npredicted_df = rain_df[\"Rainfall\"].to_frame().shift(1).rename(columns = {\"Rainfall\": \"rain_pred\" })\nactual_df = rain_df[\"Rainfall\"].to_frame().rename(columns = {\"Rainfall\": \"rain_actual\" })\n\n# Concatenate the actual and predicted rain\none_step_df = pd.concat([actual_df,predicted_df],axis=1)\n\n# Select from the second row, because there is no prediction for today due to shifting.\none_step_df = one_step_df[1:]\none_step_df.head(10)",
"_____no_output_____"
]
],
[
[
"> Here you can the we have two column one is our **actual rain** column and othe is **predicted rain** column that we use next model ",
"_____no_output_____"
],
[
"We could validate how well our model is by looking at the Root Mean Squared Error(RMSE) between the predicted and actual rain",
"_____no_output_____"
]
],
[
[
"from sklearn.metrics import mean_squared_error as MSE\nfrom math import sqrt\n\n# Calculate the RMSE\nrain_pred_err = MSE(one_step_df.rain_actual, one_step_df.rain_pred, squared=False)\nprint(\"The RMSE is\",rain_pred_err)",
"The RMSE is 4.002624108444867\n"
]
],
[
[
"> Our RMSE value is 4.002 is arround 4 that are pretty good for model.",
"_____no_output_____"
],
[
"## Using SARIMA model",
"_____no_output_____"
],
[
"### Parameter Selection\n#### Grid Search\nWe are going to apply one of the most commonly used method for time-series forecasting, known as SARIMA, which stands for Seasonal Autoregressive Integrated Moving Average. SARIMA models are denoted with the notation SARIMA(p,d,q)(P,D,Q,s). These three parameters account for seasonality, trend, and noise in data:\n\nWe will use a “grid search” to iteratively explore different combinations of parameters. For each combination of parameters, we fit a new seasonal SARIMA model with the SARIMAX() function from the statsmodels module and assess its overall quality.",
"_____no_output_____"
]
],
[
[
"import itertools\n\n# Define the p, d and q parameters to take any value between 0 and 2\np = d = q = range(0, 2)\n\n# Generate all different combinations of p, q and q triplets\npdq = list(itertools.product(p, d, q))\n\n# Generate all different combinations of seasonal p, q and q triplets\nseasonal_pdq = [(x[0], x[1], x[2], 12) for x in list(itertools.product(p, d, q))]\n\nprint('Examples of parameter combinations for Seasonal ARIMA...')\nprint('SARIMAX: {} x {}'.format(pdq[1], seasonal_pdq[1]))\nprint('SARIMAX: {} x {}'.format(pdq[1], seasonal_pdq[2]))\nprint('SARIMAX: {} x {}'.format(pdq[2], seasonal_pdq[3]))\nprint('SARIMAX: {} x {}'.format(pdq[2], seasonal_pdq[4]))",
"Examples of parameter combinations for Seasonal ARIMA...\nSARIMAX: (0, 0, 1) x (0, 0, 1, 12)\nSARIMAX: (0, 0, 1) x (0, 1, 0, 12)\nSARIMAX: (0, 1, 0) x (0, 1, 1, 12)\nSARIMAX: (0, 1, 0) x (1, 0, 0, 12)\n"
],
[
"for param in pdq:\n for param_seasonal in seasonal_pdq:\n try:\n mod = sm.tsa.statespace.SARIMAX(one_step_df.rain_actual,\n order=param,\n seasonal_order=param_seasonal,\n enforce_stationarity=False,\n enforce_invertibility=False)\n\n results = mod.fit()\n\n print('SARIMA{}x{}12 - AIC:{}'.format(param, param_seasonal, results.aic))\n except:\n continue",
"_____no_output_____"
]
],
[
[
"### Fitting the Model",
"_____no_output_____"
]
],
[
[
"import warnings\nwarnings.filterwarnings(\"ignore\") # specify to ignore warning messages\n# Import the statsmodels library for using SARIMAX model\nimport statsmodels.api as sm\n\n# Fit the SARIMAX model using optimal parameters\nmod = sm.tsa.statespace.SARIMAX(one_step_df.rain_actual,\n order=(1,1,1),\n seasonal_order=(1,1,1,12),\n enforce_stationarity=False,\n enforce_invertibility=False)",
"_____no_output_____"
],
[
"results = mod.fit()",
"_____no_output_____"
],
[
"results.summary()",
"_____no_output_____"
]
],
[
[
"**Predictions**",
"_____no_output_____"
]
],
[
[
"pred = results.predict(start=0,end=49)[1:]\npred",
"_____no_output_____"
],
[
"pred = results.get_prediction(start=0,end = 49, dynamic=False)\npred_ci = pred.conf_int()",
"_____no_output_____"
],
[
"pred_ci.head()",
"_____no_output_____"
],
[
"print(pred)",
"<statsmodels.tsa.statespace.mlemodel.PredictionResultsWrapper object at 0x000002076919A788>\n"
],
[
"ax = one_step_df.rain_actual.plot(label='observed',figsize=(16,10))\nax.set_xlabel('Date')\nax.set_ylabel('value')\nplt.ylim([0,2.0])\nplt.legend()\nplt.show()",
"_____no_output_____"
]
],
[
[
"### Forecast Diagnostic\nIt is also useful to quantify the accuracy of our forecasts. We will use the MSE (Mean Squared Error), in which for each predicted value, we compute its distance to the true value and square the result",
"_____no_output_____"
]
],
[
[
"y_forecasted = pred.predicted_mean[:49]\ny_truth = one_step_df.rain_actual\nprint(y_forecasted.shape)\nprint(y_truth.shape)\n# Compute the mean square error\nmse = MSE(y_truth, y_forecasted, squared=True)\nprint('The Mean Squared Error of our forecasts is {}'.format(round(mse, 2)))",
"(49,)\n(49,)\nThe Mean Squared Error of our forecasts is 25.85\n"
]
],
[
[
"Amazziingggg! Our forecast model forecasts the rain with only an error of 25.85. \n\nIn the weather forecast field, the prediction error of 2.19 degrees seems promising and sufficient, as there are many other factors that contribute to the change in rain, including but not limited to the wind speed, the air pressure, etc.",
"_____no_output_____"
],
[
"### Validating the Dynamic Forecast",
"_____no_output_____"
],
[
"In this case, we only use information from the time series up to a certain point, and after that, forecasts are generated using values from previous forecasted time points.\n",
"_____no_output_____"
]
],
[
[
"pred_dynamic = results.get_prediction(start=0,end = 49, dynamic=True, full_results=True)\npred_dynamic_ci = pred_dynamic.conf_int()",
"_____no_output_____"
],
[
"pred_dynamic_ci.head()",
"_____no_output_____"
]
],
[
[
"Once again, we plot the real and forecasted values of the average daily rain to assess how well we did:",
"_____no_output_____"
]
],
[
[
"ax = one_step_df.rain_actual.plot(label='observed', figsize=(15, 11))\npred_dynamic.predicted_mean.plot(label='Dynamic Forecast', ax=ax)\n\nax.fill_between(pred_dynamic_ci.index,\n pred_dynamic_ci.iloc[:, 0],\n pred_dynamic_ci.iloc[:, 1], color='k', alpha=.25)\n\n\nax.set_xlabel('Date')\nax.set_ylabel('Temperature (in Celsius)')\nplt.ylim([0,2.0])\nplt.legend()\nplt.show()\n\n",
"_____no_output_____"
]
],
[
[
"> In this case, the model seems to predict the rain inaccurately, with major fluctuations between the true value and the predicted value.",
"_____no_output_____"
],
[
"### Forecast Diagnostic",
"_____no_output_____"
]
],
[
[
"# Extract the predicted and true values of our time series\ny_forecasted = pred_dynamic.predicted_mean[:49]\ny_truth = one_step_df.rain_actual\n\n# Compute the mean square error\nmse = sqrt(MSE(y_truth, y_forecasted).mean())\nprint('The Root Mean Squared Error of our forecasts is {}'.format(round(mse, 2)))",
"The Root Mean Squared Error of our forecasts is 3.68\n"
]
],
[
[
"The **predicted** values obtained from the dynamic forecasts yield an MSE of 3.68. This is significantly higher than the one-step ahead, which is to be expected given that we are relying on less historical data from the time series.",
"_____no_output_____"
],
[
"# Conclusion",
"_____no_output_____"
],
[
"I described how to implement a seasonal SARIMA model in Python. I made extensive use of the pandas and statsmodels libraries and showed how to run model diagnostics, as well as how to produce forecasts of the Rain.",
"_____no_output_____"
],
[
"Recall that in the assumption I made in the section 2.2 Baseline Model, I could even reinforce our assumption and continue our belief that the rainfall today depends on the rainfall yesterday, the rainfall yesterday depends on the day before yesterday, and so on. \n\nIt is the best so far to use the history up to the point that we would like to make **predictions** on. Especially it holds for weather forecasting, where the rainfall today does not change much from yesterday, and the transition to another season signaling through the rainfall should gradually occur, unless there is any disastrous factors such as storm, drought, etc.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
4a20bee47c4263a0bd7ea91dbfd491602e5eb39d
| 49,766 |
ipynb
|
Jupyter Notebook
|
Chapter 3 - Regression Analysis/Exercise 28 - Plotting with Moving Average.ipynb
|
doc-E-brown/Applied-Supervised-Learning-with-Python
|
f125cecde1af4f77017302c3393acf9c2415ce9a
|
[
"MIT"
] | 2 |
2021-06-08T18:00:07.000Z
|
2021-10-08T06:31:38.000Z
|
Chapter 3 - Regression Analysis/Exercise 28 - Plotting with Moving Average.ipynb
|
TrainingByPackt/Applied-Supervised-Learning-with-Python
|
f125cecde1af4f77017302c3393acf9c2415ce9a
|
[
"MIT"
] | null | null | null |
Chapter 3 - Regression Analysis/Exercise 28 - Plotting with Moving Average.ipynb
|
TrainingByPackt/Applied-Supervised-Learning-with-Python
|
f125cecde1af4f77017302c3393acf9c2415ce9a
|
[
"MIT"
] | 16 |
2019-06-04T22:22:17.000Z
|
2022-01-02T06:43:44.000Z
| 130.619423 | 40,328 | 0.855886 |
[
[
[
"# Exercise 28 - Plotting Data with Moving Average\n\nThroughout this exercise we will plot, investigate and gain a thorough understanding of the dataset we are to model.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"df = pd.read_csv('synth_temp.csv')\ndf.head()",
"_____no_output_____"
],
[
"df = df.loc[df.Year > 1901]\ndf.head()",
"_____no_output_____"
]
],
[
[
"Construct the yearly averages",
"_____no_output_____"
]
],
[
[
"df_group_year = df.groupby('Year').agg(np.mean)\ndf_group_year.head()",
"_____no_output_____"
]
],
[
[
"Compute the moving average filter",
"_____no_output_____"
]
],
[
[
"window = 10\nrolling = df_group_year.AverageTemperature.rolling(window).mean();\nrolling.head(n=20)",
"_____no_output_____"
]
],
[
[
"Plot the raw data and moving average signal",
"_____no_output_____"
]
],
[
[
"fig = plt.figure(figsize=(10, 7))\nax = fig.add_axes([1, 1, 1, 1]);\n\n# Temp measurements\nax.scatter(df_group_year.index, df_group_year.AverageTemperature, label='Raw Data', c='k');\nax.plot(df_group_year.index, rolling, c='k', linestyle='--', label=f'{window} year moving average');\n\nax.set_title('Mean Air Temperature Measurements')\nax.set_xlabel('Year')\nax.set_ylabel('Temperature (degC)')\nax.set_xticks(range(df_group_year.index.min(), df_group_year.index.max(), 10))\nax.legend();",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a20c83dc5987ae4016d4907a60a57a068d190d5
| 366,719 |
ipynb
|
Jupyter Notebook
|
explore_seattle_airbnb.ipynb
|
provincit/explore_airbnb_seattle
|
e3bcbf6d4ce9681fb9ddfe5d042d9f0014dc18f6
|
[
"Apache-2.0"
] | null | null | null |
explore_seattle_airbnb.ipynb
|
provincit/explore_airbnb_seattle
|
e3bcbf6d4ce9681fb9ddfe5d042d9f0014dc18f6
|
[
"Apache-2.0"
] | null | null | null |
explore_seattle_airbnb.ipynb
|
provincit/explore_airbnb_seattle
|
e3bcbf6d4ce9681fb9ddfe5d042d9f0014dc18f6
|
[
"Apache-2.0"
] | null | null | null | 123.933423 | 82,518 | 0.792847 |
[
[
[
"# Seattle Airbnb",
"_____no_output_____"
],
[
"My significant foci are listing and calendar to display data from my business understanding.\n\n* Read dataset - read csv files to pandas dataframe.\n* Data manipulation - data cleaning and data wrangling to make quality data to visualization .\n* Exploratory data analysis (EDA) - Data visualizations that can answer business questions.\n\n\n\n",
"_____no_output_____"
],
[
"## Section 1 : Business Understanding",
"_____no_output_____"
],
[
"Airbnb is a sharing economy platform whether we can get low booking costs as we can. So we can analyse data to explore that meet our questions\n\n1. What is the most occupancy by month in 2016?\n2. Which month is the most expensive?\n3. Which day is the most costly?\n4. Is the host respond to you appropriately?\n5. What features were affected the price?",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport calendar",
"_____no_output_____"
]
],
[
[
"## Section 2 : Data Understanding",
"_____no_output_____"
],
[
"### Gather data",
"_____no_output_____"
]
],
[
[
"# calendar data\ndf_calender = pd.read_csv('calendar.csv')\ndf_calender.head()",
"_____no_output_____"
],
[
"# listings data\ndf_listings = pd.read_csv('listings.csv')\ndf_listings.head()",
"_____no_output_____"
],
[
"# reviews data\ndf_reviews = pd.read_csv('reviews.csv')\ndf_reviews.head()",
"_____no_output_____"
]
],
[
[
"#### Calendar data",
"_____no_output_____"
]
],
[
[
"# show number of null values\ndf_calender.isnull().sum()",
"_____no_output_____"
],
[
"# total data in df_calender\nlen(df_calender)",
"_____no_output_____"
],
[
"# check duplicated values in all columns\ndf_calender[df_calender.duplicated()]",
"_____no_output_____"
],
[
"# observ column types in df_calender\ndf_calender.dtypes",
"_____no_output_____"
]
],
[
[
"## Section 3 : Data Preparation\n\n",
"_____no_output_____"
]
],
[
[
"# So we must convert date column type to date \ndf_calender['date'] = pd.to_datetime(df_calender['date'])\n\n# and convert listing_id type to string\ndf_calender['listing_id'] = df_calender['listing_id'].astype(str)",
"_____no_output_____"
],
[
"d = {'t': 'Available', 'f': 'Not available'}\ndf_calender[\"available\"] = df_calender[\"available\"].map(d)",
"_____no_output_____"
],
[
"# remove the symbols in price\ndef remove_symbol(price):\n \"\"\"\n Return price in string format \n without the dollar sign.\n \"\"\"\n\n if type(price) is str:\n return price.replace(\"$\", \"\")\n return price\n\ndf_calender['price'] = df_calender['price'].apply(lambda x: remove_symbol(x))\n\n# convert the price type to numberic\ndf_calender['price'] = pd.to_numeric(df_calender['price'], errors='coerce')\n\n# replace na value with mean price in each listing_id group, which is usually the same value\ndf_calender['price'] = df_calender['price'].fillna(df_calender.groupby('listing_id')['price'].transform('mean'))\n",
"_____no_output_____"
],
[
"# drop na value becuase the droped data were not contains an important data in other column\ndf_calender = df_calender.dropna(subset=['price'])",
"_____no_output_____"
],
[
"# recheck null value in price column\ndf_calender[df_calender['price'].isnull()]",
"_____no_output_____"
],
[
"# preview samples in df_calender\ndf_calender.sample(5)",
"_____no_output_____"
],
[
"# create new column contains year value which extracted from datetime\ndf_calender['year'] = df_calender.date.dt.year\ndf_calender['year'].value_counts()",
"_____no_output_____"
],
[
"## Get data only in 2016 and drop no price\ndf_calender = df_calender[df_calender['year'] == 2016]\ndf_calender = df_calender[df_calender['price'] != 0]",
"_____no_output_____"
]
],
[
[
"#### Reviews data",
"_____no_output_____"
]
],
[
[
"# sneak peek dataframe\ndf_reviews.head()",
"_____no_output_____"
],
[
"df_reviews.dtypes",
"_____no_output_____"
],
[
"df_reviews['listing_id'] = df_reviews['listing_id'].to_string()\ndf_reviews['id'] = df_reviews['id'].to_string()",
"_____no_output_____"
],
[
"# Check null\ndf_reviews.isnull().sum()",
"_____no_output_____"
],
[
"# drop review_id and reveiew_name \ndf_reviews = df_reviews.drop(['reviewer_id', 'reviewer_name'], axis=1)\n\n# reomove comments row\ndf_reviews = df_reviews.dropna(subset=['comments'])",
"_____no_output_____"
]
],
[
[
"#### Listing data",
"_____no_output_____"
]
],
[
[
"# sneak peek dataframe\ndf_listings.head()",
"_____no_output_____"
],
[
"# Select only interest columns\ndf_listings = df_listings[['id','host_response_time','host_response_rate','accommodates','bathrooms','bedrooms','beds','price','weekly_price','monthly_price'\n,'cleaning_fee','extra_people','minimum_nights','review_scores_rating','instant_bookable']]",
"_____no_output_____"
],
[
"# convert id type to string\ndf_listings['id'] = df_listings['id'].to_string()",
"_____no_output_____"
],
[
"# replace na values with mode value that which is a most common value and number of beds must be an integer \ndf_listings['beds'] = df_listings['beds'].fillna(df_listings['beds'].mode()[0])",
"_____no_output_____"
],
[
"# remove symbol in price\ndf_listings['price'] = df_listings['price'].apply(lambda x: remove_symbol(x))\n\n# convert the price type to numberic\ndf_listings['price'] = pd.to_numeric(df_listings['price'], errors='coerce')",
"_____no_output_____"
],
[
"# check null value in price column\ndf_listings[df_listings['price'].isnull()]",
"_____no_output_____"
],
[
"df_listings = df_listings.dropna(subset=['price'])",
"_____no_output_____"
],
[
"# covert percentage to numberic in host_response_rate column\ndef percent_to_numberic(x):\n \"\"\"\n Return percent in floating-point number \n without the percent sign.\n \"\"\"\n \n if isinstance(x, str):\n return float(x.strip('%'))/100\n return 1\n\ndf_listings['host_response_rate'] = df_listings['host_response_rate'].apply(lambda x: percent_to_numberic(x))",
"_____no_output_____"
]
],
[
[
"## Section 4 : Evaluate the Results\n",
"_____no_output_____"
],
[
"### Question 1 : What is the most occupancy by month in 2016 ?",
"_____no_output_____"
],
[
"Visualize to find a pattern of the number of available occupation based on month",
"_____no_output_____"
]
],
[
[
"# Set charts size\nsns.set(rc={'figure.figsize':(11.7,8.27)})\n\nmonth = df_calender.date.dt.strftime('%b')\nax = sns.countplot(data = df_calender, x = month, hue = 'available');\nax.set(xlabel='Month', ylabel='Total rooms')\nplt.title('Occupation in 2016');",
"_____no_output_____"
]
],
[
[
"January got the highest number for not available rooms. However, December had the most available rooms for booking.",
"_____no_output_____"
],
[
"#### Analyzing Occupancy Percentage by using the number of available and unavailable.",
"_____no_output_____"
]
],
[
[
"df_group_month = df_calender.groupby(by=[df_calender.date.dt.month, \"available\"]).agg({'available': 'count'})\ndf_group_month = df_group_month.rename(columns={'available':'count'})\ndf_group_month = df_group_month.reset_index()\ndf_group_month = df_group_month.rename(columns={'date':'month'})",
"_____no_output_____"
],
[
"# Create new dataframe\ndf_available = pd.DataFrame(columns=['month', 'percent'])",
"_____no_output_____"
],
[
"for month in df_group_month['month'].unique():\n\n sum_total = df_group_month.loc[df_group_month['month'] == month, 'count'].sum()\n\n available_total = df_group_month.loc[(df_group_month['month'] == month) & \n (df_group_month['available'] == 'Available'), 'count'].sum()\n\n not_available_total = df_group_month.loc[(df_group_month['month'] == month) & \n (df_group_month['available'] == 'Not available'), 'count'].sum()\n\n available_percent = round(((available_total-not_available_total)/sum_total)*100, 2)\n\n df_available = df_available.append({'month': month, 'percent': available_percent}, ignore_index=True)\n",
"_____no_output_____"
],
[
"# convert number of month into month name\ndf_available['month'] = df_available['month'].apply(lambda x: calendar.month_abbr[int(x)])",
"_____no_output_____"
]
],
[
[
"Visualizing percentages by month from the cell above.",
"_____no_output_____"
]
],
[
[
"ax = sns.barplot(data = df_available, x = 'month', y='percent');\nax.set(xlabel='Month', ylabel='Percentage')\nplt.title('Available Percentage by month in 2016');",
"_____no_output_____"
]
],
[
[
"### Question 2 : Which month is the most expensive ?",
"_____no_output_____"
],
[
"Analyzing average price by month can show the cost variance.",
"_____no_output_____"
]
],
[
[
"# Grouping month with price mean\nmonth = df_calender.date.dt.month\nmonthly_avg=df_calender.groupby(month).price.mean()\nmonthly_avg = monthly_avg.reset_index()",
"_____no_output_____"
],
[
"# map number of month with month name\nd = {\n 1: 'Jan',\n 2: 'Feb',\n 3: 'Mar',\n 4: 'Apr',\n 5: 'May',\n 6: 'Jun',\n 7: 'Jul',\n 8: 'Aug',\n 9: 'Sep', \n 10: 'Oct',\n 11: 'Nov',\n 12: 'Dec'\n }\nmonthly_avg = monthly_avg.rename(columns={\"date\": \"month\"})\nmonthly_avg[\"month\"] = monthly_avg[\"month\"].map(d)",
"_____no_output_____"
]
],
[
[
"Visualizing trends from summarizing data above.",
"_____no_output_____"
]
],
[
[
"ax = sns.lineplot(x=\"month\", y=\"price\", data=monthly_avg)\nax.set(xlabel='Month', ylabel='Average price in USD')",
"_____no_output_____"
]
],
[
[
"This chart shows what the average price get highest around June and August.",
"_____no_output_____"
],
[
"### Question 3 : Which day is the most costly?",
"_____no_output_____"
],
[
"Analyze price in day groups to find trends that can answer the questions",
"_____no_output_____"
]
],
[
[
"# Find mean of price in each day\nday = df_calender.date.dt.dayofweek\nday_avg = df_calender.groupby(day).price.mean()\ndaily_avg = day_avg.reset_index()",
"_____no_output_____"
],
[
"# Mapping number of days to day name\nd = {\n 0: 'Monday',\n 1: 'Tuesday',\n 2: 'Wednesday',\n 3: 'Thursday',\n 4: 'Friday',\n 5: 'Saturday',\n 6: 'Sunday'\n }\ndaily_avg[\"date\"] = daily_avg[\"date\"].map(d)",
"_____no_output_____"
]
],
[
[
"Visualize average price by day in 2016",
"_____no_output_____"
]
],
[
[
"ax = sns.lineplot(x=\"date\", y=\"price\", data=daily_avg)\nax.set(xlabel='Day', ylabel='Average price in USD')",
"_____no_output_____"
]
],
[
[
"The chart shows the high booking costs on Friday and Saturday",
"_____no_output_____"
],
[
"### Question 4 : Is the host respond to you appropriately?",
"_____no_output_____"
],
[
"Visualize response time categorial",
"_____no_output_____"
]
],
[
[
"cat_order = df_listings['host_response_time'].value_counts().index\nsns.countplot(data = df_listings, x = 'host_response_time', order=cat_order)\nplt.title('Host response time');",
"_____no_output_____"
]
],
[
[
"This chart displays that most common hosts respond within an hour, and renters have a few chances to answer more than a day.",
"_____no_output_____"
],
[
"**Visualize host reponse rate compared with response time**",
"_____no_output_____"
]
],
[
[
"response_rate_percent = df_listings['host_response_rate']*100\nax = sns.boxplot(x=\"host_response_time\", y=response_rate_percent,\n data=df_listings)\n\nax.set(xlabel='Response time', ylabel='Response rate')\n",
"_____no_output_____"
]
],
[
[
"Primarily, hosts always respond to the traveller but still has a little chance to have no response.",
"_____no_output_____"
],
[
"**The visualization shows the response time and price to find the pattern of responses time.**",
"_____no_output_____"
]
],
[
[
"ax = sns.boxplot(x=\"host_response_time\", y=\"price\",\n data=df_listings)\nax.set(xlabel='Response time', ylabel='Price')\nsns.despine(offset=10, trim=True)",
"_____no_output_____"
]
],
[
[
"It seems to have a few differences and no significant value for each response time categorial on price",
"_____no_output_____"
],
[
"#### Qustion 5 : What features were affected the price?",
"_____no_output_____"
],
[
"Visualize the correlation between features",
"_____no_output_____"
]
],
[
[
"corr = df_listings.corr()\nkot = corr[corr.apply(lambda x: abs(x)>=0)]\n\nsns.heatmap(kot, annot = True, fmt = '.2f', center = 0, cmap=\"Blues\")\nplt.title('Features Correlation');\nplt.xticks(rotation = 15);",
"_____no_output_____"
]
],
[
[
"This heatmap shows the relation between features, and we got the dark blue grids that show significant ties.",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a20cb35e934a4f07ef8cd67ab4b96d278719114
| 1,018 |
ipynb
|
Jupyter Notebook
|
lab-notebook/bsong/2019-08-09 - bootstrap rev 2 - architect into data x model x test/bootstrap architecture - test data.ipynb
|
velexi-corporation/spectra-ml
|
10fab9e72437e79b6f7ff5ae4b9592bc7c48f10d
|
[
"Apache-2.0"
] | null | null | null |
lab-notebook/bsong/2019-08-09 - bootstrap rev 2 - architect into data x model x test/bootstrap architecture - test data.ipynb
|
velexi-corporation/spectra-ml
|
10fab9e72437e79b6f7ff5ae4b9592bc7c48f10d
|
[
"Apache-2.0"
] | null | null | null |
lab-notebook/bsong/2019-08-09 - bootstrap rev 2 - architect into data x model x test/bootstrap architecture - test data.ipynb
|
velexi-corporation/spectra-ml
|
10fab9e72437e79b6f7ff5ae4b9592bc7c48f10d
|
[
"Apache-2.0"
] | null | null | null | 17.254237 | 49 | 0.500982 |
[
[
[
"num_samples = 5\n\nindices_to_scramble = range(num_samples)\n\nprint(indices_to_scramble)\nprint(range(num_samples))\nprint()",
"range(0, 5)\nrange(0, 5)\n"
],
[
"#test random function",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code"
]
] |
4a20cf1cea3bed368f88947fdc010405816a7d5f
| 5,012 |
ipynb
|
Jupyter Notebook
|
Ch3/02_Bag_of_Words.ipynb
|
py-ranoid/practical-nlp
|
514fd4da3b72f26597d91cdb89704a849bf6b36d
|
[
"MIT"
] | 5 |
2020-12-02T23:17:57.000Z
|
2021-04-14T01:17:09.000Z
|
Ch3/02_Bag_of_Words.ipynb
|
ramnathv/practical-nlp
|
837d57e9b981d7a02a230334e0621e0e6918f4df
|
[
"MIT"
] | 5 |
2021-08-23T20:56:47.000Z
|
2022-02-10T04:38:21.000Z
|
Ch3/02_Bag_of_Words.ipynb
|
ramnathv/practical-nlp
|
837d57e9b981d7a02a230334e0621e0e6918f4df
|
[
"MIT"
] | 3 |
2020-12-02T23:42:01.000Z
|
2021-03-03T23:04:00.000Z
| 28.971098 | 354 | 0.577215 |
[
[
[
"## Bag of Words\n\nIn the last notebook, we saw how to get the one hot encoding representation for our toy corpus. In this notebook we will see how to use bag of words representation for the same data..",
"_____no_output_____"
]
],
[
[
"documents = [\"Dog bites man.\", \"Man bites dog.\", \"Dog eats meat.\", \"Man eats food.\"] #Same as the earlier notebook\nprocessed_docs = [doc.lower().replace(\".\",\"\") for doc in documents]\nprocessed_docs",
"_____no_output_____"
]
],
[
[
"Now, let's do the main task of finding bag of words representation. We will use CountVectorizer from sklearn. ",
"_____no_output_____"
]
],
[
[
"from sklearn.feature_extraction.text import CountVectorizer\n\n#look at the documents list\nprint(\"Our corpus: \", processed_docs)\n\ncount_vect = CountVectorizer()\n#Build a BOW representation for the corpus\nbow_rep = count_vect.fit_transform(processed_docs)\n\n#Look at the vocabulary mapping\nprint(\"Our vocabulary: \", count_vect.vocabulary_)\n\n#see the BOW rep for first 2 documents\nprint(\"BoW representation for 'dog bites man': \", bow_rep[0].toarray())\nprint(\"BoW representation for 'man bites dog: \",bow_rep[1].toarray())\n\n#Get the representation using this vocabulary, for a new text\ntemp = count_vect.transform([\"dog and dog are friends\"])\nprint(\"Bow representation for 'dog and dog are friends':\", temp.toarray())\n",
"Our corpus: ['dog bites man', 'man bites dog', 'dog eats meat', 'man eats food']\nOur vocabulary: {'dog': 1, 'bites': 0, 'man': 4, 'eats': 2, 'meat': 5, 'food': 3}\nBoW representation for 'dog bites man': [[1 1 0 0 1 0]]\nBoW representation for 'man bites dog: [[1 1 0 0 1 0]]\nBow representation for 'dog and dog are friends': [[0 2 0 0 0 0]]\n"
]
],
[
[
"In the above code, we represented the text considering the frequency of words into account. However, sometimes, we don't care about frequency much, but only want to know whether a word appeared in a text or not. That is, each document is represented as a vector of 0s and 1s. We will use the option binary=True in CountVectorizer for this purpose. ",
"_____no_output_____"
]
],
[
[
"#BoW with binary vectors\ncount_vect = CountVectorizer(binary=True)\nbow_rep_bin = count_vect.fit_transform(processed_docs)\ntemp = count_vect.transform([\"dog and dog are friends\"])\nprint(\"Bow representation for 'dog and dog are friends':\", temp.toarray())\n",
"Bow representation for 'dog and dog are friends': [[0 1 0 0 0 0]]\n"
]
],
[
[
"We will see how we can use BoW representation for Text Classification later in Chapter 4. ",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4a20d5c377a51982a84223c096bbff11dd6fa784
| 2,940 |
ipynb
|
Jupyter Notebook
|
projects/interactive test.ipynb
|
benti/Error-Pypagation
|
108feddc58a705da82fe6fdce658b419b589b533
|
[
"BSD-3-Clause"
] | null | null | null |
projects/interactive test.ipynb
|
benti/Error-Pypagation
|
108feddc58a705da82fe6fdce658b419b589b533
|
[
"BSD-3-Clause"
] | null | null | null |
projects/interactive test.ipynb
|
benti/Error-Pypagation
|
108feddc58a705da82fe6fdce658b419b589b533
|
[
"BSD-3-Clause"
] | null | null | null | 26.017699 | 452 | 0.507143 |
[
[
[
"import sys\nsys.path.append('../')\nsys.path.append('../../')\n\nfrom errorpro.interactive import *\ninit(locals())\n",
"100 (1 {'len': 2})\n"
],
[
"%%eq\nz = 2 <0.1> [m]\na = z**2",
"_____no_output_____"
],
[
"z = assign(2,0.1,name=\"z\")\na = assign(sin(z**2),name=\"a\")\na",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code"
]
] |
4a20de112883e860aa5a037d336439964b762be4
| 7,809 |
ipynb
|
Jupyter Notebook
|
testcode/smartLoop/jetplotComp.ipynb
|
mtesseracted/TestBed
|
b96a655ed460b5af236ef0e51c68fc31e9c6f5d4
|
[
"BSD-3-Clause"
] | null | null | null |
testcode/smartLoop/jetplotComp.ipynb
|
mtesseracted/TestBed
|
b96a655ed460b5af236ef0e51c68fc31e9c6f5d4
|
[
"BSD-3-Clause"
] | null | null | null |
testcode/smartLoop/jetplotComp.ipynb
|
mtesseracted/TestBed
|
b96a655ed460b5af236ef0e51c68fc31e9c6f5d4
|
[
"BSD-3-Clause"
] | null | null | null | 30.744094 | 170 | 0.539378 |
[
[
[
"#Relevant video:\n#http://www.youtube.com/watch?v=VIt2z6zJrMs&t=1m52s\n\n#My output from code:\n#https://www.youtube.com/watch?v=E_yE2Q0ArpM\n\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport numpy as np\nimport scipy.integrate as ing\n\nd2r = np.pi/180. #deg to radian\nk2f = 1.68781 #knots to ft per sec\nkilo=10.**3 \n\n###--Import and basic data manipulation--####\n\n#-Load dataset 1-#\na1 = np.loadtxt('./dumb2.dat') #My data: uses Youtube timestamp\n#Source:\n#https://drive.google.com/file/d/0ByW1n-WOmDAEeDlPSGc1UHpDcWs/view?usp=sharing\na1=a1.T\n#index map:: 0:time[s], 1:aspd[kt], 2:alt[kft], 3:Mach#?, 4:G, 5:Ptich[deg]\n\na1[2]=a1[2]*kilo #convert from kiloft to ft\n\n#Break velocity into components and convert to f/s\nxv1=np.multiply(a1[1],np.cos(d2r*a1[5])*k2f)\nyv1=np.multiply(a1[1],np.sin(d2r*a1[5])*k2f)\n\n\n#-Load dataset 2-#\na2 = np.loadtxt('./dumb5.dat') #from reddit.com/user/what_are_you_saying\n#Original Source:\n#https://drive.google.com/file/d/0B0DNIvRXrB1jeWdBYkVkZ1ByOXM/view\n#reformatted source for correct pitch data:\n#https://drive.google.com/file/d/0ByW1n-WOmDAEY2ZOZFZQQXdnZ0k/view?usp=sharing\na2=a2.T\n#index map:: 0:time[s], 1:aspd[kt], 2:alt[ft], 3:Ptich[deg]\n\nxv2=np.multiply(a2[1],np.cos(d2r*a2[3])*k2f)\nyv2=np.multiply(a2[1],np.sin(d2r*a2[3])*k2f)\n\n###--Numerically integrate to get position, using 2 methods\n# Simpson & CumulativeTrapezoid methods, (whoever\n# named that function didnt think about phrasing)--###\n\n#dataset 1\nn = len(a1[0])\nxp11 = np.empty(n)\nxp12 = np.empty(n)\nyp11 = np.empty(n)\nyp12 = np.empty(n)\nyp12[0]=yp11[0]=xp11[0]=xp12[0]=0.\nfor i in range(1,n):\n xp11[i]=ing.simps(xv1[0:i],a1[0,0:i])\n yp11[i]=ing.simps(yv1[0:i],a1[0,0:i])\n \nxp12=ing.cumtrapz(xv1,a1[0],initial=0.)\nyp12=ing.cumtrapz(yv1,a1[0],initial=0.)\n\n#dataset 2\nn = len(a2[0])\nxp21 = np.empty(n)\nxp22 = np.empty(n)\nyp21 = np.empty(n)\nyp22 = np.empty(n)\nyp22[0]=yp21[0]=xp21[0]=xp22[0]=0.\nfor i in 
range(1,n):\n xp21[i]=ing.simps(xv2[0:i],a2[0,0:i])\n yp21[i]=ing.simps(yv2[0:i],a2[0,0:i])\n \nxp22=ing.cumtrapz(xv2,a2[0],initial=0.)\nyp22=ing.cumtrapz(yv2,a2[0],initial=0.)",
"_____no_output_____"
],
[
"###--Create idealized circular trajectory--###\n\n#Circle trajectory parameters:\nxcirc=3800 #xcenter\nycirc=13550 #ycenter\nradc=3000 #radius circle\nxstart=0. #start x val\nxend=10000. #ending x val\nystart=ycirc - radc\n\nnc=60 #data points in circle, only make multiple of 10!\n\n#get circl points starting at bottom\ndef circlepts(xc,yc,r,frac):\n yret=r*np.sin((frac-0.25)*2*np.pi)+yc\n xret=r*np.cos((frac-0.25)*2*np.pi)+xc\n return (xret, yret)\n\n\nxpts = np.empty(nc)\nypts = np.empty(nc)\nfor i in range(0,nc): \n xpts[i], ypts[i] = circlepts(xcirc,ycirc,radc,float(i)/float(nc))\n \n\nxlin1= np.empty(nc/10)\nylin1= np.empty(nc/10)\nxlin2= np.empty(nc/10)\nylin2= np.empty(nc/10)\ndelx=float(xcirc-xstart)/float(nc/10)\ndelx2=float(xend-xcirc)/float(nc/10)\nfor i in range(0,nc/10):\n xlin1[i]=xstart + i*delx\n ylin1[i]=ystart\n xlin2[i]=xcirc + (i+1)*delx2\n ylin2[i]=ystart\n\nxtraj=np.concatenate((xlin1,xpts,xlin2))\nytraj=np.concatenate((ylin1,ypts,ylin2))",
"_____no_output_____"
],
[
"###--Comparison plots of data available and analysis methods--###\n\nplt.plot(xp11,a1[2],label='1) Simps vs Alt')\nplt.plot(xp12,a1[2],label='1) CumTrap vs Alt')\nplt.plot(xp12,yp12+a1[2,0],label='1) CumTrap vs CumTrap')\n\nplt.plot(xp21,a2[2],label='2) Simps vs Alt')\nplt.plot(xp22,a2[2],label='2) CumTrap vs Alt')\nplt.plot(xp22,yp22+a2[2,0],label='2) CumTrap vs CumTrap')\n\nplt.plot(xtraj,ytraj,label='Idealized circular Traj')\n\nplt.axis([0,12000,8000,20000])\nplt.axis(\"equal\")\nplt.legend()\nplt.show()",
"_____no_output_____"
],
[
"fig, ax = plt.subplots()\nplt.axis(\"equal\") #keeps plot on a 1:1 x:y scale\n\nplt.axis([-1000,12000,8000,20000]) #Plot ranges\n\nxjet=xp11\nyjet=a1[2]\n\nline, = ax.plot(xjet[0:3],yjet[0:3],label='Actual Trajectory')\nline2, = ax.plot(xtraj[0],ytraj[0],label='Circular Trajectory')\nplt.legend() #Comment to remove legend\n\nxlen=len(xjet)\nclen=len(xtraj)\n\ndef animate(i):\n if(i < 2*xlen and (i%2)==0): #Plot the actual trajectory first\n line.set_xdata(xjet[0:i/2]) #Only go every 2 to be slower\n line.set_ydata(yjet[0:i/2])\n elif(i< (2*xlen+clen) and i > 2*xlen): #Plot the circle trajectory second\n line2.set_xdata(xtraj[0:i-2*xlen+1])\n line2.set_ydata(ytraj[0:i-2*xlen+1])\n \n return (line,line2)\n\n\n# Init only required for blitting to give a clean slate.\ndef init():\n line.set_ydata(np.ma.array(yjet, mask=True))\n line.set_xdata(np.ma.array(xjet, mask=True))\n line2.set_ydata(np.ma.array(ytraj, mask=True))\n line2.set_xdata(np.ma.array(xtraj, mask=True))\n return (line, line2)\n\nani = animation.FuncAnimation(fig, animate, np.arange(4, (2*xlen+clen)), init_func=init,\n interval=120, blit=False)\n\nplt.show() #Comment to not show animation",
"_____no_output_____"
],
[
"#Save file method\n#ani.save(filename='test4.mpeg', writer=\"ffmpeg\", fps=30, dpi=140, codec=None, bitrate=8000, extra_args=None, metadata=None, extra_anim=None, savefig_kwargs=None)",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code"
]
] |
4a20eeb816dae0641500883bf3ae91e6c0050a41
| 1,250 |
ipynb
|
Jupyter Notebook
|
nbs/index.ipynb
|
maimanuel/rlplay
|
2bd01cfa4a52d801c1172e2e184c84233f5f7479
|
[
"Apache-2.0"
] | null | null | null |
nbs/index.ipynb
|
maimanuel/rlplay
|
2bd01cfa4a52d801c1172e2e184c84233f5f7479
|
[
"Apache-2.0"
] | null | null | null |
nbs/index.ipynb
|
maimanuel/rlplay
|
2bd01cfa4a52d801c1172e2e184c84233f5f7479
|
[
"Apache-2.0"
] | null | null | null | 15.432099 | 81 | 0.484 |
[
[
[
"#hide",
"_____no_output_____"
]
],
[
[
"# RL Playground\n\n> Summary description here.",
"_____no_output_____"
],
[
"This file will become your README and also the index of your documentation.",
"_____no_output_____"
],
[
"## Install",
"_____no_output_____"
],
[
"`pip install your_project_name`",
"_____no_output_____"
],
[
"## How to use",
"_____no_output_____"
],
[
"Fill me in please! Don't forget code examples:",
"_____no_output_____"
],
[
"This is a test",
"_____no_output_____"
]
]
] |
[
"code",
"markdown"
] |
[
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
4a2102bf2e67b7977066b1cf88d53f8a4795c8c8
| 161,649 |
ipynb
|
Jupyter Notebook
|
Machine Learning/Feature-Selection-using-RFE-RFECV-SelectKBest/selectkbest.ipynb
|
Storiesbyharshit/Data-Science-Portfolio
|
968fa280583e3ce2086a12607eba4caa99dc7879
|
[
"MIT"
] | 16 |
2020-07-07T11:16:21.000Z
|
2022-03-18T08:53:49.000Z
|
Machine Learning/Feature-Selection-using-RFE-RFECV-SelectKBest/selectkbest.ipynb
|
Storiesbyharshit/Data-Science-Portfolio
|
968fa280583e3ce2086a12607eba4caa99dc7879
|
[
"MIT"
] | 9 |
2020-11-13T19:02:31.000Z
|
2022-02-10T02:27:10.000Z
|
Machine Learning/Feature-Selection-using-RFE-RFECV-SelectKBest/selectkbest.ipynb
|
Storiesbyharshit/Data-Science-Portfolio
|
968fa280583e3ce2086a12607eba4caa99dc7879
|
[
"MIT"
] | 10 |
2020-07-07T09:39:01.000Z
|
2021-04-30T04:22:30.000Z
| 154.245229 | 37,165 | 0.870312 |
[
[
[
"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport warnings\nfrom google.colab import drive\nfrom scipy import stats\nfrom scipy.stats.stats import pearsonr\nfrom sklearn.model_selection import train_test_split, GridSearchCV\nfrom sklearn.feature_selection import SelectKBest, f_regression",
"_____no_output_____"
],
[
"%matplotlib inline\nwarnings.filterwarnings(\"ignore\")\npd.set_option('display.expand_frame_repr', False)",
"_____no_output_____"
],
[
"drive.mount('/content/drive')",
"Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n"
],
[
"path = \"/content/drive/My Drive/data.csv\"",
"_____no_output_____"
],
[
"df = pd.read_csv(path)",
"_____no_output_____"
],
[
"print (\"Total number of rows in dataset = {}\".format(df.shape[0]))\nprint (\"Total number of columns in dataset = {}\".format(df.shape[1]))",
"Total number of rows in dataset = 155\nTotal number of columns in dataset = 5\n"
],
[
"df.head()",
"_____no_output_____"
],
[
"# Split df into x and Y\ntarget_col = \"Y\"\nX = df.loc[:, df.columns != target_col]\ny = df.loc[:, target_col]",
"_____no_output_____"
],
[
"X.shape",
"_____no_output_____"
],
[
"y.shape",
"_____no_output_____"
],
[
"# Split the data into train and test\nX_train, X_test, y_train, y_test = train_test_split(X, y, \n test_size=0.30, \n random_state=42)",
"_____no_output_____"
],
[
"X_train.head()",
"_____no_output_____"
],
[
"X_new = SelectKBest(f_regression, k=2).fit_transform(X_train, y_train)",
"_____no_output_____"
],
[
"X_new[0:5]",
"_____no_output_____"
]
],
[
[
"# Relationship of Features with Response Variables",
"_____no_output_____"
]
],
[
[
"def plot_join_plot(df, feature, target):\n j = sns.jointplot(feature, target, data = df, kind = 'reg')\n j.annotate(stats.pearsonr)\n return plt.show()",
"_____no_output_____"
],
[
"train_df = pd.concat([X_train, y_train], axis=1)",
"_____no_output_____"
],
[
"plot_join_plot(train_df, \"X1\", target_col)",
"_____no_output_____"
],
[
"plot_join_plot(train_df, \"X2\", target_col)",
"_____no_output_____"
],
[
"plot_join_plot(train_df, \"X3\", target_col)",
"_____no_output_____"
],
[
"plot_join_plot(train_df, \"X4\", target_col)",
"_____no_output_____"
]
],
[
[
"# Correlation Analysis using Pearson Analysis",
"_____no_output_____"
]
],
[
[
"pearsonr(X_train[\"X4\"], y_train)",
"_____no_output_____"
],
[
"out_list = []\nfor column in X_train.columns:\n corr_tuple = pearsonr(X_train[column], y_train)\n out_list.append([column, corr_tuple[0], corr_tuple[1]])",
"_____no_output_____"
],
[
"corr_df = pd.DataFrame(out_list, columns=[\"Features\", \"Correlation\", \"P-Value\"])",
"_____no_output_____"
],
[
"corr_df.head()",
"_____no_output_____"
],
[
"corr_df.sort_values(by=['P-Value'], inplace=True)",
"_____no_output_____"
],
[
"corr_df.head()",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a211550e63561e7e2c808a1df514c7a8946684d
| 258,607 |
ipynb
|
Jupyter Notebook
|
notebooks/Pandeia-WFIRST Imaging.ipynb
|
josePhoenix/wfirst-tools
|
770783bc1ac0ebef797db0a2c902ae18d53c62cc
|
[
"BSD-3-Clause"
] | null | null | null |
notebooks/Pandeia-WFIRST Imaging.ipynb
|
josePhoenix/wfirst-tools
|
770783bc1ac0ebef797db0a2c902ae18d53c62cc
|
[
"BSD-3-Clause"
] | null | null | null |
notebooks/Pandeia-WFIRST Imaging.ipynb
|
josePhoenix/wfirst-tools
|
770783bc1ac0ebef797db0a2c902ae18d53c62cc
|
[
"BSD-3-Clause"
] | null | null | null | 49.446845 | 11,681 | 0.528396 |
[
[
[
"# Pandeia for WFIRST Imaging\n\nHow to cite this code:\n\n> Klaus M. Pontoppidan ; Timothy E. Pickering ; Victoria G. Laidler ; Karoline Gilbert ; Christopher D. Sontag, et al.\n\"Pandeia: a multi-mission exposure time calculator for JWST and WFIRST\", Proc. SPIE 9910, Observatory Operations: Strategies, Processes, and Systems VI, 991016 (July 15, 2016); doi:10.1117/12.2231768; http://dx.doi.org/10.1117/12.2231768\n\nThis is an introductory notebook that provides an easy-to-use interface for making Pandeia ETC calculations. This notebook only supports WFIRST imaging and has simplified some configuration options. \n\nRefer to the documentation links provided within the *Help* menu for general information on the Jupyter/IPython notebook interface and useful keyboard short-cuts. The key things you need to know are that you must use ``Shift-Enter`` to execute a cell and that once a cell is executed, all data defined within it becomes available to all other cells. (You can also click the <i class=\"fa-step-forward fa\"></i> icon in the toolbar to run a cell.)\n\nThis first cell sets up the imports and configuration that are required: ",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\n%config InlineBackend.figure_format = 'svg'\nimport sys\nimport os\nimport numpy as np\nimport matplotlib\nfrom matplotlib import style\nstyle.use('ggplot') # see http://matplotlib.org/users/style_sheets.html\n # for info on matplotlib styles\nmatplotlib.rcParams['axes.grid'] = False\nmatplotlib.rcParams['image.origin'] = 'lower'\nimport matplotlib.pyplot as plt\n\n# the first pandeia import is required to run the GUI. the others are provided to\n# allow manual running of calculations and loading/saving of inputs or results.\nfrom toolbox.etc.gui import PandeiaWFIRSTCalculator\nfrom pandeia.engine.perform_calculation import perform_calculation\nfrom pandeia.engine.io_utils import read_json, write_json",
"/opt/conda/lib/python3.6/site-packages/pysynphot/refs.py:118: UserWarning: No graph or component tables found; functionality will be SEVERELY crippled. No files found for /home/jovyan/grp/hst/cdbs/mtab/*_tmg.fits\n 'functionality will be SEVERELY crippled. ' + str(e))\n/opt/conda/lib/python3.6/site-packages/pysynphot/refs.py:125: UserWarning: No thermal tables found, no thermal calculations can be performed. No files found for /home/jovyan/grp/hst/cdbs/mtab/*_tmt.fits\n 'no thermal calculations can be performed. ' + str(e))\n"
]
],
[
[
"The next cell instantiates and runs the GUI. The inputs for ``Source type`` and ``SED type`` will change dynamically depending on which drop-down entry is selected. For simplicity's sake, only a single source at a time is currently supported. \n\nThis source can either be a point source or extended. Extended sources require extra configuration:\n\n- Surface brightness profile\n - Gaussian — $I \\propto e^{r^{-2}}$\n - Exponential — $I \\propto e^{r^{-1}}$\n - de Vaucouleurs — $I \\propto e^{r^{-0.25}}$\n- Major axis scale length of the surface brightness profile (sigma in the case of Gaussian)\n- Ellipticity of the source\n- Position angle of the major axis (measured CCW from horizontal)\n\nSource flux can currently only be specified in $F_{\\nu}$ units such as ``mJy`` or AB magnitudes at a specific wavelength. \n\nThere are several options for configuring the spectral energy distribution (SED) of the source:\n\n- Power-law — $F \\propto \\lambda^{index}$\n- Blackbody\n- Star — As calculated from the Phoenix models. Each entry shows the spectral type, $T_{eff}$, and $log\\ g$ used.\n- Extragalactic — Compiled from the Brown et al. (2014) catalog of integrated galaxy spectra\n\nIn each case, the specified redshift is applied to the SED.\n\nCurrently, the WFIRST wide-field imager (WFI) is the only available instrument. Its configuration parameters are:\n\n- Filter\n- Readmode — Currently modeled after JWST's NIRCam and specifies how many reads/skips there are per group\n- Subarray — Geometry of the region of the detector being read-out\n- Groups — Number of groups per integration\n- Integrations — Number of integrations to perform\n- Exposures — Number of sets of integrations to perform\n\nThe extracted flux and signal-to-noise ratio are calculated via aperture photometry. The source region is circular with the configured radius and the background region is annular with the configured inner and outer radii. 
The GUI automatically checks these radii to prevent overlap. For example, if you increase aperture radius, the annulus radii will automatically adjust accordingly. The display of these radii on the 2D plots can be toggled by clicking *Overlay*.\n\nTo run the calculation, click *Calculate* and the results will be displayed below. You can select what to display in the plots via the two pull-downs: *1D Plot* and *2D Image*.",
"_____no_output_____"
]
],
[
[
"g = PandeiaWFIRSTCalculator()\ng.display()",
"_____no_output_____"
]
],
[
[
"It is possible to extract the full input and output information from the ETC calculation. The ETC input and output are both stored as dictionaries, which can be directly manipulated. ",
"_____no_output_____"
]
],
[
[
"g.run_engine()\ncalculation = g.calculation_input\nresult = g.calculation_result",
"_____no_output_____"
]
],
[
[
"As an example, we will create a new source and add it to our ETC scene. First, we import a method to create a default source.",
"_____no_output_____"
]
],
[
[
"from pandeia.engine.calc_utils import build_default_source\ns = build_default_source()",
"_____no_output_____"
]
],
[
[
"Then, we move the source up by 1 arcsecond, change its flux to ABmag = 24 and make it extended.",
"_____no_output_____"
]
],
[
[
"s['spectrum']['normalization']['norm_fluxunit'] = 'abmag'\ns['spectrum']['normalization']['norm_flux'] = 21.\ns['shape']['geometry'] = 'sersic'\ns['shape']['sersic_index'] = 1. # exponential disk\ns['shape']['major'] = 0.4 # major axis in arcseconds\ns['shape']['minor'] = 0.1 # minor axis in arcseconds\ns['position']['y_offset'] = 1.0 # offset in arcseconds\ns['position']['orientation'] = 23. # Orientation relative to horizontal in degrees",
"_____no_output_____"
]
],
[
[
"A scene is just a list of sources, so we append the new source we just made. ",
"_____no_output_____"
]
],
[
[
"calculation['scene'].append(s)",
"_____no_output_____"
]
],
[
[
"And make a new calculation.",
"_____no_output_____"
]
],
[
[
"r = perform_calculation(calculation)",
"_____no_output_____"
]
],
[
[
"If we add the result of the calculation to the GUI, we can see everything plotted again.",
"_____no_output_____"
]
],
[
[
"g.calculation_result = r",
"_____no_output_____"
],
[
"plt.imshow(g.calculation_result['2d']['detector'])",
"_____no_output_____"
],
[
"g.calculation_input",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
4a2129943829e49a6056e1b801470a3750a845fd
| 3,738 |
ipynb
|
Jupyter Notebook
|
01_download_manipulate_data/05_Spd_Spatialsort_US_Contiguous.ipynb
|
Quansight/scipy2020_spatial_algorithms_at_scale
|
05e117e0cb450e72d803e2fb7eadb23c2ced749d
|
[
"MIT"
] | 5 |
2020-07-14T02:00:49.000Z
|
2022-01-12T14:15:52.000Z
|
01_download_manipulate_data/05_Spd_Spatialsort_US_Contiguous.ipynb
|
Quansight/scipy2020_spatial_algorithms_at_scale
|
05e117e0cb450e72d803e2fb7eadb23c2ced749d
|
[
"MIT"
] | 2 |
2020-07-08T20:34:25.000Z
|
2020-07-09T01:11:51.000Z
|
01_download_manipulate_data/05_Spd_Spatialsort_US_Contiguous.ipynb
|
Quansight/scipy2020_spatial_algorithms_at_scale
|
05e117e0cb450e72d803e2fb7eadb23c2ced749d
|
[
"MIT"
] | 5 |
2020-07-14T02:00:55.000Z
|
2021-07-01T16:58:05.000Z
| 23.961538 | 92 | 0.540396 |
[
[
[
"import time\nfrom datetime import datetime\nimport dask.dataframe as dd\nfrom distributed import LocalCluster, Client\nimport spatialpandas as spd\nfrom spatialpandas.geometry import (\n PointArray, MultiPointArray, LineArray,\n MultiLineArray, PolygonArray, MultiPolygonArray\n)\nfrom spatialpandas import GeoSeries, GeoDataFrame\n%matplotlib inline",
"_____no_output_____"
],
[
"# set up data paths\nbase_path = Path().cwd().parent\ndata_dir = base_path.joinpath('data')",
"_____no_output_____"
],
[
"# create local dask cluster\ncluster = LocalCluster(#silence_logs=logging.ERROR,\n dashboard_address=':8790',\n n_workers=4,\n threads_per_worker=2,\n memory_limit='3 GB')\n\n\nclient = Client(cluster)\nclient",
"_____no_output_____"
],
[
"# read in the point data\ncont_us_path = data_dir.joinpath('contiguous_us_w_geohash.parquet')\nddf = dd.read_parquet(cont_us_path, columns=['latitude', 'longitude'])\ndisplay(ddf.head(2))\nlen(ddf)",
"_____no_output_____"
],
[
"# load data into spatialpandas geodataframe\ndf = ddf.map_partitions(\n lambda df: GeoDataFrame(dict(\n position=PointArray(df[['longitude', 'latitude']]),\n **{col: df[col] for col in df.columns}\n ))\n)",
"_____no_output_____"
],
[
"t0 = time.time()\n# spatially sort the data\nsavepath = data_dir.joinpath('us_cont_spatiallysorted.parquet')\ndf.pack_partitions(npartitions=df.npartitions, shuffle='disk').to_parquet(savepath)\ndt = time.time() - t0",
"_____no_output_____"
],
[
"# save timing info\nwith open(f'spatial_sort_time-{datetime.now()}.csv', 'w') as f:\n f.write(f'time_min,npartitions\\n{dt/60},{df.npartitions}')\nprint('dt (s):', dt)",
"_____no_output_____"
],
[
"# check the saved file has the same data as the original\ndf = spd.io.read_parquet_dask(savepath)\ndisplay(df.head(2))\nlen(df)",
"_____no_output_____"
],
[
"# release the dask workers\ncluster.scale(0)",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a21335005374489daf368797a6a9f1dd1a36c0b
| 1,389 |
ipynb
|
Jupyter Notebook
|
first.ipynb
|
moharanajanmajay/lets-upgrade-assignment
|
6ad5bca14e6362dfe9c4dbd0e968ddcacfc36253
|
[
"Apache-2.0"
] | null | null | null |
first.ipynb
|
moharanajanmajay/lets-upgrade-assignment
|
6ad5bca14e6362dfe9c4dbd0e968ddcacfc36253
|
[
"Apache-2.0"
] | null | null | null |
first.ipynb
|
moharanajanmajay/lets-upgrade-assignment
|
6ad5bca14e6362dfe9c4dbd0e968ddcacfc36253
|
[
"Apache-2.0"
] | null | null | null | 28.346939 | 244 | 0.519078 |
[
[
[
"<a href=\"https://colab.research.google.com/github/moharanajanmajay/lets-upgrade-assignment/blob/master/first.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"print('Enter the height of the aeroplane')\nheight=input('The height of the aeroplane')\nheight=int(height)\nif height < 1000:\n print('Its safe to land')\nelif height > 1000 and height < 5000:\n print('Bring it down to 1000 ft')\nelse :\n print('The height is more than 5000ft . Turn around and try again later')",
"_____no_output_____"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
]
] |
4a213702c9c7e8c1825b10af781f95e5202b1dc4
| 66,316 |
ipynb
|
Jupyter Notebook
|
src/Stage_I/Badesha/Hospital_Beds.ipynb
|
HarinB4/Covid-19DataProject
|
86c519f5a4d242e70508e2629ef2537f5ecc02e7
|
[
"MIT"
] | 1 |
2021-05-28T19:58:01.000Z
|
2021-05-28T19:58:01.000Z
|
src/Stage_I/Badesha/Hospital_Beds.ipynb
|
HarinB4/Covid-19DataProject
|
86c519f5a4d242e70508e2629ef2537f5ecc02e7
|
[
"MIT"
] | null | null | null |
src/Stage_I/Badesha/Hospital_Beds.ipynb
|
HarinB4/Covid-19DataProject
|
86c519f5a4d242e70508e2629ef2537f5ecc02e7
|
[
"MIT"
] | null | null | null | 40.659718 | 129 | 0.281395 |
[
[
[
"# Display HospitalBeds Dataset and Merge with Super COVID-19 DataFrame",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\n\n#Reads simplefied hospitalbed data and covid-19 dataset\nread = pd.read_csv(\"../../../data/Hospital_Beds.csv\")\nsupercovid = pd.read_csv(\"../../../data/output/covid.csv\")",
"_____no_output_____"
]
],
[
[
"### Remove unnecessary variables, General display of Dataset. ",
"_____no_output_____"
]
],
[
[
"#Displays Enrichment Data while removing unnecssary variables.\nmyhb = read.drop([\"X\",\"Y\",\"HQ_ADDRESS\",\"HQ_ADDRESS1\",\"HQ_ZIP_CODE\",\"HQ_CITY\",\"CNTY_FIPS\"], axis=1)\n\n#renames columns so it will be easier to merge with supercovid-19 dataframe\nmyhb = myhb.rename(columns = {'HQ_STATE':'State','FIPS':'countyFIPS'})\n\n#displays dataset to see progress\nmyhb",
"_____no_output_____"
]
],
[
[
"### Modify data into Hierarchical Indexes for easy readability and to merge with super COVID-19 Dataframe.",
"_____no_output_____"
]
],
[
[
"#sets the Hierarchical index of states, counties, and countyFIPS\nHosbed = myhb.set_index(['STATE_NAME','COUNTY_NAME','countyFIPS'])\n\n#removes more unnecessary variables that won't be used now. *Previous dataset is kept for hospital name purpose*\nHosbed = Hosbed.drop([\"HOSPITAL_NAME\",\"OBJECTID\"], axis=1)\n\n#sorts dataframe by FIPS and outputs\nHosbed = Hosbed.sort_values(by=['countyFIPS'])\nHosbed.head(10)",
"_____no_output_____"
]
],
[
[
"## Groups Hospital Bed dataset by counties of each state and sums values by each individual county. ",
"_____no_output_____"
]
],
[
[
"#groups hospitalbed dataset by counties so integer variables can be added up together for easier merge with covid19 data\nNew_HosBed = Hosbed.groupby(['STATE_NAME','COUNTY_NAME','countyFIPS','STATE_FIPS']).sum()\n\n#display dataset to see if groupby function works\nNew_HosBed.head(20)",
"_____no_output_____"
],
[
"#merges the hospitalbed dataset with the covid19 data by countyFIPS which is the unique identifer\nHosBedMerge = pd.merge(supercovid,New_HosBed, on = [\"countyFIPS\"], how = \"left\")\n\n#drop unneccesarry extra columns that were added after the merge. \nHosBedMerge = HosBedMerge.drop(['Unnamed: 0'],axis=1)\n\n#display if merge was a success. \nHosBedMerge",
"_____no_output_____"
]
],
[
[
"### Exports data to a csv file",
"_____no_output_____"
]
],
[
[
"Merge_hb = HosBedMerge.to_csv(\"../../../data/output/COVID19_HOSBEDS_MERGE.csv\")",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a214df73e335e7dcd31246b303551577fa7e63c
| 37,061 |
ipynb
|
Jupyter Notebook
|
tutorials/W2D2_LinearSystems/student/W2D2_Tutorial3.ipynb
|
janeite/course-content
|
2a3ba168c5bfb5fd5e8305fe3ae79465b0add52c
|
[
"CC-BY-4.0",
"BSD-3-Clause"
] | 2,294 |
2020-05-11T12:05:35.000Z
|
2022-03-28T21:23:34.000Z
|
tutorials/W2D2_LinearSystems/student/W2D2_Tutorial3.ipynb
|
janeite/course-content
|
2a3ba168c5bfb5fd5e8305fe3ae79465b0add52c
|
[
"CC-BY-4.0",
"BSD-3-Clause"
] | 629 |
2020-05-11T15:42:26.000Z
|
2022-03-29T12:23:35.000Z
|
tutorials/W2D2_LinearSystems/student/W2D2_Tutorial3.ipynb
|
janeite/course-content
|
2a3ba168c5bfb5fd5e8305fe3ae79465b0add52c
|
[
"CC-BY-4.0",
"BSD-3-Clause"
] | 917 |
2020-05-11T12:47:53.000Z
|
2022-03-31T12:14:41.000Z
| 35.601345 | 487 | 0.579504 |
[
[
[
"<a href=\"https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W2D2_LinearSystems/student/W2D2_Tutorial3.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# Tutorial 3: Combining determinism and stochasticity\n**Week 2, Day 2: Linear Systems**\n\n**By Neuromatch Academy**\n\n**Content Creators**: Bing Wen Brunton, Alice Schwarze, Biraj Pandey\n\n**Content Reviewers**: Norma Kuhn, John Butler, Matthew Krause, Ella Batty, Richard Gao, Michael Waskom",
"_____no_output_____"
],
[
"**Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs**\n\n<p align='center'><img src='https://github.com/NeuromatchAcademy/widgets/blob/master/sponsors.png?raw=True'/></p>",
"_____no_output_____"
],
[
"---\n# Tutorial Objectives\n\n*Estimated timing of tutorial: 45 minutes*\n\nTime-dependent processes rule the world. \n\nNow that we've spent some time familiarizing ourselves with the behavior of such systems when their trajectories are (1) entirely predictable and deterministic, or (2) governed by random processes, it's time to consider that neither is sufficient to describe neuroscience. Instead, we are often faced with processes for which we know some dynamics, but there is some random aspect as well. We call these **dynamical systems with stochasticity**.\n\nThis tutorial will build on our knowledge and gain some intuition for how deterministic and stochastic processes can both be a part of a dynamical system by:\n* Simulating random walks \n* Investigating the mean and variance of a Ornstein-Uhlenbeck (OU) process\n* Quantifying the OU process's behavior at equilibrium.",
"_____no_output_____"
]
],
[
[
"# @title Tutorial slides\n\n# @markdown These are the slides for the videos in all tutorials today\nfrom IPython.display import IFrame\nIFrame(src=f\"https://mfr.ca-1.osf.io/render?url=https://osf.io/snv4m/?direct%26mode=render%26action=download%26mode=render\", width=854, height=480)",
"_____no_output_____"
]
],
[
[
"---\n# Setup",
"_____no_output_____"
]
],
[
[
"# Imports\n\nimport numpy as np\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"# @title Figure Settings\nimport ipywidgets as widgets # interactive display\n%config InlineBackend.figure_format = 'retina'\nplt.style.use(\"https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle\")",
"_____no_output_____"
],
[
"# @title Plotting Functions\n# drift-diffusion model\n# returns t, x\n\ndef plot_random_walk_sims(sims, nsims=10):\n \"\"\"Helper for exercise 3A\"\"\"\n fig = plt.figure()\n plt.plot(sim[:nsims, :].T)\n plt.xlabel('time')\n plt.ylabel('position x')\n plt.show()\n\ndef plot_mean_var_by_timestep(mu, var):\n \"\"\"Helper function for exercise 3A.2\"\"\"\n fig, (ah1, ah2) = plt.subplots(2)\n\n # plot mean of distribution as a function of time\n ah1.plot(mu)\n ah1.set(ylabel='mean')\n ah1.set_ylim([-5, 5])\n\n # plot variance of distribution as a function of time\n ah2.plot(var)\n ah2.set(xlabel='time')\n ah2.set(ylabel='variance')\n\n plt.show()\n\ndef plot_ddm(t, x, xinfty, lam, x0):\n fig = plt.figure()\n\n plt.plot(t, xinfty * (1 - lam**t) + x0 * lam**t, 'r')\n plt.plot(t, x, 'k.') # simulated data pts\n\n plt.xlabel('t')\n plt.ylabel('x')\n\n plt.legend({'deterministic solution', 'simulation'})\n plt.show()\n\ndef var_comparison_plot(empirical, analytical):\n fig = plt.figure()\n plt.plot(empirical, analytical, '.', markersize=15)\n plt.xlabel('empirical equilibrium variance')\n plt.ylabel('analytic equilibrium variance')\n plt.plot(np.arange(8), np.arange(8), 'k', label='45 deg line')\n plt.legend()\n\n plt.grid(True)\n plt.show()\n\ndef plot_dynamics(x, t, lam, xinfty=0):\n \"\"\" Plot the dynamics \"\"\"\n fig = plt.figure()\n plt.title('$\\lambda=%0.1f$' % lam, fontsize=16)\n x0 = x[0]\n plt.plot(t, xinfty + (x0 - xinfty) * lam**t, 'r', label='analytic solution')\n plt.plot(t, x, 'k.', label='simulation') # simulated data pts\n plt.ylim(0, x0+1)\n\n plt.xlabel('t')\n plt.ylabel('x')\n plt.legend()\n plt.show()",
"_____no_output_____"
]
],
[
[
"---\n# Section 1: Random Walks\n",
"_____no_output_____"
]
],
[
[
"# @title Video 1: E. coli and Random Walks\nfrom ipywidgets import widgets\n\nout2 = widgets.Output()\nwith out2:\n from IPython.display import IFrame\n class BiliVideo(IFrame):\n def __init__(self, id, page=1, width=400, height=300, **kwargs):\n self.id=id\n src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)\n super(BiliVideo, self).__init__(src, width, height, **kwargs)\n\n video = BiliVideo(id=\"BV1LC4y1h7gD\", width=854, height=480, fs=1)\n print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))\n display(video)\n\nout1 = widgets.Output()\nwith out1:\n from IPython.display import YouTubeVideo\n video = YouTubeVideo(id=\"VHwTBCQJjfw\", width=854, height=480, fs=1, rel=0)\n print('Video available at https://youtube.com/watch?v=' + video.id)\n display(video)\n\nout = widgets.Tab([out1, out2])\nout.set_title(0, 'Youtube')\nout.set_title(1, 'Bilibili')\n\ndisplay(out)",
"_____no_output_____"
]
],
[
[
"To begin, let's first take a gander at how life sometimes wanders around aimlessly. One of the simplest and best-studied living systems that has some interesting behaviors is the _E. coli_ bacterium, which is capable of navigating odor gradients on a substrate to seek a food source. Larger life (including flies, dogs, and blindfolded humans) sometimes use the same strategies to guide their decisions.\n\nHere, we will consider what the _E. coli_ does in the absence of food odors. What's the best strategy when one does not know where to head? Why, flail around randomly, of course!\n\nThe **random walk** is exactly that --- at every time step, use a random process like flipping a coin to change one's heading accordingly. Note that this process is closely related to _Brownian motion_, so you may sometimes hear that terminology used as well.",
"_____no_output_____"
],
[
"Let's start with a **one-dimensional random walk**. A bacterium starts at $x=0$. At every time step, it flips a coin (a very small, microscopic coin of protein mintage), then heads left $\\Delta x = -1$ or right $\\Delta x = +1$ for with equal probability. For instance, if at time step $1$ the result of the coin flip is to head right, then its position at that time step becomes $x_1 = x_0 + \\Delta x = 1.$ Continuing in this way, its position at time step $k+1$ is given by \n$$x_{k+1} = x_k + \\Delta x $$ \n\nWe will simulate this process below and plot the position of the bacterium as a function of the time step. ",
"_____no_output_____"
]
],
[
[
"# @markdown Execute to simulate a random walk\n# parameters of simulation\nT = 100\nt = np.arange(T)\nx = np.zeros_like(t)\nnp.random.seed(2020) # set random seed\n\n# initial position\nx[0] = 0\n\n# step forward in time\nfor k in range(len(t)-1):\n # choose randomly between -1 and 1 (coin flip)\n this_step = np.random.choice([-1,1])\n\n # make the step\n x[k+1] = x[k] + this_step\n\n# plot this trajectory\nfig = plt.figure()\nplt.step(t, x)\nplt.xlabel('time')\nplt.ylabel('position x');",
"_____no_output_____"
]
],
[
[
"## Coding Exercise 1A: Random walk simulation\n\n*Referred to in video as exercise 3A*\n\nIn the previous plot, we assumed that the bacterium takes a step of size $1$ at every point in time. Let's let it take steps of different sizes!\n\nWe will code a random walk where the steps have a standard normal distribution (with mean $\\mu$ and standard deviation $\\sigma$). Instead of running one trajectory at a time, we will write our code so that we can simulate a large number of trajectories efficiently. We will combine this all into a function ``random_walk_simulator`` that generates $N$ random walks each with $T$ time points efficiently.\n\nWe will plot 10 random walks for 10000 time steps each.",
"_____no_output_____"
]
],
[
[
"def random_walk_simulator(N, T, mu=0, sigma=1):\n '''Simulate N random walks for T time points. At each time point, the step\n is drawn from a Gaussian distribution with mean mu and standard deviation\n sigma.\n\n Args:\n T (integer) : Duration of simulation in time steps\n N (integer) : Number of random walks\n mu (float) : mean of step distribution\n sigma (float) : standard deviation of step distribution\n\n Returns:\n (numpy array) : NxT array in which each row corresponds to trajectory\n '''\n\n ###############################################################################\n ## TODO: Code the simulated random steps to take\n ## Hints: you can generate all the random steps in one go in an N x T matrix\n raise NotImplementedError('Complete random_walk_simulator_function')\n ###############################################################################\n # generate all the random steps for all steps in all simulations in one go\n # produces a N x T array\n steps = np.random.normal(..., ..., size=(..., ...))\n\n # compute the cumulative sum of all the steps over the time axis\n sim = np.cumsum(steps, axis=1)\n\n return sim\n\nnp.random.seed(2020) # set random seed\n\n# simulate 1000 random walks for 10000 time steps\nsim = random_walk_simulator(1000, 10000, mu=0, sigma=1)\n\n# take a peek at the first 10 simulations\nplot_random_walk_sims(sim, nsims=10)",
"_____no_output_____"
]
],
[
[
"[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W2D2_LinearSystems/solutions/W2D2_Tutorial3_Solution_4265c9d0.py)\n\n*Example output:*\n\n<img alt='Solution hint' align='left' width=1120.0 height=832.0 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W2D2_LinearSystems/static/W2D2_Tutorial3_Solution_4265c9d0_0.png>\n\n",
"_____no_output_____"
],
[
"We see that the trajectories all look a little different from each other. But there are some general observations one can make: at the beginning almost all trajectories are very close to $x=0$, which is where our bacterium started. As time progresses, some trajectories move further and further away from the starting point. However, a lot of trajectories stay close to the starting point of $x=0$. \n\nNow let's take a look in the next cell at the distribution of bacteria positions at different points in time, analyzing all the trajectories we just generated above. ",
"_____no_output_____"
]
],
[
[
"# @markdown Execute to visualize distribution of bateria positions\nfig = plt.figure()\n# look at the distribution of positions at different times\nfor i, t in enumerate([1000,2500,10000]):\n\n # get mean and standard deviation of distribution at time t\n mu = sim[:, t-1].mean()\n sig2 = sim[:, t-1].std()\n\n # make a plot label\n mytitle = '$t=${time:d} ($\\mu=${mu:.2f}, $\\sigma=${var:.2f})'\n\n # plot histogram\n plt.hist(sim[:,t-1],\n color=['blue','orange','black'][i],\n #make sure the histograms have the same bins!\n bins=np.arange(-300,300,20),\n # make histograms a little see-through\n alpha=0.6,\n # draw second histogram behind the first one\n zorder=3-i,\n label=mytitle.format(time=t, mu=mu, var=sig2))\n\n plt.xlabel('position x')\n\n # plot range\n plt.xlim([-500, 250])\n\n # add legend\n plt.legend(loc=2)\n\n # add title\n plt.title(r'Distribution of trajectory positions at time $t$')",
"_____no_output_____"
]
],
[
[
"At the beginning of the simulation, the distribution of positions is sharply peaked about $0$. As time progresses, the distribution becomes wider but its center stays closer to $0$. In other words, the mean of the distribution is independent of time, but the variance and standard deviation of the distribution scale with time. Such a process is called a **diffusive process**.\n",
"_____no_output_____"
],
[
"## Coding Exercise 1B: Random walk mean & variance\n\nCompute and then plot the mean and variance of our bacterium's random walk as a function of time.",
"_____no_output_____"
]
],
[
[
"# Simulate random walks\nnp.random.seed(2020) # set random seed\nsim = random_walk_simulator(5000, 1000, mu=0, sigma=1)\n\n##############################################################################\n# TODO: Insert your code here to compute the mean and variance of trajectory positions\n# at every time point:\nraise NotImplementedError(\"Student exercise: need to compute mean and variance\")\n##############################################################################\n\n# Compute mean\nmu = ...\n\n# Compute variance\nvar = ...\n\n# Visualize\nplot_mean_var_by_timestep(mu, var)",
"_____no_output_____"
]
],
[
[
"[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W2D2_LinearSystems/solutions/W2D2_Tutorial3_Solution_796a6346.py)\n\n*Example output:*\n\n<img alt='Solution hint' align='left' width=1120.0 height=832.0 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W2D2_LinearSystems/static/W2D2_Tutorial3_Solution_796a6346_0.png>\n\n",
"_____no_output_____"
],
[
"The expected value of $x$ stays close to 0, even for random walks of very long time. Cool!\n\nThe variance, on the other hand, clearly increases with time. In fact, the variance seems to increase linearly with time!\n",
"_____no_output_____"
],
[
"## Interactive Demo 1: Influence of Parameter Choice\n\n How do the parameters $\\mu$ and $\\sigma$ of the Gaussian distribution from which we choose the steps affect the mean and variance of the bacterium's random walk?",
"_____no_output_____"
]
],
[
[
"#@title\n\n#@markdown Make sure you execute this cell to enable the widget!\n\[email protected]\ndef plot_gaussian(mean=(-0.5, 0.5, .02), std=(.5, 10, .5)):\n sim = random_walk_simulator(5000, 1000, mu=mean, sigma=std)\n\n # compute the mean and variance of trajectory positions at every time point\n mu = np.mean(sim, axis=0)\n var = np.var(sim, axis=0)\n\n # make a figure\n fig, (ah1, ah2) = plt.subplots(2)\n\n # plot mean of distribution as a function of time\n ah1.plot(mu)\n ah1.set(ylabel='mean')\n\n # plot variance of distribution as a function of time\n ah2.plot(var)\n ah2.set(xlabel='time')\n ah2.set(ylabel='variance')",
"_____no_output_____"
]
],
[
[
"[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W2D2_LinearSystems/solutions/W2D2_Tutorial3_Solution_55aa7188.py)\n\n",
"_____no_output_____"
],
[
"---\n# Section 2: The Ornstein-Uhlenbeck (OU) process\n\n*Estimated timing to here from start of tutorial: 14 min*",
"_____no_output_____"
]
],
[
[
"# @title Video 2: Combining Deterministic & Stochastic Processes\nfrom ipywidgets import widgets\n\nout2 = widgets.Output()\nwith out2:\n from IPython.display import IFrame\n class BiliVideo(IFrame):\n def __init__(self, id, page=1, width=400, height=300, **kwargs):\n self.id=id\n src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)\n super(BiliVideo, self).__init__(src, width, height, **kwargs)\n\n video = BiliVideo(id=\"BV1o5411Y7N2\", width=854, height=480, fs=1)\n print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))\n display(video)\n\nout1 = widgets.Output()\nwith out1:\n from IPython.display import YouTubeVideo\n video = YouTubeVideo(id=\"pDNfs5p38fI\", width=854, height=480, fs=1, rel=0)\n print('Video available at https://youtube.com/watch?v=' + video.id)\n display(video)\n\nout = widgets.Tab([out1, out2])\nout.set_title(0, 'Youtube')\nout.set_title(1, 'Bilibili')\n\ndisplay(out)",
"_____no_output_____"
]
],
[
[
"The random walk process we just explored is diffusive, and the distribution of possible trajectories _spreads_, taking on increasing variance with time. Even so, at least in one dimension, the mean remains close to the initial value (in the example above, 0).\n\nOur goal is now to build on this model to construct a **drift-diffusion** model (DDM). DDM is a popular model for memory, which as we all know, is often an exercise in hanging on to a value imperfectly. Decision-making and memory will be the topic for tomorrow, so here we build the mathematical foundations and develop some intuition for how such systems behave!",
"_____no_output_____"
],
[
"To build such a model, let's combine the random walk model with the first differential equations we explored in Tutorial 1 earlier. Although those models had been written in continuous time as $\\dot{x} = a x$, here let's consider the discrete version of the same system and write:\n\n$x_{k+1} = \\lambda x_k$,\n\nwhose solution can be written as\n\n$x_k = x_0 \\lambda^k$,\n\nwhere $x_0$ is the value of $x$ at time $t=0$.\n\nNow, let's simulate and plot the solution of the discrete version of our first differential equation from Tutorial 1 below. **Run the code below.**",
"_____no_output_____"
]
],
[
[
"# parameters\nlam = 0.9\nT = 100 # total Time duration in steps\nx0 = 4. # initial condition of x at time 0\n\n# initiatialize variables\nt = np.arange(0, T, 1.)\nx = np.zeros_like(t)\nx[0] = x0\n\n# Step through in time\nfor k in range(len(t)-1):\n x[k+1] = lam * x[k]\n\n# plot x as it evolves in time\nplot_dynamics(x, t, lam)",
"_____no_output_____"
]
],
[
[
"Notice that this process decays towards position $x=0$. We can make it decay towards any position by adding another parameter $x_\\infty$. The rate of decay is proportional to the difference between $x$ and $x_\\infty$. Our new system is\n\n$x_{k+1} = x_\\infty + \\lambda(x_k - x_{\\infty})$ \n\nWe have to modify our analytic solution slightly to take this into account:\n\n$x_k = x_\\infty(1 - \\lambda^k) + x_0 \\lambda^k$.\n\nLet's simulate and plot the dynamics of this process below. Hopefully, we see that it start at $x_0$ and decay towards $x_{\\infty}.$\n",
"_____no_output_____"
]
],
[
[
"# parameters\nlam = 0.9 # decay rate\nT = 100 # total Time duration in steps\nx0 = 4. # initial condition of x at time 0\nxinfty = 1. # x drifts towards this value in long time\n\n# initiatialize variables\nt = np.arange(0, T, 1.)\nx = np.zeros_like(t)\nx[0] = x0\n\n# Step through in time\nfor k in range(len(t)-1):\n x[k+1] = xinfty + lam * (x[k] - xinfty)\n\n# plot x as it evolves in time\nplot_dynamics(x, t, lam, xinfty)",
"_____no_output_____"
]
],
[
[
"Now we are ready to take this basic, deterministic difference equation and add a diffusion process on top of it! Fun times in Python land.\n\nAs a point of terminology: this type of process is commonly known as a **drift-diffusion model** or **Ornstein-Uhlenbeck (OU) process**. The model is a combination of a _drift_ term toward $x_{\\infty}$ and a _diffusion_ term that walks randomly. You may sometimes see them written as continuous stochastic differential equations, but here we are doing the discrete version to maintain continuity in the tutorial. The discrete version of our OU process has the following form:\n\n$x_{k+1} = x_\\infty + \\lambda(x_k - x_{\\infty}) + \\sigma \\eta$\n\nwhere $\\eta$ is sampled from a standard normal distribution ($\\mu=0, \\sigma=1$). \n",
"_____no_output_____"
],
[
"## Coding Exercise 2: Drift-diffusion model\n\nModify the code below so that each step through time has a _deterministic_ part (_hint_: exactly like the code above) plus a _random, diffusive_ part that is drawn from from a normal distribution with standard deviation of $\\sigma$ (sig in the code). It will plot the dynamics of this process.",
"_____no_output_____"
]
],
[
[
"def simulate_ddm(lam, sig, x0, xinfty, T):\n \"\"\"\n Simulate the drift-diffusion model with given parameters and initial condition.\n Args:\n lam (scalar): decay rate\n sig (scalar): standard deviation of normal distribution\n x0 (scalar): initial condition (x at time 0)\n xinfty (scalar): drift towards convergence in the limit\n T (scalar): total duration of the simulation (in steps)\n\n Returns:\n ndarray, ndarray: `x` for all simulation steps and the time `t` at each step\n \"\"\"\n\n # initiatialize variables\n t = np.arange(0, T, 1.)\n x = np.zeros_like(t)\n x[0] = x0\n\n # Step through in time\n for k in range(len(t)-1):\n ##############################################################################\n ## TODO: Insert your code below then remove\n raise NotImplementedError(\"Student exercise: need to implement simulation\")\n ##############################################################################\n # update x at time k+1 with a determinstic and a stochastic component\n # hint: the deterministic component will be like above, and\n # the stochastic component is drawn from a scaled normal distribution\n x[k+1] = ...\n\n return t, x\n\nlam = 0.9 # decay rate\nsig = 0.1 # standard deviation of diffusive process\nT = 500 # total Time duration in steps\nx0 = 4. # initial condition of x at time 0\nxinfty = 1. # x drifts towards this value in long time\n\n# Plot x as it evolves in time\nnp.random.seed(2020)\nt, x = simulate_ddm(lam, sig, x0, xinfty, T)\nplot_ddm(t, x, xinfty, lam, x0)",
"_____no_output_____"
]
],
[
[
"[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W2D2_LinearSystems/solutions/W2D2_Tutorial3_Solution_c67c12d7.py)\n\n*Example output:*\n\n<img alt='Solution hint' align='left' width=1120.0 height=832.0 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W2D2_LinearSystems/static/W2D2_Tutorial3_Solution_c67c12d7_0.png>\n\n",
"_____no_output_____"
],
[
"## Think! 2: Drift-Diffusion Simulation Observations\n\nDescribe the behavior of your simulation by making some observations. How does it compare to the deterministic solution? How does it behave in the beginning of the stimulation? At the end?",
"_____no_output_____"
],
[
"[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W2D2_LinearSystems/solutions/W2D2_Tutorial3_Solution_301f6f83.py)\n\n",
"_____no_output_____"
],
[
"---\n# Section 3: Variance of the OU process\n\n*Estimated timing to here from start of tutorial: 35 min*\n",
"_____no_output_____"
]
],
[
[
"# @title Video 3: Balance of Variances\nfrom ipywidgets import widgets\n\nout2 = widgets.Output()\nwith out2:\n from IPython.display import IFrame\n class BiliVideo(IFrame):\n def __init__(self, id, page=1, width=400, height=300, **kwargs):\n self.id=id\n src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)\n super(BiliVideo, self).__init__(src, width, height, **kwargs)\n\n video = BiliVideo(id=\"BV15f4y1R7PU\", width=854, height=480, fs=1)\n print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))\n display(video)\n\nout1 = widgets.Output()\nwith out1:\n from IPython.display import YouTubeVideo\n video = YouTubeVideo(id=\"49A-3kftau0\", width=854, height=480, fs=1, rel=0)\n print('Video available at https://youtube.com/watch?v=' + video.id)\n display(video)\n\nout = widgets.Tab([out1, out2])\nout.set_title(0, 'Youtube')\nout.set_title(1, 'Bilibili')\n\ndisplay(out)",
"_____no_output_____"
]
],
[
[
"As we can see, the **mean** of the process follows the solution to the deterministic part of the governing equation. So far, so good!\n\nBut what about the **variance**? \n\nUnlike the random walk, because there's a decay process that \"pulls\" $x$ back towards $x_\\infty$, the variance does not grow without bound with large $t$. Instead, when it gets far from $x_\\infty$, the position of $x$ is restored, until an equilibrium is reached.\n\nThe equilibrium variance for our drift-diffusion system is\n\nVar $= \\frac{\\sigma^2}{1 - \\lambda^2}$.\n\nNotice that the value of this equilibrium variance depends on $\\lambda$ and $\\sigma$. It does not depend on $x_0$ and $x_\\infty$.",
"_____no_output_____"
],
[
"To convince ourselves that things are behaving sensibly, let's compare the empirical variances of the equilibrium solution to the OU equations with the expected formula.\n\n",
"_____no_output_____"
],
[
"## Coding Exercise 3: Computing the variances empirically\n\nWrite code to compute the analytical variance: Var $= \\frac{\\sigma^2}{1 - \\lambda^2}$, and compare against the empirical variances (which is already provided for you using the helper function). You should see that they should be about equal to each other and lie close to the 45 degree ($y=x$) line. ",
"_____no_output_____"
]
],
[
[
"def ddm(T, x0, xinfty, lam, sig):\n t = np.arange(0, T, 1.)\n x = np.zeros_like(t)\n x[0] = x0\n\n for k in range(len(t)-1):\n x[k+1] = xinfty + lam * (x[k] - xinfty) + sig * np.random.standard_normal(size=1)\n\n return t, x\n\n# computes equilibrium variance of ddm\n# returns variance\ndef ddm_eq_var(T, x0, xinfty, lam, sig):\n t, x = ddm(T, x0, xinfty, lam, sig)\n\n # returns variance of the second half of the simulation\n # this is a hack: assumes system has settled by second half\n return x[-round(T/2):].var()\n\nnp.random.seed(2020) # set random seed\n\n# sweep through values for lambda\nlambdas = np.arange(0.05, 0.95, 0.01)\nempirical_variances = np.zeros_like(lambdas)\nanalytical_variances = np.zeros_like(lambdas)\n\nsig = 0.87\n\n# compute empirical equilibrium variance\nfor i, lam in enumerate(lambdas):\n empirical_variances[i] = ddm_eq_var(5000, x0, xinfty, lambdas[i], sig)\n\n##############################################################################\n## Insert your code below to calculate the analytical variances\nraise NotImplementedError(\"Student exercise: need to compute variances\")\n##############################################################################\n\n# Hint: you can also do this in one line outside the loop!\nanalytical_variances = ...\n\n# Plot the empirical variance vs analytical variance\nvar_comparison_plot(empirical_variances, analytical_variances)",
"_____no_output_____"
]
],
[
[
"[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W2D2_LinearSystems/solutions/W2D2_Tutorial3_Solution_b972f241.py)\n\n*Example output:*\n\n<img alt='Solution hint' align='left' width=1120.0 height=832.0 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W2D2_LinearSystems/static/W2D2_Tutorial3_Solution_b972f241_0.png>\n\n",
"_____no_output_____"
],
[
"---\n# Summary\n\n*Estimated timing of tutorial: 45 minutes*\n\nIn this tutorial, we have built and observed OU systems, which have both deterministic and stochastic parts. We see that they behave, on average, similar to our expectations from analyzing deterministic dynamical systems. \n\nImportantly, **the interplay between the deterministic and stochastic parts** serve to _balance_ the tendency of purely stochastic processes (like the random walk) to increase in variance over time. This behavior is one of the properties of OU systems that make them popular choices for modeling cognitive functions, including short-term memory and decision-making.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
4a215467df0e5a799954a62d467fc1e935b9f5c0
| 22,488 |
ipynb
|
Jupyter Notebook
|
02_String.ipynb
|
agus-zuliyanto/dasarpython
|
5b73453ca9b963d60c7fb867b0800fe0b58ae343
|
[
"CC-BY-3.0"
] | 6 |
2019-08-06T03:35:14.000Z
|
2021-12-06T11:45:42.000Z
|
02_String.ipynb
|
agus-zuliyanto/dasarpython
|
5b73453ca9b963d60c7fb867b0800fe0b58ae343
|
[
"CC-BY-3.0"
] | null | null | null |
02_String.ipynb
|
agus-zuliyanto/dasarpython
|
5b73453ca9b963d60c7fb867b0800fe0b58ae343
|
[
"CC-BY-3.0"
] | 8 |
2019-08-10T10:03:46.000Z
|
2020-11-26T10:56:02.000Z
| 25.042316 | 285 | 0.550827 |
[
[
[
"# String\n## `print()`\nFungsi `print()` mencetak seluruh argumennya sebagai *string*, dipisahkan dengan spasi dan diikuti dengan sebuah *line break*:",
"_____no_output_____"
]
],
[
[
"name = \"Budi\"\n\nprint(\"Hello World\")\nprint(\"Hello\", 'World')\nprint(\"Hello\", name)",
"Hello World\nHello World\nHello Budi\n"
]
],
[
[
"> Catatan: Fungsi untuk mencetak di Python 2.7 dan Python 3 berbeda. Di Python 2.7, kita tidak perlu menggunakan tanda kurung di sekitar argumennya (contoh: `print \"Hello World\"`).",
"_____no_output_____"
]
],
[
[
"print(\"Hello\", \"World\")",
"Hello World\n"
]
],
[
[
"Fungsi `print()` memiliki argumen opsional untuk mengontrol di mana dan bagaimana statemen yang diberikan akan dicetak. Di antaranya adalah:\n- `sep`, yaitu pemisah antar kata (nilai *default*-nya adalah spasi)\n- `end`, yaitu karakter yang akan ditambahkan di akhir statemen (nilai *default*-nya adalah `\\n` (karakter *newline*))",
"_____no_output_____"
]
],
[
[
"print(\"Hello\", \"World\", sep=\"...\", end=\"!!\")",
"Hello...World!!"
],
[
"print(\"Good\", \"Morning\", \"Everyone\", sep=\"...\", end=\":)\")",
"Good...Morning...Everyone:)"
]
],
[
[
"## Mengatur format string",
"_____no_output_____"
],
[
"Ada banyak metode yang dapat digunakan untuk mengatur format dan memanipulasi string. Beberapa metode tersebut akan ditunjukkan di sini.\n\n*String concatenation* adalah penggabungan dari dua *string*. Perhatikan bahwa ketika kita melakukan penggabungan, tidak ada spasi di antara kedua *string*.",
"_____no_output_____"
]
],
[
[
"string1 = 'World'\nstring2 = '!'\nprint('Hello' + string1 + string2)",
"HelloWorld!\n"
]
],
[
[
"Operator `%` digunakan untuk melakukan format pada sebuah *string*, dengan cara menyisipkan nilai yang disertakan setelahnya. *String* tersebut harus memiliki penanda yang mengidentifikasikan di mana kita harus menyisipkan nilai tersebut. Penanda yang sering digunakan adalah:\n- `%s`: string\n- `%d`: integer\n- `%f`: float\n- `%o`: oktal\n- `%x`: heksadesimal\n- `%e`: eksponensial\n",
"_____no_output_____"
]
],
[
[
"print(\"Hello %s\" % string1)\nprint(\"Actual Number = %d\" %18)\nprint(\"Float of the number = %f\" %18)\nprint(\"Octal equivalent of the number = %o\" %18)\nprint(\"Hexadecimal equivalent of the number = %x\" %18)\nprint(\"Exponential equivalent of the number = %e\" %18)",
"Hello World\nActual Number = 18\nFloat of the number = 18.000000\nOctal equivalent of the number = 22\nHexadecimal equivalent of the number = 12\nExponential equivalent of the number = 1.800000e+01\n"
]
],
[
[
"Ketika kita merujuk ke lebih dari satu variabel, kita harus menggunakan tanda kurung. Nilai-nilai disisipkan sesuai dengan urutan mereka di dalam tanda kurung.",
"_____no_output_____"
]
],
[
[
"print(\"Hello %s%s The meaning of life is %d\" % (string1, string2, 42))",
"Hello World! The meaning of life is 42\n"
]
],
[
[
"## Metode-metode terkait string lainnya\nMengalikan sebuah *string* sebuah integer akan mengembalikan sebuah *string* dengan *string* asli yang diulang-ulang sebanyak nilai integer tersebut.",
"_____no_output_____"
]
],
[
[
"print(\"Hello World! \" * 5)",
"Hello World! Hello World! Hello World! Hello World! Hello World! \n"
]
],
[
[
"*String* dapat ditransformasikan dengan menggunakan banyak fungsi:",
"_____no_output_____"
]
],
[
[
"s = \"hello wOrld\"\nprint(s.capitalize()) # mengubah seluruh huruf di string menjadi huruf kecil, kecuali huruf pertama yang menjadi huruf kapital\nprint(s.upper()) # mengubah seluruh huruf di string menjadi huruf besar\nprint(s.lower()) # mengubah seluruh huruf di string menjadi huruf kecil\nprint('|%s|'% \" lots of space \".strip()) # menghilangkan spasi di awal dan akhir string\nprint(\"Hello World\".replace(\"World\", \"Class\")) # mengganti kata \"World\" dengan kata \"Class\"",
"Hello world\nHELLO WORLD\nhello world\n|lots of space|\nHello Class\n"
]
],
[
[
"Python juga menyediakan banyak fungsi yang dapat kita gunakan untuk melakukan pengecekan pada *string*.",
"_____no_output_____"
]
],
[
[
"s = \"Hello World\"\nprint(\"The length of '%s' is\" %s, len(s), \"characters\") # len() memberikan panjang string\ns.startswith(\"Hello\") and s.endswith(\"World\") # mengecek awal dan akhir\nprint(\"There are %d 'l's but only %d World in %s\" % (s.count('l'), s.count('World'), s)) # menghitung huruf di sebuah string\nprint('\"el\" is at index', s.find('el'), \"in\", s) # mencari index potongan kata \"el\" di kalimat \"Hello World\"\ns.find('ab') # mencari index potongan kata \"ab\" di kalimat \"Hello World\". Apabila tidak ditemukan, maka fungsi akan mengembalikan -1",
"The length of 'Hello World' is 11 characters\nThere are 3 'l's but only 1 World in Hello World\n\"el\" is at index 1 in Hello World\n"
]
],
[
[
"## Operator untuk perbandingan string",
"_____no_output_____"
],
[
"*String* dapat dibandingkan satu sama lain sesuai dengan urutan leksikal/alfabet.",
"_____no_output_____"
]
],
[
[
"'abc' < 'bbc' <= 'bbc'",
"_____no_output_____"
],
[
"'abc' > 'def'",
"_____no_output_____"
]
],
[
[
"Kita dapat menggunakan `in` untuk mengecek apakah sebuah *string* merupakan potongan (*substring*) dari *string* lainnya.",
"_____no_output_____"
]
],
[
[
"\"ABC\" in \"This is the ABC of Python\"",
"_____no_output_____"
]
],
[
[
"## Mengakses bagian dari string",
"_____no_output_____"
],
[
"Kita dapat mengakses bagian dari *string* dengan menggunakan indeks dan kurung siku. Indeks dimulai dari 0.",
"_____no_output_____"
]
],
[
[
"s = '123456789'\nprint('The first character of', s, 'is', s[0])\nprint('The last character of', s, 'is', s[len(s)-1])",
"The first character of 123456789 is 1\nThe last character of 123456789 is 9\n"
]
],
[
[
"Indeks negatif dapat digunakan untuk memulai perhitungan dari belakang.",
"_____no_output_____"
]
],
[
[
"print('The first character of', s, 'is', s[-len(s)])\nprint('The last character of', s, 'is', s[-1])",
"The first character of 123456789 is 1\nThe last character of 123456789 is 9\n"
]
],
[
[
"*Substring* bisa didapatkan dengan menggunakan `a:b` untuk menandakan karakter dari indeks `a` sampai indeks `b-1`. Perhatikan bahwa karakter terakhir (indeks `b`) tidak diikutsertakan.",
"_____no_output_____"
]
],
[
[
"print(\"First three charcters\", s[0:3])\nprint(\"Next three characters\", s[3:6])",
"First three charcters 123\nNext three characters 456\n"
]
],
[
[
"Indeks awal yang kosong menandakan awal *string* (sama dengan indeks 0), sementara indeks akhir yang kosong menandakan akhir *string*.",
"_____no_output_____"
]
],
[
[
"print(\"First three characters\", s[:3])\nprint(\"Last three characters\", s[-3:])",
"First three characters 123\nLast three characters 789\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a2167aa4875fff5024278932b4405b70dc505f1
| 19,245 |
ipynb
|
Jupyter Notebook
|
examples/tutorial/05_Interactive_Pipelines.ipynb
|
ablythed/holoviz
|
ddfbfc504ade73e24aeb66560d9d3aa6f578956b
|
[
"BSD-3-Clause"
] | 207 |
2019-11-14T08:41:44.000Z
|
2022-03-31T11:26:18.000Z
|
examples/tutorial/05_Interactive_Pipelines.ipynb
|
ablythed/holoviz
|
ddfbfc504ade73e24aeb66560d9d3aa6f578956b
|
[
"BSD-3-Clause"
] | 74 |
2019-11-21T16:39:45.000Z
|
2022-02-15T16:46:51.000Z
|
examples/tutorial/05_Interactive_Pipelines.ipynb
|
ablythed/holoviz
|
ddfbfc504ade73e24aeb66560d9d3aa6f578956b
|
[
"BSD-3-Clause"
] | 36 |
2020-01-17T08:01:53.000Z
|
2022-03-11T01:33:47.000Z
| 31.140777 | 563 | 0.606495 |
[
[
[
"<style>div.container { width: 100% }</style>\n<img style=\"float:left; vertical-align:text-bottom;\" height=\"65\" width=\"172\" src=\"../assets/holoviz-logo-unstacked.svg\" />\n<div style=\"float:right; vertical-align:text-bottom;\"><h2>Tutorial 5. Interactive Pipelines</h2></div>",
"_____no_output_____"
],
[
"The plots built up over the first few tutorials were all highly interactive in the web browser, with interactivity provided by Bokeh plotting tools within the plots or in some cases by HoloViews generating a Bokeh widget to select for a `groupby` over a categorical variable. However, when you are exploring a dataset, you might want to see how _any_ aspect of the data or plot changes if varied interactively. Luckily, hvPlot makes it almost trivially easy to do this, so that you can very easily explore any parameter or setting in your code. \n\n## Panel widgets\n\nTo do this, we will need a widget library, and here we will be using [Panel](https://panel.holoviz.org/) to generate Bokeh widgets under user control, just as hvPlot uses Panel to generate widgets for a `groupby` as shown previously. Let's first get ahold of a Panel widget to see how they work. Here, let's create a Panel floating-point number slider to specify an earthquake magnitude between zero and nine:",
"_____no_output_____"
]
],
[
[
"import panel as pn\n\npn.extension(sizing_mode='stretch_width')",
"_____no_output_____"
],
[
"mag_slider = pn.widgets.FloatSlider(name='Minimum Magnitude', start=0, end=9, value=6)\nmag_slider",
"_____no_output_____"
]
],
[
[
"The widget is a JavaScript object, but there are bidirectional connections between JS and Python that let us see and change the value of this slider using its `value` parameter:",
"_____no_output_____"
]
],
[
[
"mag_slider.value",
"_____no_output_____"
],
[
"mag_slider.value = 7",
"_____no_output_____"
]
],
[
[
"#### Exercise\n\nTry moving the slider around and rerunning the `mag_slider.value` above to access the current slider value. As you can see, you can easily get the value of any widget to use in subsequent cells, but you'd need to re-run any cell that accesses that value for it to get updated.\n\n\n# hvPlot .interactive()\n\nhvPlot provides an easy way to connect widgets directly into an expression you want to control.\n\nFirst, let's read in our data:",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nimport holoviews as hv\nimport hvplot.pandas # noqa",
"_____no_output_____"
],
[
"%%time\ndf = pd.read_parquet('../data/earthquakes-projected.parq')\ndf = df.set_index('time').tz_localize(None)",
"_____no_output_____"
]
],
[
[
"Now, let's do a little filtering that we might want to control with such a widget, such as selecting the highest-magnitude events:",
"_____no_output_____"
]
],
[
[
"from holoviews.element.tiles import WEB_MERCATOR_LIMITS\n\ndf2 = df[['mag', 'depth', 'latitude', 'longitude', 'place', 'type']][df['northing'] < WEB_MERCATOR_LIMITS[1]]\n\ndf2[df2['mag'] > 5].head()",
"_____no_output_____"
]
],
[
[
"What if instead of '5', we want the output above always to reflect the current value of `mag_slider`? We can do that by using hvPlot's `.interactive()` support, passing in a widget almost anywhere we want in a pipeline:",
"_____no_output_____"
]
],
[
[
"dfi = df2.interactive()\n\ndfi[dfi['mag'] > mag_slider].head()",
"_____no_output_____"
]
],
[
[
"Here, `.interactive` is a wrapper around your DataFrame or Xarray object that lets you provide Panel widgets almost anywhere you'd otherwise be using a number. Just as importing `hvplot.pandas` provides a `.hvplot()` method or object on your dataframe, it also provides a `.interactive` method or object that gives you a general-purpose *interactive* `Dataframe` driven by widgets. `.interactive` stores a copy of your pipeline (series of method calls or other expressions on your data) and dynamically replays the pipeline whenever that widget changes. \n\n`.interactive` supports just about any output you might want to get out of such a pipeline, such as text or numbers:",
"_____no_output_____"
]
],
[
[
"dfi[dfi['mag'] > mag_slider].shape",
"_____no_output_____"
]
],
[
[
"Or Matplotlib plots:",
"_____no_output_____"
]
],
[
[
"dfi[dfi['mag'] > mag_slider].plot(y='depth', kind='hist', bins=np.linspace(0, 50, 51))",
"_____no_output_____"
]
],
[
[
"Each time you drag the widget, hvPlot replays the pipeline and updates the output shown. \n\nOf course, `.interactive` also supports `.hvplot()`, here with a new copy of a widget so that it will be independent of the other cells above:",
"_____no_output_____"
]
],
[
[
"mag_slider2 = pn.widgets.FloatSlider(name='Minimum magnitude', start=0, end=9, value=6)\n\ndfi[dfi['mag'] > mag_slider2].hvplot(y='depth', kind='hist', bins=np.linspace(0, 50, 51))",
"_____no_output_____"
]
],
[
[
"You can see that the depth distribution varies dramatically as you vary the minimum magnitude, with the lowest magnitude events apparently only detectable at short depths. There also seems to be some artifact at depth 10, which is the largest bin regardless of the filtering for all but the largest magnitudes.",
"_____no_output_____"
],
[
"## Date widgets\n\nA `.interactive()` pipeline can contain any number of widgets, including any from the Panel [reference gallery](https://panel.holoviz.org/reference/index.html#widgets). For instance, let's make a widget to specify a date range covering the dates found in this data:",
"_____no_output_____"
]
],
[
[
"date = pn.widgets.DateRangeSlider(name='Date', start=df.index[0], end=df.index[-1])\ndate",
"_____no_output_____"
]
],
[
[
"Now we can access the value of this slider:",
"_____no_output_____"
]
],
[
[
"date.value",
"_____no_output_____"
]
],
[
[
"As this widget is specifying a range, this time the value is returned as a tuple. If you prefer, you can get the components of the tuple directly via the `value_start` and `value_end` parameters respectively:",
"_____no_output_____"
]
],
[
[
"f'Start is at {date.value_start} and the end is at {date.value_end}'",
"_____no_output_____"
]
],
[
[
"Once again, try specifying different ranges with the widgets and rerunning the cell above.",
"_____no_output_____"
],
[
"Now let's use this widget to expand our expression to filter by date as well as magnitude:",
"_____no_output_____"
]
],
[
[
"mag = pn.widgets.FloatSlider(name='Minimum magnitude', start=0, end=9, value=6)\n\nfiltered = dfi[\n (dfi['mag'] > mag) &\n (dfi.index >= date.param.value_start) &\n (dfi.index <= date.param.value_end)]\n\nfiltered.head()",
"_____no_output_____"
]
],
[
[
"You can now use either the magnitude or the date range (or both) to filter the data, and the output will update. Note that here you want to move the start date of the range slider rather than the end; otherwise, you may not see the table change because the earthquakes are displayed in date order.",
"_____no_output_____"
],
[
"#### Exercise\n\nTo specify the minimum earthquake magnitude, notice that we supplied the whole `mag` widget but `.interactive()` used only the `value` parameter of this widget by default. To be explicit, you may use `mag.param.value` instead if you wish. Try it!",
"_____no_output_____"
],
[
"#### Exercise\n\nFor readability, seven columns were chosen before displaying the `DataFrame`. Have a look at `df.columns` and pick a different set of columns for display.",
"_____no_output_____"
],
[
"## .interactive() and HoloViews \n\n`.interactive()` lets you work naturally with the compositional HoloViews plots provided by `.hvplot()`. Here, let's combine such plots using the HoloViews `+` operator:",
"_____no_output_____"
]
],
[
[
"mag_hist = filtered.hvplot(y='mag', kind='hist', responsive=True, min_height=200)\ndepth_hist = filtered.hvplot(y='depth', kind='hist', responsive=True, min_height=200)\n\nmag_hist + depth_hist",
"_____no_output_____"
]
],
[
[
"These are the same two histograms we saw earlier, but now we can filter them on data dimensions like `time` that aren't even explicitly shown in the plot, using the Panel widgets.\n\n## Filtering earthquakes on a map\n\nTo display the earthquakes on a map, we will first create a subset of the data to make it quick to update without needing Datashader.:",
"_____no_output_____"
]
],
[
[
"subset_df = df[\n (df.northing < WEB_MERCATOR_LIMITS[1]) &\n (df.mag > 4) &\n (df.index >= pd.Timestamp('2017-01-01')) &\n (df.index <= pd.Timestamp('2018-01-01'))]",
"_____no_output_____"
]
],
[
[
"Now we can make a new interactive `DataFrame` from this new subselection:",
"_____no_output_____"
]
],
[
[
"subset_dfi = subset_df.interactive(sizing_mode='stretch_width')",
"_____no_output_____"
]
],
[
[
"And now we can declare our widgets and use them to filter the interactive `DataFrame` as before:",
"_____no_output_____"
]
],
[
[
"date_subrange = pn.widgets.DateRangeSlider(\n name='Date', start=subset_df.index[0], end=subset_df.index[-1])\nmag_subrange = pn.widgets.FloatSlider(name='Magnitude', start=3, end=9, value=3)\n\nfiltered_subrange = subset_dfi[\n (subset_dfi.mag > mag_subrange) &\n (subset_dfi.index >= date_subrange.param.value_start) &\n (subset_dfi.index <= date_subrange.param.value_end)]",
"_____no_output_____"
]
],
[
[
"Now we can plot the earthquakes on an ESRI tilesource, including the filtering widgets as follows:",
"_____no_output_____"
]
],
[
[
"geo = filtered_subrange.hvplot(\n 'easting', 'northing', color='mag', kind='points',\n xaxis=None, yaxis=None, responsive=True, min_height=500, tiles='ESRI')\n\ngeo",
"_____no_output_____"
]
],
[
[
"You'll likely notice some flickering as Panel updates the display when the widgets change in value. The flickering comes because the entire plot gets recreated each time the widget is dragged. You can get finer control over such updates, but doing so requires more advanced methods covered in later tutorials, so here, we will just accept that the plot flickers.\n\n## Terminating methods for `.interactive`\n\nThe examples above all illustrate cases where you can display the output of `.interactive()` and not worry about its type, which is no longer a DataFrame or a HoloViews object, but an `Interactive` object:",
"_____no_output_____"
]
],
[
[
"type(geo)",
"_____no_output_____"
]
],
[
[
"What if you need to work with some part of the interactive pipeline, e.g. to feed it to some function or object that does not understand `Interactive` objects? In such a case, you can use what is called a `terminating method` on your Interactive object to get at the underlying object for you to use.\n\nFor instance, let's create magnitude and depth histograms on this subset of the data as in an earlier notebook and see if we can enable linked selections on them:",
"_____no_output_____"
]
],
[
[
"mag_subhist = filtered_subrange.hvplot(y='mag', kind='hist', responsive=True, min_height=200)\ndepth_subhist = filtered_subrange.hvplot(y='depth', kind='hist', responsive=True, min_height=200)\n\ncombined = mag_subhist + depth_subhist\ncombined",
"_____no_output_____"
]
],
[
[
"Note that this looks like a HoloViews layout with some widgets, but this object is *not* a HoloViews object. Instead it is still an `Interactive` object:",
"_____no_output_____"
]
],
[
[
"type(combined)",
"_____no_output_____"
]
],
[
[
"`link_selections` does not currently understand `Interactive` objects, and so it will raise an exception when given one. If we need a HoloViews `Layout`, e.g. for calling `link_selections`, we can build a layout from the constituent objects using the `.holoviews()` terminating method on `Interactive`:",
"_____no_output_____"
]
],
[
[
"layout = mag_subhist.holoviews() + depth_subhist.holoviews()\nlayout",
"_____no_output_____"
]
],
[
[
"This is now a HoloViews object, so we can use it with `link_selections`:",
"_____no_output_____"
]
],
[
[
"print(type(layout))\n\nls = hv.link_selections.instance()\nls(mag_subhist.holoviews()) + ls(depth_subhist.holoviews())",
"_____no_output_____"
]
],
[
[
"You can use the box selection tool to see how selections compare between these plots. However, you will note that the widgets are no longer displayed. To address this, we can display the widgets separately using a different terminating method, namely `.widgets()`:",
"_____no_output_____"
]
],
[
[
"filtered_subrange.widgets()",
"_____no_output_____"
]
],
[
[
"For reference, the terminating methods for an `Interactive` object are:\n\n- `.holoviews()`: Give me a HoloViews object\n- `.panel()`: Give me a Panel ParamFunction\n\n- `.widgets()`: Give me a layout of widgets associated with this interactive object\n- `.layout()`: Give me the layout of the widgets and display `pn.Column(obj.widgets(), obj.panel())` where `pn.Column` will be described in the [Dashboards notebook](./06_Dashboards.ipynb).",
"_____no_output_____"
],
[
"## Conclusion\n\nUsing the techniques above, you can build up a collection of plots, and other outputs with Panel widgets to control individual bits of computation and display. \n\nWhat if you want to collect these pieces and put them together into a coherent app or dashboard? If so, then the next tutorial will show you how to do so!",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
4a216d83a6b12d38897128b0a45a71691127851f
| 4,094 |
ipynb
|
Jupyter Notebook
|
example-2/example-2-data.ipynb
|
shivyucel/aas-extended-examples
|
117574a3cb3ef43267a45c029c7c5e6b3b3c1e01
|
[
"MIT"
] | null | null | null |
example-2/example-2-data.ipynb
|
shivyucel/aas-extended-examples
|
117574a3cb3ef43267a45c029c7c5e6b3b3c1e01
|
[
"MIT"
] | null | null | null |
example-2/example-2-data.ipynb
|
shivyucel/aas-extended-examples
|
117574a3cb3ef43267a45c029c7c5e6b3b3c1e01
|
[
"MIT"
] | null | null | null | 28.234483 | 282 | 0.533464 |
[
[
[
"Most of the simulation functionality is provided by `scipy` but there is still some useful material in `numpy`. We set the seed so that we can reproduce the data again.",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nfrom scipy import stats\nimport numpy as np\n\nnp.random.seed(seed=23)",
"_____no_output_____"
]
],
[
[
"The `random_cat_covariates` function simulates the properties of a random cat.",
"_____no_output_____"
]
],
[
[
"def random_cat_covariates():\n hidden = stats.norm.rvs()\n is_longhaired = stats.bernoulli.rvs(0.5)\n height = stats.norm.rvs(loc = 24 + hidden, scale = 0.5)\n loudness = np.log(stats.expon.rvs(scale = 10 + 5 * (4 + max(hidden,0))) + 5)\n return {\n \"time_outdoors\": stats.gamma.rvs(3, scale = 2),\n \"coat_colour\": stats.randint.rvs(low = 1, high = 4),\n \"weight\": stats.norm.rvs(loc = 4, scale = 0.5),\n \"height\": height,\n \"loudness\": loudness,\n \"whisker_length\": 0.3 * loudness + 0.3 * height + 0.1 * stats.norm.rvs(scale = 2),\n \"is_longhaired\": is_longhaired,\n \"coat_length\": stats.gamma.rvs((4 + 3 * is_longhaired) * 4, scale = 1/4)\n }",
"_____no_output_____"
]
],
[
[
"The `random_num_pats` takes the measurements of a random cat and returns the number of pats that they recieved on the day that they were observed. It is this function that specifies the relationship between the properties of the cat and the average number of pats it receives.",
"_____no_output_____"
]
],
[
[
"def random_num_pats(cat_covariates):\n coat_length_val = cat_covariates[\"coat_length\"] * ((-1) ** cat_covariates[\"is_longhaired\"])\n \n mean_pats = (\n 0.3 + \n 1.0 * cat_covariates[\"height\"] +\n 1.0 * cat_covariates[\"coat_colour\"] ** 2 +\n 1.0 * cat_covariates[\"weight\"] +\n 0.1 * cat_covariates[\"loudness\"] +\n 0.9 * coat_length_val +\n 1 * cat_covariates[\"time_outdoors\"]\n )\n \n safe_mean_pats = max(0.1, mean_pats)\n \n return stats.poisson.rvs(safe_mean_pats)",
"_____no_output_____"
]
],
[
[
"The `random_observation` function generates a random observation to include in the data set.",
"_____no_output_____"
]
],
[
[
"def random_observation():\n x = random_cat_covariates()\n y = random_num_pats(x)\n x[\"num_pats\"] = y\n if x[\"time_outdoors\"] > 24:\n x[\"time_outdoors\"] = 24\n return x",
"_____no_output_____"
],
[
"pd.DataFrame([random_observation() for _ in range(1000)]).to_csv(\"cat-pats.csv\", index = False)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
4a217ad0ed77804aadd15f6085910f54ced7969f
| 318,815 |
ipynb
|
Jupyter Notebook
|
Matplotlib/Matplotlib.ipynb
|
Pearl6193/Data-Science
|
6398cd6b14c77126ff0bc02e83e3ffd882a3c21a
|
[
"MIT"
] | null | null | null |
Matplotlib/Matplotlib.ipynb
|
Pearl6193/Data-Science
|
6398cd6b14c77126ff0bc02e83e3ffd882a3c21a
|
[
"MIT"
] | null | null | null |
Matplotlib/Matplotlib.ipynb
|
Pearl6193/Data-Science
|
6398cd6b14c77126ff0bc02e83e3ffd882a3c21a
|
[
"MIT"
] | null | null | null | 388.79878 | 61,060 | 0.94058 |
[
[
[
"import matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"%matplotlib inline",
"_____no_output_____"
],
[
"import numpy as np\nx = np.linspace(0, 5, 11)\ny = x ** 2",
"_____no_output_____"
],
[
"x",
"_____no_output_____"
],
[
"y",
"_____no_output_____"
],
[
"#Functional\nplt.plot(x,y,\"r\")\nplt.xlabel(\"X Axis\")\nplt.ylabel(\"Y Axis\")\nplt.title(\"Title\")\nplt.show()",
"_____no_output_____"
],
[
"plt.subplot(1,2,1)\nplt.plot(x,y,\"r-\")\nplt.subplot(1,2,2)\nplt.plot(y,x,\"g*-\")",
"_____no_output_____"
],
[
"# OOP Method\nfig = plt.figure()\naxes = fig.add_axes([.1,.1,.5,.5])\naxes.plot(x,y)",
"_____no_output_____"
],
[
"fig = plt.figure()\naxes = fig.add_axes([.1,.1,1,1])\naxes.plot(x,y)",
"_____no_output_____"
],
[
"fig = plt.figure()\naxes = fig.add_axes([1,1,1,1])\naxes.plot(x,y)\naxes.set_xlabel(\"X Axis\")\naxes.set_ylabel(\"Y Axis\")\naxes.set_title(\"The Title\")",
"_____no_output_____"
],
[
"fig = plt.figure()\naxes1 = fig.add_axes([.1,.1,1,1])\naxes2 = fig.add_axes([.18,.53,.5,.5])\naxes1.plot(x,y,\"m\")\naxes2.plot(y,x,\"r\")\naxes1.set_xlabel(\"X1 Axis\")\naxes1.set_ylabel(\"Y1 Axis\")\naxes1.set_title(\"Title1\")\naxes2.set_xlabel(\"X2 Axis\")\naxes2.set_ylabel(\"Y2 Axis\")\naxes2.set_title(\"Title2\")\n",
"_____no_output_____"
],
[
"fig,axes = plt.subplots()\naxes.plot(x,y,\"r\")\naxes.set_xlabel(\"X Axis\")\naxes.set_ylabel(\"Y Axis\")\naxes.set_title(\"The Title\")",
"_____no_output_____"
],
[
"fig,axes = plt.subplots(1,2)\naxes[0].plot(x,y,\"y\")\naxes[1].plot(y,x,\"b\")\n\nfor i in axes:\n i.set_xlabel(\"X\")\n i.set_ylabel(\"Y\")\n i.set_title(\"Title\")\nplt.tight_layout()",
"_____no_output_____"
],
[
"axes",
"_____no_output_____"
],
[
"fig = plt.figure(figsize=(5,3),dpi=100)\naxes = fig.add_axes([1,1,1,1])\naxes.plot(x,y,\"r\")",
"_____no_output_____"
],
[
"fig,axes = plt.subplots(nrows=2,ncols=1,figsize = (3,3))\naxes[0].plot(y,x)\naxes[1].plot(x,y)\nplt.tight_layout()",
"_____no_output_____"
],
[
"fig.savefig(\"filename.png\")",
"_____no_output_____"
],
[
"fig.savefig(\"filename.png\",dpi = 140)",
"_____no_output_____"
],
[
"fig,axes = plt.subplots(nrows=1,ncols=2,figsize=(5,5))\naxes[0].plot(x,y,\"r\",label=\"Y=X**2\")\naxes[1].plot(y,x,\"y\",label=\"Y=X**(0.5)\")\naxes[0].legend()\naxes[1].legend()\nfor i in axes:\n i.set_xlabel(\"X\")\n i.set_ylabel(\"Y\")\n i.set_title(\"Title\")\nplt.tight_layout()",
"_____no_output_____"
],
[
"fig = plt.figure()\n\nax = fig.add_axes([0,0,1,1])\n\nax.plot(x, x**2, label=\"x**2\")\nax.plot(x, x**3, label=\"x**3\")\nax.legend(loc=0)",
"_____no_output_____"
],
[
"fig, ax = plt.subplots()\nax.plot(x, x**2, 'b.-') # blue line with dots\nax.plot(x, x**3, 'g--')",
"_____no_output_____"
],
[
"fig,ax = plt.subplots()\nax.plot(x,x+1,\"black\",ls=\"-.\",alpha=0.5)\nax.plot(x,x+2,\"b--\")\n",
"_____no_output_____"
],
[
"fig, ax = plt.subplots()\n\nax.plot(x, x+1, color=\"blue\", alpha=0.5)\nax.plot(x, x+2, color=\"#8B008B\") \nax.plot(x, x+3, color=\"#FF8C00\")",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(figsize=(12,6))\nax.plot(x, x+1, color=\"red\", linewidth=0.25)\nax.plot(x, x+2, color=\"red\", linewidth=0.50)\nax.plot(x, x+3, color=\"red\", linewidth=1.00)\nax.plot(x, x+4, color=\"red\", linewidth=2.00)\n\nax.plot(x, x+5, color=\"green\", lw=3, linestyle='-')\nax.plot(x, x+6, color=\"green\", lw=3, ls='-.')\nax.plot(x, x+7, color=\"green\", lw=3, ls=':')\n\nline, = ax.plot(x, x+8, color=\"black\", lw=1.50)\nline.set_dashes([5, 10, 15, 10])\n\nax.plot(x, x+ 9, color=\"blue\", lw=3, ls='-', marker='+')\nax.plot(x, x+10, color=\"blue\", lw=3, ls='--', marker='o')\nax.plot(x, x+11, color=\"blue\", lw=3, ls='-', marker='s')\nax.plot(x, x+12, color=\"blue\", lw=3, ls='--', marker='1')\n\nax.plot(x, x+13, color=\"purple\", lw=1, ls='-', marker='o', markersize=2)\nax.plot(x, x+14, color=\"purple\", lw=1, ls='-', marker='o', markersize=4)\nax.plot(x, x+15, color=\"purple\", lw=1, ls='-', marker='o', markersize=8, markerfacecolor=\"red\")\n\nax.plot(x, x+16, color=\"purple\", lw=1, ls='-', marker='s', markersize=8, \n markerfacecolor=\"yellow\", markeredgewidth=3, markeredgecolor=\"green\");",
"_____no_output_____"
],
[
"fig, axes = plt.subplots(1, 3, figsize=(12, 4))\n\naxes[0].plot(x, x**2, x, x**3)\naxes[0].set_title(\"default axes ranges\")\n\naxes[1].plot(x, x**2, x, x**3)\naxes[1].axis('tight')\naxes[1].set_title(\"tight axes\")\n\naxes[2].plot(x, x**2, x, x**3)\naxes[2].set_ylim([0, 60])\naxes[2].set_xlim([2, 5])\naxes[2].set_title(\"custom axes range\");",
"_____no_output_____"
],
[
"plt.scatter(x,y)",
"_____no_output_____"
],
[
"from random import sample\ndata = sample(range(1, 1000), 100)\nplt.hist(data)",
"_____no_output_____"
],
[
"data = [np.random.normal(0, std, 100) for std in range(1, 4)]\nplt.boxplot(data,vert=True,patch_artist=True);",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a21810a0bf8f5b94de395952a84d604893f9b0e
| 48,606 |
ipynb
|
Jupyter Notebook
|
NaverSentimentAnalysis_LSTM.ipynb
|
creamcheesesteak/test_deeplearning
|
141d7371d7d4257468d6db33f084357f83c0c85a
|
[
"Apache-2.0"
] | null | null | null |
NaverSentimentAnalysis_LSTM.ipynb
|
creamcheesesteak/test_deeplearning
|
141d7371d7d4257468d6db33f084357f83c0c85a
|
[
"Apache-2.0"
] | null | null | null |
NaverSentimentAnalysis_LSTM.ipynb
|
creamcheesesteak/test_deeplearning
|
141d7371d7d4257468d6db33f084357f83c0c85a
|
[
"Apache-2.0"
] | null | null | null | 59.933416 | 18,349 | 0.489384 |
[
[
[
"<a href=\"https://colab.research.google.com/github/creamcheesesteak/test_deeplearning/blob/master/NaverSentimentAnalysis_LSTM.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"!curl -O https://raw.githubusercontent.com/e9t/nsmc/master/ratings_train.txt",
" % Total % Received % Xferd Average Speed Time Time Time Current\n Dload Upload Total Spent Left Speed\n100 13.9M 100 13.9M 0 0 25.3M 0 --:--:-- --:--:-- --:--:-- 25.3M\n"
],
[
"!curl -O https://raw.githubusercontent.com/e9t/nsmc/master/ratings_test.txt",
" % Total % Received % Xferd Average Speed Time Time Time Current\n Dload Upload Total Spent Left Speed\n100 4778k 100 4778k 0 0 12.8M 0 --:--:-- --:--:-- --:--:-- 12.8M\n"
],
[
"import pandas as pd",
"_____no_output_____"
],
[
"train_data = pd.read_table('./ratings_train.txt')\ntrain_data.head(5)",
"_____no_output_____"
],
[
"train_data.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 150000 entries, 0 to 149999\nData columns (total 3 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 id 150000 non-null int64 \n 1 document 149995 non-null object\n 2 label 150000 non-null int64 \ndtypes: int64(2), object(1)\nmemory usage: 3.4+ MB\n"
],
[
"train_data.dropna(inplace=True)\ntrain_data.info()",
"<class 'pandas.core.frame.DataFrame'>\nInt64Index: 149995 entries, 0 to 149999\nData columns (total 3 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 id 149995 non-null int64 \n 1 document 149995 non-null object\n 2 label 149995 non-null int64 \ndtypes: int64(2), object(1)\nmemory usage: 4.6+ MB\n"
],
[
"train_data['label'].value_counts()",
"_____no_output_____"
],
[
"!python -m pip install konlpy",
"Collecting konlpy\n Downloading konlpy-0.5.2-py2.py3-none-any.whl (19.4 MB)\n\u001b[K |████████████████████████████████| 19.4 MB 1.2 MB/s \n\u001b[?25hCollecting JPype1>=0.7.0\n Downloading JPype1-1.3.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl (448 kB)\n\u001b[K |████████████████████████████████| 448 kB 61.0 MB/s \n\u001b[?25hCollecting beautifulsoup4==4.6.0\n Downloading beautifulsoup4-4.6.0-py3-none-any.whl (86 kB)\n\u001b[K |████████████████████████████████| 86 kB 4.7 MB/s \n\u001b[?25hCollecting colorama\n Downloading colorama-0.4.4-py2.py3-none-any.whl (16 kB)\nRequirement already satisfied: lxml>=4.1.0 in /usr/local/lib/python3.7/dist-packages (from konlpy) (4.2.6)\nRequirement already satisfied: numpy>=1.6 in /usr/local/lib/python3.7/dist-packages (from konlpy) (1.19.5)\nRequirement already satisfied: tweepy>=3.7.0 in /usr/local/lib/python3.7/dist-packages (from konlpy) (3.10.0)\nRequirement already satisfied: typing-extensions in /usr/local/lib/python3.7/dist-packages (from JPype1>=0.7.0->konlpy) (3.7.4.3)\nRequirement already satisfied: requests[socks]>=2.11.1 in /usr/local/lib/python3.7/dist-packages (from tweepy>=3.7.0->konlpy) (2.23.0)\nRequirement already satisfied: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.7/dist-packages (from tweepy>=3.7.0->konlpy) (1.3.0)\nRequirement already satisfied: six>=1.10.0 in /usr/local/lib/python3.7/dist-packages (from tweepy>=3.7.0->konlpy) (1.15.0)\nRequirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.7/dist-packages (from requests-oauthlib>=0.7.0->tweepy>=3.7.0->konlpy) (3.1.1)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests[socks]>=2.11.1->tweepy>=3.7.0->konlpy) (2.10)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests[socks]>=2.11.1->tweepy>=3.7.0->konlpy) (2021.5.30)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in 
/usr/local/lib/python3.7/dist-packages (from requests[socks]>=2.11.1->tweepy>=3.7.0->konlpy) (1.24.3)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests[socks]>=2.11.1->tweepy>=3.7.0->konlpy) (3.0.4)\nRequirement already satisfied: PySocks!=1.5.7,>=1.5.6 in /usr/local/lib/python3.7/dist-packages (from requests[socks]>=2.11.1->tweepy>=3.7.0->konlpy) (1.7.1)\nInstalling collected packages: JPype1, colorama, beautifulsoup4, konlpy\n Attempting uninstall: beautifulsoup4\n Found existing installation: beautifulsoup4 4.6.3\n Uninstalling beautifulsoup4-4.6.3:\n Successfully uninstalled beautifulsoup4-4.6.3\nSuccessfully installed JPype1-1.3.0 beautifulsoup4-4.6.0 colorama-0.4.4 konlpy-0.5.2\n"
],
[
"import konlpy",
"_____no_output_____"
],
[
"stopwords = ['의','가','이','은','들','는','좀','잘','걍','과','도','를','으로','자','에','와','한','하다']",
"_____no_output_____"
],
[
"okt = konlpy.tag.Okt()\nokt.morphs('와 이런 것도 영화라고 차라리 뮤직비디오를 만드는 게 나을 뻔', stem=True)",
"_____no_output_____"
],
[
"train_data_small = train_data[0:300]",
"_____no_output_____"
],
[
"# Y_train = train_data['|abe|'][0:300]\nY_train = train_data['label'][0:300]\nY_train",
"_____no_output_____"
],
[
"x_train = list()\nokt = konlpy.tag.Okt()\nfor sentence in train_data_small['document']:\n temp_x = okt.morphs(sentence, stem=True)\n words = list()\n for tok in temp_x:\n if tok not in stopwords:\n words.append(tok)\n x_train.append(words)\n\nx_train[3:5]",
"_____no_output_____"
],
[
"import tensorflow as tf",
"_____no_output_____"
],
[
"tokenizer = tf.keras.preprocessing.text.Tokenizer()\ntokenizer.fit_on_texts(x_train)",
"_____no_output_____"
],
[
"print(tokenizer.word_index)",
"{'.': 1, '영화': 2, '보다': 3, '..': 4, '...': 5, '없다': 6, '이다': 7, ',': 8, '을': 9, '있다': 10, '?': 11, '다': 12, '정말': 13, '만': 14, '연기': 15, '!': 16, '너무': 17, '진짜': 18, '않다': 19, '나오다': 20, '안': 21, '....': 22, '인': 23, '적': 24, '내': 25, '하고': 26, '점': 27, '에서': 28, '좋다': 29, '아니다': 30, '같다': 31, '재밌다': 32, '요': 33, '아': 34, '보고': 35, '시간': 36, '하나': 37, '그': 38, '드라마': 39, '감동': 40, '되다': 41, '왜': 42, '재미있다': 43, '평점': 44, '로': 45, '사람': 46, '볼': 47, 'ㅋㅋ': 48, '싶다': 49, '~': 50, '고': 51, '게': 52, '말': 53, '나': 54, '아깝다': 55, '것': 56, '그냥': 57, '이건': 58, '완전': 59, '최고': 60, '내용': 61, '만들다': 62, '-': 63, '느낌': 64, '작품': 65, '더': 66, '기': 67, '못': 68, '재미없다': 69, '별로': 70, 'ㅡㅡ': 71, '모르다': 72, '들다': 73, '또': 74, '^^': 75, '생각': 76, '정도': 77, '알다': 78, '줄': 79, '재미': 80, '때': 81, 'ㅠㅠ': 82, '듯': 83, '지루하다': 84, '수': 85, '2': 86, '다시': 87, \"'\": 88, '10': 89, '이렇다': 90, '사랑': 91, '감독': 92, '주인공': 93, '가다': 94, '1': 95, '별': 96, '!!': 97, '임': 98, '스토리': 99, '끄다': 100, '난': 101, '하': 102, '자다': 103, '저': 104, '절대': 105, '그렇다': 106, '버리다': 107, '나가다': 108, '배우': 109, '나다': 110, '에선': 111, '전': 112, '전개': 113, '어떻다': 114, '남다': 115, '성': 116, '역시': 117, '보이다': 118, '3': 119, '부터': 120, '네': 121, '꽤': 122, '중': 123, '면': 124, '위': 125, '걸': 126, '인데': 127, '이네': 128, '받다': 129, '인가': 130, '오다': 131, '조금': 132, 'ㅋ': 133, '속': 134, '라': 135, '(': 136, ')': 137, 'ㅜㅜ': 138, '냐': 139, '.....': 140, '뭐': 141, '진심': 142, '장면': 143, '거': 144, '시': 145, '이야기': 146, '욕': 147, '연': 148, '안되다': 149, '♥': 150, '죽다': 151, '손': 152, '지': 153, '놈': 154, '건': 155, '넘다': 156, ';': 157, '그래서': 158, '력': 159, '남': 160, '쓰레기': 161, '돼다': 162, '이렇게': 163, '매력': 164, '무섭다': 165, '멋지다': 166, '잔잔하다': 167, '아쉽다': 168, '돈': 169, '우리': 170, '용': 171, '웃음': 172, '보여주다': 173, '자체': 174, '무슨': 175, '짜증': 176, '이제': 177, '??': 178, '뻔하다': 179, '먹다': 180, '기대': 181, '설정': 182, '명작': 183, '이상하다': 184, '짜증나다': 185, '포스터': 186, '너': 187, '솔직하다': 188, '이쁘다': 189, '막': 190, '세': 191, '살다': 192, 'ㅋㅋㅋ': 193, '움': 
194, '인지': 195, '그것': 196, '액션': 197, '낮다': 198, '화려하다': 199, '자극': 200, '감성': 201, '절제': 202, '노': 203, '웃기다': 204, '라고': 205, '이해': 206, '갈수록': 207, '캐스팅': 208, '나름': 209, '웃다': 210, '음식': 211, '늘다': 212, '평범하다': 213, '연출': 214, '한번': 215, '님': 216, '인생': 217, '대한': 218, 'ㅇ': 219, '이라는': 220, '일': 221, '음악': 222, '주다': 223, '말다': 224, '낭비': 225, '초반': 226, '엔': 227, '수준': 228, '딱': 229, '이나': 230, '제일': 231, '작다': 232, '맛': 233, '콩': 234, 'oo': 235, '그저': 236, '대단하다': 237, '중간': 238, '많다': 239, '노력': 240, '일본': 241, '이런': 242, '건가': 243, '만에': 244, '근데': 245, '열리다': 246, '그래도': 247, '졸작': 248, '실망': 249, '갈등': 250, '아햏햏': 251, '에게': 252, '엉망': 253, '개': 254, '좋아하다': 255, '가요': 256, '눈': 257, 'ㅋㅋㅋㅋ': 258, '여': 259, '대박': 260, '두': 261, '빠지다': 262, '아주': 263, '또한': 264, '전혀': 265, '그리고': 266, '~~': 267, '그만': 268, '잼': 269, '한석규': 270, '많이': 271, '하지만': 272, '뜨다': 273, '???': 274, '요즘': 275, '허다': 276, '순간': 277, '랑': 278, '현실': 279, '그때': 280, '느끼다': 281, '조연': 282, 'ㅎㅎ': 283, 'ooo': 284, '더빙': 285, '목소리': 286, '흠': 287, '초딩': 288, '오버': 289, '가볍다': 290, '추천': 291, '돋보이다': 292, '너무나도': 293, '떼다': 294, '8': 295, '반개': 296, '원작': 297, '긴장감': 298, '제대로': 299, '살리다': 300, '몇': 301, '년': 302, '낫다': 303, '반복': 304, '데': 305, '헐리우드': 306, '짱': 307, '90년': 308, '대의': 309, '향수': 310, '서': 311, '뛰다': 312, '뻔': 313, '드럽다': 314, '담백하다': 315, '신': 316, '가장': 317, '노잼': 318, '어거지': 319, '냥': 320, '차다': 321, '이기': 322, '이라고': 323, '깨알': 324, '바베트': 325, '만찬': 326, '수작': 327, '이라': 328, '주제': 329, '야': 330, '고추': 331, '발연기': 332, '센스': 333, '9': 334, '해주다': 335, '진부하다': 336, '도안': 337, '죄인': 338, '아직도': 339, '정신': 340, '관객': 341, '단순하다': 342, '은은하다': 343, '두다': 344, '뛰어나다': 345, '공감': 346, '간다': 347, '이민기': 348, '캐릭터': 349, '물건': 350, '대다': 351, '이랑': 352, '소설': 353, '잊다': 354, '가슴': 355, '깊다': 356, '완전하다': 357, '언제': 358, '그대로': 359, '악역': 360, '끝나다': 361, '느리다': 362, '모습': 363, '맨날': 364, '빨리': 365, '감정': 366, '예요': 367, '우리나라': 368, '슬프다': 369, '위해': 370, '살인자': 371, '예전': 
372, '에피소드': 373, '시청률': 374, '백': 375, '연기력': 376, '몰입도': 377, '에도': 378, '분': 379, '한테': 380, '유치하다': 381, '이틀': 382, '차': 383, '조작': 384, '안이': 385, '아무나': 386, '억지스럽다': 387, '어설프다': 388, '어이없다': 389, '결말': 390, '온몸': 391, '매우': 392, '흥행': 393, '사진': 394, '뭘': 395, '진창': 396, '우뢰매': 397, '별루': 398, '내일': 399, '최악': 400, '때리다': 401, '인상': 402, '밉다': 403, '인거': 404, '쓰다': 405, '술': 406, '킬링타임': 407, 'ㅎㅎㅎ': 408, ';;': 409, '태어나다': 410, '로맨스': 411, '짬뽕': 412, '소재': 413, '끌': 414, '투': 415, '허풍': 416, '도대체': 417, '까지': 418, '스스로': 419, '전부': 420, '대': 421, '문제': 422, '연기자': 423, '어울리다': 424, '놓다': 425, '......': 426, '나라': 427, '만들어지다': 428, '교훈': 429, '당시': 430, '해': 431, '믿다': 432, '힘들다': 433, '마지막': 434, '나이': 435, '왠만하다': 436, '질': 437, '아끼다': 438, '지겹다': 439, '괜찮다': 440, '지네': 441, '어린이': 442, '어리다': 443, '이란': 444, '외국': 445, '상상': 446, '되어다': 447, '오늘': 448, '@': 449, '순정': 450, '쯤': 451, '한마디': 452, '!!!': 453, '이유': 454, '니': 455, '거슬리다': 456, '라니': 457, '뻑': 458, '애': 459, '시종일관': 460, '풍': 461, '듣기': 462, '싫다': 463, '상당하다': 464, '얼마나': 465, '밖에': 466, '화가': 467, '아무': 468, '판': 469, '따다': 470, '류': 471, '판타지': 472, '실력': 473, '어': 474, '옛날': 475, '추억': 476, '쏙': 477, '이영화': 478, '보': 479, '니까': 480, '물': 481, 'ㅠ': 482, '원': 483, '래': 484, '언': 485, '높다': 486, '더럽다': 487, '허무하다': 488, '꿈': 489, '동': 490, 'trash': 491, '버킷리스트': 492, '직전': 493, '기도': 494, '지다': 495, '모순': 496, '전쟁': 497, '주년': 498, '개그콘서트': 499, '없애다': 500, '시키다': 501, '뿐': 502, '제': 503, '범죄': 504, '느와르': 505, '부': 506, '스릴러': 507, '모든': 508, '씩': 509, '기억': 510, '함': 511, '물폭탄': 512, '개연': 513, '굳다': 514, '넘어가다': 515, '파다': 516, '송강호': 517, '조차': 518, '무재': 519, '밓었': 520, '다그': 521, '래서': 522, '교도소': 523, '구먼': 524, '조정': 525, '사이': 526, '몬페': 527, '익살스럽다': 528, '스파이더맨': 529, '늙다': 530, '커스틴': 531, '던스트': 532, '걸음': 533, '마': 534, '초등학교': 535, '학년': 536, '생인': 537, '이응경': 538, '길용우': 539, '생활': 540, '발': 541, '해도': 542, '보단': 543, '납치': 544, '감금': 545, '가족': 546, '모': 547, '엿': 548, '왜케': 
549, '식': 550, '길들이다': 551, '인피니트': 552, '볼때': 553, '마다': 554, '눈물나다': 555, '허진호': 556, '멜로': 557, '달인': 558, '울면': 559, '횡단보도': 560, '건너다': 561, '치다': 562, '올': 563, '이범수': 564, '깔끔하다': 565, '문': 566, '사': 567, '로만': 568, '자꾸': 569, '잊어버리다': 570, '취향': 571, '존중': 572, '다지': 573, '내생': 574, '극장': 575, 'ㄱ': 576, '매번': 577, '긴장': 578, '재밋음': 579, '바스코': 580, '락스': 581, '코': 582, '바비': 583, '아이돌': 584, '깔다': 585, '안달': 586, '처럼': 587, '굿바이': 588, '레닌': 589, '표절': 590, '뒤': 591, '없어지다': 592, '질퍽': 593, '산뜻하다': 594, '용구성': 595, '버무러진': 596, '일드': 597, '약탈': 598, '변명': 599, '이르다': 600, '착하다': 601, '심오하다': 602, '뜻': 603, '학생': 604, '선생': 605, '놀다': 606, '불가능하다': 607, '차이나다': 608, '핀란드': 609, '풍경': 610, '이라도': 611, '구': 612, '경': 613, '할랫': 614, '는걸': 615, '말씀드리다': 616, '중반': 617, '짤랐을꺼': 618, '납득': 619, '꼭': 620, 'kl': 621, 'g': 622, '털다': 623, '카밀라': 624, '벨': 625, '재밋는뎅': 626, '탁월하다': 627, '엄포스': 628, '위력': 629, '깨닫다': 630, '꽃': 631, '검사': 632, '명품': 633, '졸': 634, '이리': 635, '1%': 636, '라도': 637, '기대하다': 638, '패션': 639, '열정': 640, '안나': 641, '윈': 642, '투어': 643, '키이라': 644, '나이틀리': 645, '대체': 646, '정신장애': 647, '틱장애': 648, '허허': 649, '유령': 650, '114': 651, '명': 652, '평가': 653, '알바생': 654, '싱겁다': 655, '낚임': 656, '서리': 657, '굶주리다': 658, '맘': 659, '방법': 660, '>..': 661, 'ㅜㅡ': 662, '윤제문': 663, '발견': 664, '소소하다': 665, '탈': 666, '미소': 667, '머금': 668, '올리다': 669, '속지': 670, '리얼리티': 671, '한데': 672, '크다': 673, '정신의학': 674, '상': 675, '분노조절': 676, '장애': 677, '초기': 678, '증상': 679, '툭하면': 680, '패': 681, '파손': 682, '오': 683, '바': 684, '극': 685, '신선하다': 686, '가면': 687, '상태': 688, '불가': 689, '마이너스': 690, '뮤비': 691, '알': 692, 'ㅉㅉ': 693, '북한': 694, '....^^;': 695, '리스': 696, '타르': 697, '가르다': 698, '용의': 699, '주인': 700, '누': 701, '근친상간': 702, '다니다': 703, '메': 704, '니스': 705, '터': 706, '드래곤': 707, '토르': 708, '다크': 709, '월드': 710, '잡수다': 711, '기본': 712, '선방': 713, '영혼': 714, '어루만지다': 715, '수도': 716, '거치다': 717, '상사': 718, '잠시': 719, '동화': 720, '행복하다': 721, '세르게이': 722, '맵다': 723, '포퐁': 724, 
'저그': 725, '진호': 726, '시리': 727, '난또': 728, '꼬마': 729, '애가': 730, '원한': 731, '.,.': 732, '혼자': 733, '나대다': 734, '어쩌라고': 735, '충격': 736, '적다': 737, '기분': 738, '푹': 739, '꺼지다': 740, '활': 741, '이라고는': 742, '무겁다': 743, '지독하다': 744, '차갑다': 745, '무자비하다': 746, '일본인': 747, '상상력': 748, '심심하다': 749, '백봉기': 750, '들어맞다': 751, '예측': 752, '카리스마': 753, '불알': 754, '당황': 755, '아무튼': 756, '녹다': 757, '일상': 758, '밋밋하다': 759, '계속': 760, '전개도': 761, '은희': 762, '한두': 763, '컷': 764, '소': 765, '극적': 766, '대만': 767, '가슴속': 768, '온': 769, '헤집다': 770, '다큐': 771, '현대': 772, '사의': 773, '단면': 774, '대해': 775, '깊이': 776, '사죄': 777, '바로': 778, '잡기': 779, '말로': 780, '듣다': 781, '보도': 782, '연맹': 783, '민간인': 784, '학살': 785, '이정': 786, '이야': 787, '명백하다': 788, '살인': 789, '어디': 790, '재탕': 791, '삼': 792, '탕': 793, '사골': 794, '우려': 795, '먹듯': 796, '산': 797, '아예': 798, '70회': 799, '중반인데': 800, '120': 801, '부작': 802, '이라니': 803, '김남길': 804, '짜다': 805, '불구': 806, '손예진': 807, 'ㅈㅈ': 808, '비슷하다': 809, '노래실력': 810, '뽑다': 811, '맞다': 812, '박시환': 813, 'mama': 814, '망신': 815, '넣다': 816, '집': 817, '활짝': 818, '들어가다': 819, '문자': 820, '비번': 821, '걸리다': 822, '재밋네': 823, '달팽이': 824, '빨': 825, '라서': 826, '부패하다': 827, '로마노프': 828, '왕조': 829, '기리': 830, '뭣같': 831, '항거': 832, '러시아': 833, '민중': 834, '폭도': 835, '무난': 836, '펴다': 837, '한국영': 838, '화': 839, '코드': 840, ':': 841, '계': 842, '화해': 843, '남발': 844, '시작': 845, '3분': 846, '리플릿': 847, '불안하다': 848, '단연': 849, '럼': 850, ';;;': 851, '진정': 852, '위대하다': 853, '`': 854, '조미': 855, '막문위': 856, '골깜': 857, '부라리다': 858, '쓰러지다': 859, '성룡': 860, '골': 861, '걸스데이': 862, '이혜리': 863, '서기': 864, 'ㅋㅋㅋㅋㅋ': 865, '인공': 866, '주귀': 867, 'ㅋㅋㅋㅋㅋㅋ': 868, '어내스트': 869, '셀레스틴': 870, '강추': 871, '에요': 872, '클라라': 873, '볼라': 874, '화신': 875, '새롭다': 876, '메인': 877, '차차': 878, '신카이': 879, '마코토': 880, '작화': 881, '유': 882, '카나': 883, '이훨': 884, '고은님': 885, '노골': 886, '광고': 887, '크리스마스': 888, '떠오르다': 889, '행복': 890, \"'-'\": 891, '쫌': 892, '산만하다': 893, '처음': 894, ',,,,': 895, '불륜': 896, ',,': 897, '왕': 898, '믹스': 
899, '음향': 900, '하아': 901, '별루더': 902, '기준': 903, '패널': 904, '가구': 905, '머': 906, '망치': 907, '서운하다': 908, '몬스터': 909, '주식회사': 910, 'd': 911, '너무나': 912, '........': 913, '흥미': 914, '지만': 915, '박하다': 916, '몰입': 917, '중국인': 918, '특유': 919, '과장': 920, '안간힘': 921, '쓸다': 922, '가상하다': 923, '고증': 924, '현': 925, '실감': 926, '떨어지다': 927, '거북': 928, '스럽다': 929, '과대': 930, '포장': 931, '불법체류자': 932, '잡다': 933, '우상화': 934, '미국': 935, '따뜻하다': 936, '뭥미': 937, '2년': 938, '삶속': 939, '생애': 940, '드러나다': 941, '지난': 942, '후': 943, '...;;;': 944, '10년': 945, '지나': 946, '순수하다': 947, '숀펜': 948, '甲': 949, '올레': 950, '공짜': 951, '헐다': 952, '배역': 953, '상대': 954, '따로': 955, '보아': 956, '라미란': 957, '아들': 958, '젤': 959, '욕심': 960, '어느': 961, '쪽': 962, '만이라도': 963, '빵점': 964, '베댓': 965, '잘쓰다': 966, '모자라다': 967, '도둑': 968, '뫼비우스': 969, '믿어지다': 970, '..?': 971, '찌릿': 972, '짜릿': 973, '용기': 974, '영': 975, '화이': 976, '상황': 977, '주입': 978, '식이': 979, '전하': 980, '케이블': 981, '다르덴': 982, '차이밍량': 983, '섞이다': 984, '채': 985, '그릇': 986, '담기다': 987, '여군': 988, '건지다': 989, '엠비씨': 990, '질린다': 991, '김혜수': 992, '어딘': 993, '에볼라': 994, '바이러스': 995, '떠들다': 996, '석': 997, '어떤': 998, '에서도': 999, '20': 1000, '년전': 1001, '보기': 1002, '후반': 1003, '부가': 1004, '살짝': 1005, '만해': 1006, '떨다': 1007, '용가리': 1008, '짱짱맨': 1009, '서다': 1010, '감히': 1011, '하나로': 1012, '꼽': 1013, '살': 1014, '야하다': 1015, '나르다': 1016, '고민': 1017, '모건': 1018, '프리': 1019, '멀다': 1020, '여전하다': 1021, '섹시하다': 1022, '작가': 1023, '용이': 1024, '재방송': 1025, '혹': 1026, '시나': 1027, '답': 1028, '여운': 1029, '상업': 1030, '퀄리티': 1031, '쩔다': 1032, '충분하다': 1033, '개인': 1034, '잔인하다': 1035, '노출씬': 1036, '화끈하다': 1037, '국산': 1038, '보임': 1039, '끝내': 1040, '드니': 1041, '일품': 1042, '맥스': 1043, '샘': 1044, '죽이다': 1045, '바랬다': 1046, '스러웟음': 1047, '찍을껀데': 1048, '면상': 1049, '자신': 1050, '동심': 1051, '멀리': 1052, '무술': 1053, '총을드': 1054, '크리스토퍼': 1055, '왈츠': 1056, '타란티노': 1057, '조합': 1058, '한국': 1059, '유명': 1060, '한편': 1061, '초월': 1062, '유명하다': 1063, '오랜': 1064, '재밋': 1065, '종방': 1066, '방도': 1067, 
'방송': 1068, '대본': 1069, '완성': 1070, '막장': 1071, '지치다': 1072, '수백향': 1073, '바른': 1074, '그리다': 1075, '심하다': 1076, 'mbc': 1077, '화이팅': 1078, '조절': 1079, '위원회': 1080, '김혜선': 1081, '김': 1082, '역할': 1083, '팜므파탈': 1084, '로써': 1085, '해내다': 1086, '의외': 1087, '20년': 1088, '사극': 1089, '벌어지다': 1090, '그녀': 1091, '논란': 1092, '왠지': 1093, '코미디': 1094, '\"': 1095, '끝': 1096, '멍하다': 1097, '\"\"': 1098, 'ㅈ': 1099, '.\"\"\"': 1100, '공유': 1101, '존잘': 1102, '상쾌': 1103, '발랄하다': 1104, '껄끄런': 1105, '유쾌하다': 1106, '해설': 1107, '소파': 1108, '죽': 1109, '치고': 1110, '앉다': 1111, '지키다': 1112, '로큰롤': 1113, '!!!!!!!!!!!!!!': 1114, '주된': 1115, '타겟': 1116, '일반': 1117, '논리': 1118, '통': 1119, '게임': 1120, '흥미롭다': 1121, '요원': 1122, '무능력하다': 1123, 'cg': 1124, '배경': 1125, '뮤지컬': 1126, '사운드': 1127, '녹음': 1128, '춤': 1129, '추다': 1130, '어제': 1131, 'cgv': 1132, '참다': 1133, '말리': 1134, '영국': 1135, '예산': 1136, 'dvd': 1137, '뮤지컬영화': 1138, '맘마미아': 1139, '1/10': 1140, '로맨틱코미디': 1141, '게이물': 1142, '알바': 1143, '머임': 1144, '????': 1145, '알고싶다': 1146, '스텝': 1147, '꼭두각시': 1148, '4': 1149, '아름답다': 1150, '익숙해지다': 1151, '현대인': 1152, '힘드다': 1153, '읽다': 1154, '볼걸': 1155, '당하다': 1156, '극치': 1157, '굉장하다': 1158, '언밸러스': 1159, '뚱뚱하다': 1160, '생기다': 1161, '남자': 1162, '고역': 1163, '간간히': 1164, '흘러나오다': 1165, '클래식': 1166, '조차도': 1167, '!!!!': 1168, '주기도': 1169, '인가요': 1170, '죄송하다': 1171, '나서다': 1172, '보지': 1173, '티비': 1174, '짜지다': 1175, '...-_-': 1176, '돼지': 1177, '피': 1178, '닭목': 1179, '우웩': 1180, '무당': 1181, '잠': 1182, '가져가다': 1183, '윤종신': 1184, '복귀': 1185, '이하늘': 1186, '뽑히다': 1187, '참가자': 1188, '심사': 1189, '위원': 1190, '인격': 1191, '쌓다': 1192, '어허': 1193, '광장': 1194, '생': 1195, '어린시절': 1196, '판타지영화': 1197, '나쁘다': 1198, '짓다': 1199, '금물': 1200, '지옥': 1201, '기존': 1202, '멜로영화': 1203, '형식': 1204, '탈피': 1205, '지나치다': 1206, '사랑비': 1207, '서준': 1208, '1.2': 1209, '3초': 1210, '후회': 1211, '주네': 1212, '젖다': 1213, '없이': 1214, '보고오다': 1215, '싸이코': 1216, '벗어나다': 1217, '14년': 1218, '도에': 1219, '개봉': 1220, '접해': 1221, '사랑스럽다': 1222, '~!!': 1223, '귀엽다': 
1224, '♥♥': 1225, '청춘': 1226, '만이': 1227, '넘치다': 1228, '지나가다': 1229, '돌아오다': 1230, '만큼은': 1231, '무한': 1232, '젊음': 1233, 'tv': 1234, '건담': 1235, '시리즈': 1236, '아직': 1237, '까지도': 1238, '최고봉': 1239, '개콘': 1240, '코너': 1241, '이고': 1242, '이안': 1243, '사다코': 1244, '서린': 1245, '우물': 1246, '펀치': 1247, '후세': 1248, '결정': 1249, '계기': 1250, '표현': 1251, '람': 1252, '생명': 1253, '빼앗다': 1254, '등': 1255, '출현': 1256, '연극': 1257, '으': 1258, '이든': 1259, '본능': 1260, '히': 1261, '면서': 1262, '새벽': 1263, '본': 1264, '이래': 1265, '강수연': 1266, '~!': 1267, '최정원': 1268, '신음': 1269, '김혜성': 1270, '예쁘다': 1271, '이현진': 1272, '가발': 1273, '여정': 1274, '이인상': 1275, '재밋어': 1276, '배두나': 1277, '대들다': 1278, '성적': 1279, '호기심': 1280, '필요': 1281, '닥치고': 1282, '각기': 1283, '다른': 1284, '미국드라마': 1285, '파워': 1286, '미묘하다': 1287, '사로자다': 1288, '훈훈하다': 1289, '응답': 1290, '하라': 1291, '은지원': 1292, '웃기': 1293, '배꼽': 1294, '컴': 1295, '구성': 1296, '부실하다': 1297, '네이버': 1298, '장끌': 1299, '로드': 1300, '몰락': 1301, '가져오다': 1302, '오우삼': 1303, '제적': 1304, '영웅본색': 1305, '제로': 1306, '설레다': 1307, '학창시절': 1308, '대사': 1309, '배경음악': 1310, '화남': 1311, '형태': 1312, '닿다': 1313, '완벽하다': 1314, '영양가': 1315, '이승기': 1316, '현충일': 1317, '특집': 1318, '프로': 1319, '1963년': 1320, '도의': 1321, '훌륭하다': 1322, '미로': 1323, '급': 1324, 'of': 1325, 'the': 1326, '제대': 1327, '마음': 1328, '휴가': 1329, '다녀오다': 1330, '소박하다': 1331, '햇살': 1332, '가득하다': 1333, '비': 1334, '이의': 1335, '부엌': 1336, '요리': 1337, '코믹': 1338, '짜임새': 1339, '1996년': 1340, '에는': 1341, '신현준': 1342, '황': 1343, '장군': 1344, '음': 1345, '대가': 1346, '뿅': 1347, '일단': 1348, '재생': 1349, '괴물': 1350, '서스펜스': 1351, '귀신': 1352, '흡입': 1353, '댓글': 1354, '소원': 1355, '들어주다': 1356, '가보다': 1357, '란': 1358, '발톱': 1359, '만큼도': 1360, '따라가다': 1361, '완존': 1362, '밝다': 1363, '긍정': 1364, '인게': 1365, '무표': 1366, '홍혜정': 1367, '역': 1368, '이그': 1369, '마도': 1370, '후련': 1371, '시원하다': 1372, '나머': 1373, '답답하다': 1374, '낼': 1375, '월요일': 1376, '해피': 1377, '정치인': 1378, '정치범': 1379, '어렵다': 1380, '묘사': 1381, '극장판': 1382, '재개': 1383, '봉하': 1384, 
'ㅄ': 1385, '대희': 1386, '전화': 1387, '끊음': 1388, '간': 1389, '서양': 1390, '싸움판': 1391, '푸하하하': 1392, '구만': 1393, '세계': 1394, '최초': 1395, '반공': 1396, '애니매이션': 1397, '역사': 1398, '가치': 1399, '감각': 1400, '시각': 1401, '바라보다': 1402, '색다르다': 1403, '문학': 1404, '엄마': 1405, '무고': 1406, '딸': 1407, '감옥살이': 1408, '제발': 1409, '책': 1410, '미치다': 1411, '겁니다': 1412, '이따위': 1413, '날': 1414, '신나다': 1415, '흑인음악': 1416, '아이스': 1417, '큐브': 1418, '어쩔': 1419, '건데': 1420, '알리시아': 1421, '생생하다': 1422, '옥소리': 1423, '프로필': 1424, 'ㅋㅋㅋㅋㅋㅋㅋㅋㅋㅋㅋㅋㅋㅋ': 1425, '아우': 1426, '재밓당': 1427, '짱짱': 1428, '굿': 1429, '다시다': 1430, '시절': 1431, '나타내다': 1432, '이적': 1433, '액션영화': 1434, '영화롭다': 1435, '실패': 1436, '작': 1437, '.......': 1438, '뭔가': 1439, '빠져들다': 1440, '모녀': 1441, '토막': 1442, '살해': 1443, '본인': 1444, '피해자': 1445, '묻다': 1446, '구리지': 1447, '필름': 1448, '값': 1449, '매미': 1450, '어디서': 1451, '드': 1452, '안습': 1453, '혀': 1454, '짧다': 1455, '소리': 1456, '매니저': 1457, '역활': 1458, '훨': 1459, '적당하다': 1460, '스릴': 1461, '이구': 1462, '비추다': 1463, 'ㅜ': 1464, '전달': 1465, '오글거리다': 1466, '할머니': 1467, '월': 1468, '익숙하다': 1469, 'ㄷㅔ': 1470, '편이': 1471, '2%': 1472, '부족하다': 1473, '50%': 1474, '쇼': 1475, '12년': 1476, '진개': 1477, '천카이거': 1478, '이름': 1479, '정은지': 1480, '언니': 1481, '노래': 1482, '부르다': 1483, '회': 1484, '즐겁다': 1485, '트로트': 1486, '연인': 1487, '망하다': 1488, '아이': 1489, '시선': 1490, '..,': 1491, '내내': 1492, '레알': 1493, '여자애': 1494, '복': 1495, '순이': 1496, '다투다': 1497, '그건': 1498, '굳이': 1499, '겁나다': 1500, '어색': 1501, '해보다': 1502, '연속극': 1503, '인터뷰': 1504, '박스': 1505, '오피스': 1506, '몰다': 1507, '80~90년': 1508, '싸움': 1509, '비다': 1510, '다르다': 1511, '여자': 1512, '기량': 1513, '딸리다': 1514, '울': 1515, '었': 1516, '조디': 1517, '개막': 1518, '장': 1519, '계시다': 1520, '당신': 1521, '^.^': 1522, '휴': 1523, '호구': 1524, '두기': 1525, '쌀': 1526, '세기': 1527, '명대사': 1528, '쓸데없이': 1529, '뒷받침': 1530, '중요하다': 1531, '여배우': 1532, '저스트': 1533, '위드': 1534, '잇다': 1535, '애니스톤': 1536, '런가': 1537, '실망하다': 1538, '맛깔': 1539, '시베리아': 1540, '거기': 1541, '훈련': 1542, 'ost': 1543, '작살': 
1544, '질왜': 1545, '제목': 1546, '야경': 1547, '꾼': 1548, '지금': 1549, '10회': 1550, '즉': 1551, '달이': 1552, '앞': 1553, '누군가': 1554, '얘기': 1555, '100': 1556, '훅': 1557, '지루함': 1558, '지존': 1559, '수록': 1560, '슬픔': 1561, '성도': 1562, '아역': 1563, '추노': 1564, '민폐': 1565, '싸하다': 1566, '저급': 1567, '딱하다': 1568, '킁': 1569, '그로': 1570, '인하다': 1571, '즐거움': 1572}\n"
],
[
"len(tokenizer.word_index)",
"_____no_output_____"
],
[
"X_train = tokenizer.texts_to_sequences(x_train)",
"_____no_output_____"
],
[
"print(X_train[3:5])",
"[[523, 146, 524, 4, 188, 80, 6, 4, 44, 525], [526, 527, 38, 528, 15, 292, 2, 16, 529, 28, 530, 118, 531, 532, 293, 189, 118]]\n"
],
[
"hist = list()\nfor sent in X_train:\n hist.append(len(sent))\n\nprint(hist)",
"[6, 13, 8, 10, 17, 22, 6, 38, 7, 16, 8, 16, 15, 20, 21, 7, 24, 12, 17, 15, 13, 5, 43, 9, 5, 15, 6, 3, 1, 16, 21, 10, 6, 9, 6, 8, 8, 10, 7, 7, 4, 29, 6, 14, 30, 8, 45, 19, 5, 57, 17, 15, 13, 16, 1, 2, 1, 32, 3, 4, 7, 9, 10, 28, 14, 44, 27, 13, 10, 10, 8, 33, 1, 7, 4, 14, 7, 3, 27, 4, 14, 3, 18, 8, 4, 5, 3, 4, 6, 11, 7, 8, 2, 9, 5, 9, 4, 6, 6, 11, 14, 9, 9, 9, 5, 13, 1, 3, 13, 11, 11, 13, 7, 1, 16, 3, 6, 5, 10, 4, 33, 18, 8, 5, 3, 7, 8, 18, 41, 11, 3, 5, 4, 10, 11, 10, 11, 7, 14, 15, 1, 15, 46, 3, 4, 5, 40, 22, 13, 43, 10, 1, 11, 4, 3, 4, 24, 11, 6, 2, 6, 13, 6, 38, 56, 16, 4, 13, 10, 2, 30, 45, 6, 6, 4, 18, 6, 4, 14, 9, 38, 9, 34, 5, 14, 1, 4, 8, 27, 6, 4, 16, 11, 20, 20, 14, 5, 20, 16, 27, 9, 11, 6, 48, 17, 8, 13, 3, 13, 7, 4, 7, 7, 13, 4, 11, 13, 11, 12, 10, 28, 8, 18, 7, 5, 13, 8, 7, 25, 9, 37, 9, 55, 48, 8, 6, 10, 17, 9, 8, 10, 10, 16, 12, 19, 7, 10, 6, 5, 8, 15, 3, 7, 2, 12, 8, 8, 5, 15, 6, 24, 5, 19, 16, 6, 6, 30, 12, 9, 6, 2, 10, 5, 12, 8, 17, 19, 3, 1, 11, 32, 11, 14, 35, 7, 9, 20, 2, 21, 11, 46, 4, 10, 35, 5, 2, 23, 8, 5, 16]\n"
],
[
"import matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"plt.hist(hist)",
"_____no_output_____"
],
[
"X_train = tf.keras.preprocessing.sequence.pad_sequences(X_train, maxlen=50)\nX_train[3:5]",
"_____no_output_____"
],
[
"Y_train.shape, X_train.shape",
"_____no_output_____"
],
[
"model = tf.keras.models.Sequential()",
"_____no_output_____"
],
[
"import numpy as np\nnp.unique(Y_train)",
"_____no_output_____"
],
[
"model.add(tf.keras.layers.Embedding(1572, 50)) # input layer\nmodel.add(tf.keras.layers.LSTM(64, activation='tanh')) # hidden layer\nmodel.add(tf.keras.layers.Dense(1, activation='sigmoid')) # output layer\nmodel.compile(optimizer='adam', loss='binary_crossentropy') # gadget",
"_____no_output_____"
],
[
"hist = model.fit(X_train, Y_train, epochs=50, batch_size=8)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a21865ee1143f164b3c31640a851b55cdca167f
| 92,817 |
ipynb
|
Jupyter Notebook
|
docs/allcools/cell_level/step_by_step/100kb/01-CellBasicFiltering.ipynb
|
mukamel-lab/ALLCools
|
756ef790665c6ce40633873211929ea92bcccc21
|
[
"MIT"
] | 5 |
2019-07-16T17:27:15.000Z
|
2022-01-14T19:12:27.000Z
|
docs/allcools/cell_level/step_by_step/100kb/01-CellBasicFiltering.ipynb
|
mukamel-lab/ALLCools
|
756ef790665c6ce40633873211929ea92bcccc21
|
[
"MIT"
] | 12 |
2019-10-17T19:34:43.000Z
|
2022-03-23T16:04:18.000Z
|
docs/allcools/cell_level/step_by_step/100kb/01-CellBasicFiltering.ipynb
|
mukamel-lab/ALLCools
|
756ef790665c6ce40633873211929ea92bcccc21
|
[
"MIT"
] | 4 |
2019-10-18T23:43:48.000Z
|
2022-02-12T04:12:26.000Z
| 95.885331 | 14,384 | 0.807384 |
[
[
[
"# Cell Basic Filtering\n\n## Content\nThe purpose of this step is to get rid of cells having **obvious** issues, including the cells with low mapping rate (potentially contaminated), low final reads (empty well or lost a large amount of DNA during library prep.), or abnormal methylation fractions (failed in bisulfite conversion or contaminated).\n\nWe have two principles when applying these filters:\n1. **We set the cutoff based on the distribution of the whole dataset**, where we assume the input dataset is largely successful (mostly > 80-90% cells will pass QC). The cutoffs below are typical values we used in brain methylome analysis. Still, you may need to adjust cutoffs based on different data quality or sample source.\n2. **The cutoff is intended to be loose.** We do not use stringent cutoffs here to prevent potential data loss. Abormal cells may remain after basic filtering, and will likely be identified in the analysis based filtering (see later notebooks about doublet score and outliers in clustering)\n\n## Input\n- Cell metadata table that contains mapping metric for basic QC filtering.\n\n## Output\n- Filtered cell metadata table that contains only cells passed QC.\n\n## About Cell Mapping Metrics\nWe usually gather many mapping metrics from each processing step, but not all of the metrics are relevant to the cell filtering. Below are the most relevant metrics that we use to filter cells. The name of these metrics might be different in your dataset. Change it according to the file you have.\n\nIf you use [YAP](https://hq-1.gitbook.io/mc) to do mapping, you can find up-to-date mapping metrics documentation for [key metrics](https://hq-1.gitbook.io/mc/mapping-metrics/key-mapping-metrics) and [all metrics](https://hq-1.gitbook.io/mc/mapping-metrics/all-mapping-metrics) in YAP doc.",
"_____no_output_____"
],
[
"## Import",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport seaborn as sns",
"_____no_output_____"
],
[
"sns.set_context(context='notebook', font_scale=1.3)",
"_____no_output_____"
]
],
[
[
"## Parameters",
"_____no_output_____"
]
],
[
[
"# change this to the path to your metadata\nmetadata_path = '../../../data/Brain/snmC-seq2/HIP.CellMetadata.csv.gz'\n\n# Basic filtering parameters\nmapping_rate_cutoff = 0.5\nmapping_rate_col_name = 'MappingRate' # Name may change\nfinal_reads_cutoff = 500000\nfinal_reads_col_name = 'FinalReads' # Name may change\nmccc_cutoff = 0.03\nmccc_col_name = 'mCCCFrac' # Name may change\nmch_cutoff = 0.2\nmch_col_name = 'mCHFrac' # Name may change\nmcg_cutoff = 0.5\nmcg_col_name = 'mCGFrac' # Name may change",
"_____no_output_____"
]
],
[
[
"## Load metadata",
"_____no_output_____"
]
],
[
[
"metadata = pd.read_csv(metadata_path, index_col=0)\ntotal_cells = metadata.shape[0]\nprint(f'Metadata of {total_cells} cells')",
"Metadata of 16985 cells\n"
],
[
"metadata.head()",
"_____no_output_____"
]
],
[
[
"## Filter by key mapping metrics",
"_____no_output_____"
],
[
"### Bismark Mapping Rate\n\n- Low mapping rate indicates potential contamination.\n- Usually R1 mapping rate is 8-10% higher than R2 mapping rate for snmC based technologies, but they should be highly correlated. Here I am using the combined mapping rate. If you are using the R1MappingRate or R2MappingRate, change the cutoff accordingly.\n- Usually there is a peak on the left, which corresponding to the empty wells.",
"_____no_output_____"
]
],
[
[
"_cutoff = mapping_rate_cutoff\n_col_name = mapping_rate_col_name\n\n# plot distribution to make sure cutoff is appropriate\ng = sns.displot(metadata[_col_name], binrange=(0, 1))\ng.ax.plot((_cutoff, _cutoff), g.ax.get_ylim(), c='r', linestyle='--')\n\nmapping_rate_judge = metadata[_col_name] > _cutoff\n_passed_cells = mapping_rate_judge.sum()\nprint(\n f'{_passed_cells} / {total_cells} cells ({_passed_cells / total_cells * 100:.1f}%) '\n f'passed the {_col_name} cutoff {_cutoff}.')",
"16985 / 16985 cells (100.0%) passed the MappingRate cutoff 0.5.\n"
]
],
[
[
"### Final Reads\n\n- The cutoff may change depending on how deep the library has been sequenced.\n- Usually there is a peak on the left, which corresponding to the empty wells.\n- There are also some cells having small number of reads, these wells may lost most of the DNA during library prep. Cells having too less reads can be hard to classify, since the methylome sequencing is an untargeted whole-genome sequencing.",
"_____no_output_____"
]
],
[
[
"_cutoff = final_reads_cutoff\n_col_name = final_reads_col_name\n\n# plot distribution to make sure cutoff is appropriate\ng = sns.displot(metadata[_col_name], binrange=(0, 5e6))\ng.ax.plot((_cutoff, _cutoff), g.ax.get_ylim(), c='r', linestyle='--')\n\nfinal_reads_judge = metadata[_col_name] > _cutoff\n_passed_cells = final_reads_judge.sum()\nprint(\n f'{_passed_cells} / {total_cells} cells ({_passed_cells / total_cells * 100:.1f}%) '\n f'passed the {_col_name} cutoff {_cutoff}.')",
"16985 / 16985 cells (100.0%) passed the FinalReads cutoff 500000.\n"
]
],
[
[
"### mCCC / CCC\n\n- The mCCC fraction is used as the proxy of the upper bound of the non-conversion rate for cell-level QC. The methylation level at CCC sites is the lowest among all of the different 3 base-contexts (CNN), and, in fact, it is very close to the unmethylated lambda mC fraction.\n- However, mCCC fraction is correlated with mCH (especially in brain data), so you can see a similar shape of distribution of mCCC and mCH, but the range is different.",
"_____no_output_____"
]
],
[
[
"_cutoff = mccc_cutoff\n_col_name = mccc_col_name\n\n# plot distribution to make sure cutoff is appropriate\ng = sns.displot(metadata[_col_name], binrange=(0, 0.05))\ng.ax.plot((_cutoff, _cutoff), g.ax.get_ylim(), c='r', linestyle='--')\n\nmccc_judge = metadata[_col_name] < _cutoff\n_passed_cells = mccc_judge.sum()\nprint(\n f'{_passed_cells} / {total_cells} cells ({_passed_cells / total_cells * 100:.1f}%) '\n f'passed the {_col_name} cutoff {_cutoff}.')",
"16985 / 16985 cells (100.0%) passed the mCCCFrac cutoff 0.03.\n"
]
],
[
[
"### mCH / CH\n\n- Usually failed cells (empty well or contaminated) tend to have abormal methylation level as well.",
"_____no_output_____"
]
],
[
[
"_cutoff = mch_cutoff\n_col_name = mch_col_name\n\n# plot distribution to make sure cutoff is appropriate\ng = sns.displot(metadata[_col_name], binrange=(0, 0.3))\ng.ax.plot((_cutoff, _cutoff), g.ax.get_ylim(), c='r', linestyle='--')\n\nmch_judge = metadata[_col_name] < _cutoff\n_passed_cells = mch_judge.sum()\nprint(\n f'{_passed_cells} / {total_cells} cells ({_passed_cells / total_cells * 100:.1f}%) '\n f'passed the {_col_name} cutoff {_cutoff}.')",
"16985 / 16985 cells (100.0%) passed the mCHFrac cutoff 0.2.\n"
]
],
[
[
"### mCG\n\n- Usually failed cells (empty well or contaminated) tend to have abormal methylation level as well.",
"_____no_output_____"
]
],
[
[
"_cutoff = mcg_cutoff\n_col_name = mcg_col_name\n\n# plot distribution to make sure cutoff is appropriate\ng = sns.displot(metadata[_col_name], binrange=(0.3, 1))\ng.ax.plot((_cutoff, _cutoff), g.ax.get_ylim(), c='r', linestyle='--')\n\nmcg_judge = metadata[_col_name] > _cutoff\n_passed_cells = mcg_judge.sum()\nprint(\n f'{_passed_cells} / {total_cells} cells ({_passed_cells / total_cells * 100:.1f}%) '\n f'passed the {_col_name} cutoff {_cutoff}.')",
"16985 / 16985 cells (100.0%) passed the mCGFrac cutoff 0.5.\n"
]
],
[
[
"## Combine filters",
"_____no_output_____"
]
],
[
[
"judge = mapping_rate_judge & final_reads_judge & mccc_judge & mch_judge & mcg_judge\npassed_cells = judge.sum()\nprint(\n f'{passed_cells} / {total_cells} cells ({passed_cells / total_cells * 100:.1f}%) '\n f'passed all the filters.')",
"16985 / 16985 cells (100.0%) passed all the filters.\n"
]
],
[
[
"## Sanity Test",
"_____no_output_____"
]
],
[
[
"try:\n assert (passed_cells / total_cells) > 0.6\nexcept AssertionError as e:\n e.args += (\n 'A large amount of the cells do not pass filter, check your cutoffs or overall dataset quality.',\n )\n raise e\n\ntry:\n assert passed_cells > 0\nexcept AssertionError as e:\n e.args += ('No cell remained after all the filters.', )\n raise e\n\nprint('Feel good')",
"Feel good\n"
]
],
[
[
"## Save filtered metadata",
"_____no_output_____"
]
],
[
[
"metadata_filtered = metadata[judge].copy()\n\nmetadata_filtered.to_csv('CellMetadata.PassQC.csv.gz')",
"_____no_output_____"
],
[
"metadata_filtered.head()",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
4a218ed25ff06ee86e6e5466969b83a3714b655f
| 55,160 |
ipynb
|
Jupyter Notebook
|
Train_on_GPU_HIRA.ipynb
|
Bandolu/Hogen
|
4b148aec242a82e879e83824850a5e15e060b071
|
[
"Apache-2.0"
] | null | null | null |
Train_on_GPU_HIRA.ipynb
|
Bandolu/Hogen
|
4b148aec242a82e879e83824850a5e15e060b071
|
[
"Apache-2.0"
] | null | null | null |
Train_on_GPU_HIRA.ipynb
|
Bandolu/Hogen
|
4b148aec242a82e879e83824850a5e15e060b071
|
[
"Apache-2.0"
] | null | null | null | 50.651974 | 218 | 0.547788 |
[
[
[
"from google.colab import drive\ndrive.mount('/content/drive')",
"Mounted at /content/drive\n"
]
],
[
[
"\r\n`cd /content/drive/My\\ Drive/Transformer-master/` -> `cd /content/drive/My\\ Drive/Colab\\ Notebooks/Transformer`",
"_____no_output_____"
]
],
[
[
"cd /content/drive/My\\ Drive/Colab\\ Notebooks/Transformer",
"/content/drive/My Drive/Colab Notebooks/Transformer\n"
]
],
[
[
"# ライブラリ読み込み",
"_____no_output_____"
]
],
[
[
"!apt install aptitude\n!aptitude install mecab libmecab-dev mecab-ipadic-utf8 git make curl xz-utils file -y\n!pip install mecab-python3==0.6",
"Reading package lists... Done\nBuilding dependency tree \nReading state information... Done\nThe following additional packages will be installed:\n aptitude-common libcgi-fast-perl libcgi-pm-perl libclass-accessor-perl\n libcwidget3v5 libencode-locale-perl libfcgi-perl libhtml-parser-perl\n libhtml-tagset-perl libhttp-date-perl libhttp-message-perl libio-html-perl\n libio-string-perl liblwp-mediatypes-perl libparse-debianchangelog-perl\n libsigc++-2.0-0v5 libsub-name-perl libtimedate-perl liburi-perl libxapian30\nSuggested packages:\n aptitude-doc-en | aptitude-doc apt-xapian-index debtags tasksel\n libcwidget-dev libdata-dump-perl libhtml-template-perl libxml-simple-perl\n libwww-perl xapian-tools\nThe following NEW packages will be installed:\n aptitude aptitude-common libcgi-fast-perl libcgi-pm-perl\n libclass-accessor-perl libcwidget3v5 libencode-locale-perl libfcgi-perl\n libhtml-parser-perl libhtml-tagset-perl libhttp-date-perl\n libhttp-message-perl libio-html-perl libio-string-perl\n liblwp-mediatypes-perl libparse-debianchangelog-perl libsigc++-2.0-0v5\n libsub-name-perl libtimedate-perl liburi-perl libxapian30\n0 upgraded, 21 newly installed, 0 to remove and 13 not upgraded.\nNeed to get 3,877 kB of archives.\nAfter this operation, 15.6 MB of additional disk space will be used.\nGet:1 http://archive.ubuntu.com/ubuntu bionic/main amd64 aptitude-common all 0.8.10-6ubuntu1 [1,014 kB]\nGet:2 http://archive.ubuntu.com/ubuntu bionic/main amd64 libsigc++-2.0-0v5 amd64 2.10.0-2 [10.9 kB]\nGet:3 http://archive.ubuntu.com/ubuntu bionic/main amd64 libcwidget3v5 amd64 0.5.17-7 [286 kB]\nGet:4 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libxapian30 amd64 1.4.5-1ubuntu0.1 [631 kB]\nGet:5 http://archive.ubuntu.com/ubuntu bionic/main amd64 aptitude amd64 0.8.10-6ubuntu1 [1,269 kB]\nGet:6 http://archive.ubuntu.com/ubuntu bionic/main amd64 libhtml-tagset-perl all 3.20-3 [12.1 kB]\nGet:7 http://archive.ubuntu.com/ubuntu bionic/main amd64 liburi-perl all 
1.73-1 [77.2 kB]\nGet:8 http://archive.ubuntu.com/ubuntu bionic/main amd64 libhtml-parser-perl amd64 3.72-3build1 [85.9 kB]\nGet:9 http://archive.ubuntu.com/ubuntu bionic/main amd64 libcgi-pm-perl all 4.38-1 [185 kB]\nGet:10 http://archive.ubuntu.com/ubuntu bionic/main amd64 libfcgi-perl amd64 0.78-2build1 [32.8 kB]\nGet:11 http://archive.ubuntu.com/ubuntu bionic/main amd64 libcgi-fast-perl all 1:2.13-1 [9,940 B]\nGet:12 http://archive.ubuntu.com/ubuntu bionic/main amd64 libsub-name-perl amd64 0.21-1build1 [11.6 kB]\nGet:13 http://archive.ubuntu.com/ubuntu bionic/main amd64 libclass-accessor-perl all 0.51-1 [21.2 kB]\nGet:14 http://archive.ubuntu.com/ubuntu bionic/main amd64 libencode-locale-perl all 1.05-1 [12.3 kB]\nGet:15 http://archive.ubuntu.com/ubuntu bionic/main amd64 libtimedate-perl all 2.3000-2 [37.5 kB]\nGet:16 http://archive.ubuntu.com/ubuntu bionic/main amd64 libhttp-date-perl all 6.02-1 [10.4 kB]\nGet:17 http://archive.ubuntu.com/ubuntu bionic/main amd64 libio-html-perl all 1.001-1 [14.9 kB]\nGet:18 http://archive.ubuntu.com/ubuntu bionic/main amd64 liblwp-mediatypes-perl all 6.02-1 [21.7 kB]\nGet:19 http://archive.ubuntu.com/ubuntu bionic/main amd64 libhttp-message-perl all 6.14-1 [72.1 kB]\nGet:20 http://archive.ubuntu.com/ubuntu bionic/main amd64 libio-string-perl all 1.08-3 [11.1 kB]\nGet:21 http://archive.ubuntu.com/ubuntu bionic/main amd64 libparse-debianchangelog-perl all 1.2.0-12 [49.5 kB]\nFetched 3,877 kB in 2s (2,203 kB/s)\nSelecting previously unselected package aptitude-common.\n(Reading database ... 
146374 files and directories currently installed.)\nPreparing to unpack .../00-aptitude-common_0.8.10-6ubuntu1_all.deb ...\nUnpacking aptitude-common (0.8.10-6ubuntu1) ...\nSelecting previously unselected package libsigc++-2.0-0v5:amd64.\nPreparing to unpack .../01-libsigc++-2.0-0v5_2.10.0-2_amd64.deb ...\nUnpacking libsigc++-2.0-0v5:amd64 (2.10.0-2) ...\nSelecting previously unselected package libcwidget3v5:amd64.\nPreparing to unpack .../02-libcwidget3v5_0.5.17-7_amd64.deb ...\nUnpacking libcwidget3v5:amd64 (0.5.17-7) ...\nSelecting previously unselected package libxapian30:amd64.\nPreparing to unpack .../03-libxapian30_1.4.5-1ubuntu0.1_amd64.deb ...\nUnpacking libxapian30:amd64 (1.4.5-1ubuntu0.1) ...\nSelecting previously unselected package aptitude.\nPreparing to unpack .../04-aptitude_0.8.10-6ubuntu1_amd64.deb ...\nUnpacking aptitude (0.8.10-6ubuntu1) ...\nSelecting previously unselected package libhtml-tagset-perl.\nPreparing to unpack .../05-libhtml-tagset-perl_3.20-3_all.deb ...\nUnpacking libhtml-tagset-perl (3.20-3) ...\nSelecting previously unselected package liburi-perl.\nPreparing to unpack .../06-liburi-perl_1.73-1_all.deb ...\nUnpacking liburi-perl (1.73-1) ...\nSelecting previously unselected package libhtml-parser-perl.\nPreparing to unpack .../07-libhtml-parser-perl_3.72-3build1_amd64.deb ...\nUnpacking libhtml-parser-perl (3.72-3build1) ...\nSelecting previously unselected package libcgi-pm-perl.\nPreparing to unpack .../08-libcgi-pm-perl_4.38-1_all.deb ...\nUnpacking libcgi-pm-perl (4.38-1) ...\nSelecting previously unselected package libfcgi-perl.\nPreparing to unpack .../09-libfcgi-perl_0.78-2build1_amd64.deb ...\nUnpacking libfcgi-perl (0.78-2build1) ...\nSelecting previously unselected package libcgi-fast-perl.\nPreparing to unpack .../10-libcgi-fast-perl_1%3a2.13-1_all.deb ...\nUnpacking libcgi-fast-perl (1:2.13-1) ...\nSelecting previously unselected package libsub-name-perl.\nPreparing to unpack 
.../11-libsub-name-perl_0.21-1build1_amd64.deb ...\nUnpacking libsub-name-perl (0.21-1build1) ...\nSelecting previously unselected package libclass-accessor-perl.\nPreparing to unpack .../12-libclass-accessor-perl_0.51-1_all.deb ...\nUnpacking libclass-accessor-perl (0.51-1) ...\nSelecting previously unselected package libencode-locale-perl.\nPreparing to unpack .../13-libencode-locale-perl_1.05-1_all.deb ...\nUnpacking libencode-locale-perl (1.05-1) ...\nSelecting previously unselected package libtimedate-perl.\nPreparing to unpack .../14-libtimedate-perl_2.3000-2_all.deb ...\nUnpacking libtimedate-perl (2.3000-2) ...\nSelecting previously unselected package libhttp-date-perl.\nPreparing to unpack .../15-libhttp-date-perl_6.02-1_all.deb ...\nUnpacking libhttp-date-perl (6.02-1) ...\nSelecting previously unselected package libio-html-perl.\nPreparing to unpack .../16-libio-html-perl_1.001-1_all.deb ...\nUnpacking libio-html-perl (1.001-1) ...\nSelecting previously unselected package liblwp-mediatypes-perl.\nPreparing to unpack .../17-liblwp-mediatypes-perl_6.02-1_all.deb ...\nUnpacking liblwp-mediatypes-perl (6.02-1) ...\nSelecting previously unselected package libhttp-message-perl.\nPreparing to unpack .../18-libhttp-message-perl_6.14-1_all.deb ...\nUnpacking libhttp-message-perl (6.14-1) ...\nSelecting previously unselected package libio-string-perl.\nPreparing to unpack .../19-libio-string-perl_1.08-3_all.deb ...\nUnpacking libio-string-perl (1.08-3) ...\nSelecting previously unselected package libparse-debianchangelog-perl.\nPreparing to unpack .../20-libparse-debianchangelog-perl_1.2.0-12_all.deb ...\nUnpacking libparse-debianchangelog-perl (1.2.0-12) ...\nSetting up libhtml-tagset-perl (3.20-3) ...\nSetting up libxapian30:amd64 (1.4.5-1ubuntu0.1) ...\nSetting up libencode-locale-perl (1.05-1) ...\nSetting up libtimedate-perl (2.3000-2) ...\nSetting up libio-html-perl (1.001-1) ...\nSetting up aptitude-common (0.8.10-6ubuntu1) ...\nSetting up 
liblwp-mediatypes-perl (6.02-1) ...\nSetting up liburi-perl (1.73-1) ...\nSetting up libhtml-parser-perl (3.72-3build1) ...\nSetting up libcgi-pm-perl (4.38-1) ...\nSetting up libio-string-perl (1.08-3) ...\nSetting up libsub-name-perl (0.21-1build1) ...\nSetting up libfcgi-perl (0.78-2build1) ...\nSetting up libsigc++-2.0-0v5:amd64 (2.10.0-2) ...\nSetting up libclass-accessor-perl (0.51-1) ...\nSetting up libhttp-date-perl (6.02-1) ...\nSetting up libcgi-fast-perl (1:2.13-1) ...\nSetting up libparse-debianchangelog-perl (1.2.0-12) ...\nSetting up libhttp-message-perl (6.14-1) ...\nSetting up libcwidget3v5:amd64 (0.5.17-7) ...\nSetting up aptitude (0.8.10-6ubuntu1) ...\nupdate-alternatives: using /usr/bin/aptitude-curses to provide /usr/bin/aptitude (aptitude) in auto mode\nProcessing triggers for man-db (2.8.3-2ubuntu0.1) ...\nProcessing triggers for libc-bin (2.27-3ubuntu1.3) ...\n/sbin/ldconfig.real: /usr/local/lib/python3.6/dist-packages/ideep4py/lib/libmkldnn.so.0 is not a symbolic link\n\ngit is already installed at the requested version (1:2.17.1-1ubuntu0.7)\nmake is already installed at the requested version (4.1-9.1ubuntu1)\ncurl is already installed at the requested version (7.58.0-2ubuntu3.12)\nxz-utils is already installed at the requested version (5.2.2-1.3)\ngit is already installed at the requested version (1:2.17.1-1ubuntu0.7)\nmake is already installed at the requested version (4.1-9.1ubuntu1)\ncurl is already installed at the requested version (7.58.0-2ubuntu3.12)\nxz-utils is already installed at the requested version (5.2.2-1.3)\nThe following NEW packages will be installed:\n file libmagic-mgc{a} libmagic1{a} libmecab-dev libmecab2{a} mecab mecab-ipadic{a} mecab-ipadic-utf8 mecab-jumandic{a} mecab-jumandic-utf8{a} mecab-utils{a} \n0 packages upgraded, 11 newly installed, 0 to remove and 13 not upgraded.\nNeed to get 29.3 MB of archives. 
After unpacking 282 MB will be used.\nGet: 1 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libmagic-mgc amd64 1:5.32-2ubuntu0.4 [184 kB]\nGet: 2 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 libmagic1 amd64 1:5.32-2ubuntu0.4 [68.6 kB]\nGet: 3 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 file amd64 1:5.32-2ubuntu0.4 [22.1 kB]\nGet: 4 http://archive.ubuntu.com/ubuntu bionic/universe amd64 libmecab2 amd64 0.996-5 [257 kB]\nGet: 5 http://archive.ubuntu.com/ubuntu bionic/universe amd64 libmecab-dev amd64 0.996-5 [308 kB]\nGet: 6 http://archive.ubuntu.com/ubuntu bionic/universe amd64 mecab-utils amd64 0.996-5 [4,856 B]\nGet: 7 http://archive.ubuntu.com/ubuntu bionic/universe amd64 mecab-jumandic-utf8 all 7.0-20130310-4 [16.2 MB]\nGet: 8 http://archive.ubuntu.com/ubuntu bionic/universe amd64 mecab-jumandic all 7.0-20130310-4 [2,212 B]\nGet: 9 http://archive.ubuntu.com/ubuntu bionic/universe amd64 mecab-ipadic all 2.7.0-20070801+main-1 [12.1 MB]\nGet: 10 http://archive.ubuntu.com/ubuntu bionic/universe amd64 mecab amd64 0.996-5 [132 kB]\nGet: 11 http://archive.ubuntu.com/ubuntu bionic/universe amd64 mecab-ipadic-utf8 all 2.7.0-20070801+main-1 [3,522 B]\nFetched 29.3 MB in 3s (10.9 MB/s)\nSelecting previously unselected package libmagic-mgc.\n(Reading database ... 
146833 files and directories currently installed.)\nPreparing to unpack .../00-libmagic-mgc_1%3a5.32-2ubuntu0.4_amd64.deb ...\nUnpacking libmagic-mgc (1:5.32-2ubuntu0.4) ...\nSelecting previously unselected package libmagic1:amd64.\nPreparing to unpack .../01-libmagic1_1%3a5.32-2ubuntu0.4_amd64.deb ...\nUnpacking libmagic1:amd64 (1:5.32-2ubuntu0.4) ...\nSelecting previously unselected package file.\nPreparing to unpack .../02-file_1%3a5.32-2ubuntu0.4_amd64.deb ...\nUnpacking file (1:5.32-2ubuntu0.4) ...\nSelecting previously unselected package libmecab2:amd64.\nPreparing to unpack .../03-libmecab2_0.996-5_amd64.deb ...\nUnpacking libmecab2:amd64 (0.996-5) ...\nSelecting previously unselected package libmecab-dev.\nPreparing to unpack .../04-libmecab-dev_0.996-5_amd64.deb ...\nUnpacking libmecab-dev (0.996-5) ...\nSelecting previously unselected package mecab-utils.\nPreparing to unpack .../05-mecab-utils_0.996-5_amd64.deb ...\nUnpacking mecab-utils (0.996-5) ...\nSelecting previously unselected package mecab-jumandic-utf8.\nPreparing to unpack .../06-mecab-jumandic-utf8_7.0-20130310-4_all.deb ...\nUnpacking mecab-jumandic-utf8 (7.0-20130310-4) ...\nSelecting previously unselected package mecab-jumandic.\nPreparing to unpack .../07-mecab-jumandic_7.0-20130310-4_all.deb ...\nUnpacking mecab-jumandic (7.0-20130310-4) ...\nSelecting previously unselected package mecab-ipadic.\nPreparing to unpack .../08-mecab-ipadic_2.7.0-20070801+main-1_all.deb ...\nUnpacking mecab-ipadic (2.7.0-20070801+main-1) ...\nSelecting previously unselected package mecab.\nPreparing to unpack .../09-mecab_0.996-5_amd64.deb ...\nUnpacking mecab (0.996-5) ...\nSelecting previously unselected package mecab-ipadic-utf8.\nPreparing to unpack .../10-mecab-ipadic-utf8_2.7.0-20070801+main-1_all.deb ...\nUnpacking mecab-ipadic-utf8 (2.7.0-20070801+main-1) ...\nSetting up libmecab2:amd64 (0.996-5) ...\nSetting up libmagic-mgc (1:5.32-2ubuntu0.4) ...\nSetting up libmagic1:amd64 (1:5.32-2ubuntu0.4) 
...\nSetting up mecab-utils (0.996-5) ...\nSetting up mecab-ipadic (2.7.0-20070801+main-1) ...\nCompiling IPA dictionary for Mecab. This takes long time...\nreading /usr/share/mecab/dic/ipadic/unk.def ... 40\nemitting double-array: 100% |###########################################| \n/usr/share/mecab/dic/ipadic/model.def is not found. skipped.\nreading /usr/share/mecab/dic/ipadic/Noun.verbal.csv ... 12146\nreading /usr/share/mecab/dic/ipadic/Suffix.csv ... 1393\nreading /usr/share/mecab/dic/ipadic/Noun.nai.csv ... 42\nreading /usr/share/mecab/dic/ipadic/Adverb.csv ... 3032\nreading /usr/share/mecab/dic/ipadic/Noun.adverbal.csv ... 795\nreading /usr/share/mecab/dic/ipadic/Symbol.csv ... 208\nreading /usr/share/mecab/dic/ipadic/Adnominal.csv ... 135\nreading /usr/share/mecab/dic/ipadic/Noun.others.csv ... 151\nreading /usr/share/mecab/dic/ipadic/Postp-col.csv ... 91\nreading /usr/share/mecab/dic/ipadic/Prefix.csv ... 221\nreading /usr/share/mecab/dic/ipadic/Noun.adjv.csv ... 3328\nreading /usr/share/mecab/dic/ipadic/Interjection.csv ... 252\nreading /usr/share/mecab/dic/ipadic/Noun.place.csv ... 72999\nreading /usr/share/mecab/dic/ipadic/Noun.number.csv ... 42\nreading /usr/share/mecab/dic/ipadic/Noun.org.csv ... 16668\nreading /usr/share/mecab/dic/ipadic/Filler.csv ... 19\nreading /usr/share/mecab/dic/ipadic/Noun.csv ... 60477\nreading /usr/share/mecab/dic/ipadic/Noun.name.csv ... 34202\nreading /usr/share/mecab/dic/ipadic/Others.csv ... 2\nreading /usr/share/mecab/dic/ipadic/Noun.demonst.csv ... 120\nreading /usr/share/mecab/dic/ipadic/Noun.proper.csv ... 27327\nreading /usr/share/mecab/dic/ipadic/Verb.csv ... 130750\nreading /usr/share/mecab/dic/ipadic/Auxil.csv ... 199\nreading /usr/share/mecab/dic/ipadic/Adj.csv ... 27210\nreading /usr/share/mecab/dic/ipadic/Postp.csv ... 146\nreading /usr/share/mecab/dic/ipadic/Conjunction.csv ... 171\nemitting double-array: 100% |###########################################| \nreading /usr/share/mecab/dic/ipadic/matrix.def ... 
1316x1316\nemitting matrix : 100% |###########################################| \n\ndone!\nupdate-alternatives: using /var/lib/mecab/dic/ipadic to provide /var/lib/mecab/dic/debian (mecab-dictionary) in auto mode\nSetting up libmecab-dev (0.996-5) ...\nSetting up file (1:5.32-2ubuntu0.4) ...\nSetting up mecab-jumandic-utf8 (7.0-20130310-4) ...\nCompiling Juman dictionary for Mecab.\nreading /usr/share/mecab/dic/juman/unk.def ... 37\nemitting double-array: 100% |###########################################| \nreading /usr/share/mecab/dic/juman/Assert.csv ... 34\nreading /usr/share/mecab/dic/juman/ContentW.csv ... 551145\nreading /usr/share/mecab/dic/juman/Noun.koyuu.csv ... 7964\nreading /usr/share/mecab/dic/juman/Suffix.csv ... 2128\nreading /usr/share/mecab/dic/juman/Noun.keishiki.csv ... 8\nreading /usr/share/mecab/dic/juman/Wikipedia.csv ... 167709\nreading /usr/share/mecab/dic/juman/Prefix.csv ... 90\nreading /usr/share/mecab/dic/juman/Noun.hukusi.csv ... 81\nreading /usr/share/mecab/dic/juman/Emoticon.csv ... 972\nreading /usr/share/mecab/dic/juman/Demonstrative.csv ... 97\nreading /usr/share/mecab/dic/juman/Special.csv ... 158\nreading /usr/share/mecab/dic/juman/Auto.csv ... 18931\nreading /usr/share/mecab/dic/juman/Postp.csv ... 108\nreading /usr/share/mecab/dic/juman/Noun.suusi.csv ... 49\nreading /usr/share/mecab/dic/juman/Rengo.csv ... 1118\nreading /usr/share/mecab/dic/juman/AuxV.csv ... 593\nemitting double-array: 100% |###########################################| \nreading /usr/share/mecab/dic/juman/matrix.def ... 1876x1876\nemitting matrix : 100% |###########################################| \n\ndone!\nSetting up mecab-ipadic-utf8 (2.7.0-20070801+main-1) ...\nCompiling IPA dictionary for Mecab. This takes long time...\nreading /usr/share/mecab/dic/ipadic/unk.def ... 40\nemitting double-array: 100% |###########################################| \n/usr/share/mecab/dic/ipadic/model.def is not found. 
skipped.\nreading /usr/share/mecab/dic/ipadic/Noun.verbal.csv ... 12146\nreading /usr/share/mecab/dic/ipadic/Suffix.csv ... 1393\nreading /usr/share/mecab/dic/ipadic/Noun.nai.csv ... 42\nreading /usr/share/mecab/dic/ipadic/Adverb.csv ... 3032\nreading /usr/share/mecab/dic/ipadic/Noun.adverbal.csv ... 795\nreading /usr/share/mecab/dic/ipadic/Symbol.csv ... 208\nreading /usr/share/mecab/dic/ipadic/Adnominal.csv ... 135\nreading /usr/share/mecab/dic/ipadic/Noun.others.csv ... 151\nreading /usr/share/mecab/dic/ipadic/Postp-col.csv ... 91\nreading /usr/share/mecab/dic/ipadic/Prefix.csv ... 221\nreading /usr/share/mecab/dic/ipadic/Noun.adjv.csv ... 3328\nreading /usr/share/mecab/dic/ipadic/Interjection.csv ... 252\nreading /usr/share/mecab/dic/ipadic/Noun.place.csv ... 72999\nreading /usr/share/mecab/dic/ipadic/Noun.number.csv ... 42\nreading /usr/share/mecab/dic/ipadic/Noun.org.csv ... 16668\nreading /usr/share/mecab/dic/ipadic/Filler.csv ... 19\nreading /usr/share/mecab/dic/ipadic/Noun.csv ... 60477\nreading /usr/share/mecab/dic/ipadic/Noun.name.csv ... 34202\nreading /usr/share/mecab/dic/ipadic/Others.csv ... 2\nreading /usr/share/mecab/dic/ipadic/Noun.demonst.csv ... 120\nreading /usr/share/mecab/dic/ipadic/Noun.proper.csv ... 27327\nreading /usr/share/mecab/dic/ipadic/Verb.csv ... 130750\nreading /usr/share/mecab/dic/ipadic/Auxil.csv ... 199\nreading /usr/share/mecab/dic/ipadic/Adj.csv ... 27210\nreading /usr/share/mecab/dic/ipadic/Postp.csv ... 146\nreading /usr/share/mecab/dic/ipadic/Conjunction.csv ... 171\nemitting double-array: 100% |###########################################| \nreading /usr/share/mecab/dic/ipadic/matrix.def ... 1316x1316\nemitting matrix : 100% |###########################################| \n\ndone!\nupdate-alternatives: using /var/lib/mecab/dic/ipadic-utf8 to provide /var/lib/mecab/dic/debian (mecab-dictionary) in auto mode\nSetting up mecab (0.996-5) ...\nCompiling IPA dictionary for Mecab. 
This takes long time...\nreading /usr/share/mecab/dic/ipadic/unk.def ... 40\nemitting double-array: 100% |###########################################| \n/usr/share/mecab/dic/ipadic/model.def is not found. skipped.\nreading /usr/share/mecab/dic/ipadic/Noun.verbal.csv ... 12146\nreading /usr/share/mecab/dic/ipadic/Suffix.csv ... 1393\nreading /usr/share/mecab/dic/ipadic/Noun.nai.csv ... 42\nreading /usr/share/mecab/dic/ipadic/Adverb.csv ... 3032\nreading /usr/share/mecab/dic/ipadic/Noun.adverbal.csv ... 795\nreading /usr/share/mecab/dic/ipadic/Symbol.csv ... 208\nreading /usr/share/mecab/dic/ipadic/Adnominal.csv ... 135\nreading /usr/share/mecab/dic/ipadic/Noun.others.csv ... 151\nreading /usr/share/mecab/dic/ipadic/Postp-col.csv ... 91\nreading /usr/share/mecab/dic/ipadic/Prefix.csv ... 221\nreading /usr/share/mecab/dic/ipadic/Noun.adjv.csv ... 3328\nreading /usr/share/mecab/dic/ipadic/Interjection.csv ... 252\nreading /usr/share/mecab/dic/ipadic/Noun.place.csv ... 72999\nreading /usr/share/mecab/dic/ipadic/Noun.number.csv ... 42\nreading /usr/share/mecab/dic/ipadic/Noun.org.csv ... 16668\nreading /usr/share/mecab/dic/ipadic/Filler.csv ... 19\nreading /usr/share/mecab/dic/ipadic/Noun.csv ... 60477\nreading /usr/share/mecab/dic/ipadic/Noun.name.csv ... 34202\nreading /usr/share/mecab/dic/ipadic/Others.csv ... 2\nreading /usr/share/mecab/dic/ipadic/Noun.demonst.csv ... 120\nreading /usr/share/mecab/dic/ipadic/Noun.proper.csv ... 27327\nreading /usr/share/mecab/dic/ipadic/Verb.csv ... 130750\nreading /usr/share/mecab/dic/ipadic/Auxil.csv ... 199\nreading /usr/share/mecab/dic/ipadic/Adj.csv ... 27210\nreading /usr/share/mecab/dic/ipadic/Postp.csv ... 146\nreading /usr/share/mecab/dic/ipadic/Conjunction.csv ... 171\nemitting double-array: 100% |###########################################| \nreading /usr/share/mecab/dic/ipadic/matrix.def ... 
1316x1316\nemitting matrix : 100% |###########################################| \n\ndone!\nSetting up mecab-jumandic (7.0-20130310-4) ...\nProcessing triggers for man-db (2.8.3-2ubuntu0.1) ...\nProcessing triggers for libc-bin (2.27-3ubuntu1.3) ...\n/sbin/ldconfig.real: /usr/local/lib/python3.6/dist-packages/ideep4py/lib/libmkldnn.so.0 is not a symbolic link\n\n \nCollecting mecab-python3==0.6\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/e3/7f/f98371035a0171abf95f9893eabf915f8a3199d005fed3cd69cc122fed40/mecab-python3-0.6.tar.gz (41kB)\n\u001b[K |████████████████████████████████| 51kB 6.0MB/s \n\u001b[?25hBuilding wheels for collected packages: mecab-python3\n Building wheel for mecab-python3 (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for mecab-python3: filename=mecab_python3-0.6-cp36-cp36m-linux_x86_64.whl size=155487 sha256=3291240955920d96a1c3ba501a71c28cc28e991acd2a4bc70bd15a5d685db5aa\n Stored in directory: /root/.cache/pip/wheels/4d/51/5b/987888cacaf8bb25982ef4569261f68debe85b7587c5563c79\nSuccessfully built mecab-python3\nInstalling collected packages: mecab-python3\nSuccessfully installed mecab-python3-0.6\n"
],
[
"import numpy as np\nimport os\nimport time\nimport MeCab\n\nimport preprocess_utils\nimport model\nimport weight_utils\n\nimport tensorflow.keras as keras\nimport tensorflow as tf\nprint(tf.__version__)",
"2.4.1\n"
]
],
[
[
"# 日英翻訳データ ダウンロード",
"_____no_output_____"
]
],
[
[
"# !wget http://www.manythings.org/anki/jpn-eng.zip\n# !unzip ./jpn-eng.zip",
"_____no_output_____"
]
],
[
[
"# データ読み込み",
"_____no_output_____"
],
[
" corpus_path = './jpn.txt' -> corpus_path = './DATA/kesen3_ex.tsv'",
"_____no_output_____"
]
],
[
[
"dataset = preprocess_utils.CreateData(\n corpus_path = './DATA/kesenngo.tsv',\n do_shuffle=True,\n seed_value=123,\n split_percent=0.95 # 学習データの割合\n)\n\ntrain_source, train_target, test_source, test_target, train_licence, test_licence = dataset.split_data()\n\nprint('**** Amount of data ****')\nprint('train_source: ', len(train_source))\nprint('train_target: ', len(train_target))\nprint('test_source: ', len(test_source))\nprint('test_target: ', len(test_target))\nprint('\\n')\nprint('**** Train data example ****')\nprint('Source Example: ', train_source[0])\nprint('Target Example: ', train_target[0])\nprint('Licence: ', train_licence[0])\nprint('\\n')\nprint('**** Test data example ****')\nprint('Source Example: ', test_source[0])\nprint('Target Example: ', test_target[0])\nprint('Licence: ', test_licence[0])",
"**** Amount of data ****\ntrain_source: 8654\ntrain_target: 8654\ntest_source: 456\ntest_target: 456\n\n\n**** Train data example ****\nSource Example: 本家\nTarget Example: ほんけ\nLicence: 気仙沼市\n\n\n**** Test data example ****\nSource Example: 行ったなあ\nTarget Example: 行ったよねー\nLicence: 気仙沼市\n"
],
[
"import pandas as pd\r\nimport re\r\nimport codecs\r\nimport copy\r\n\r\n\r\ncorpus_path = './DATA/Kesennuma.csv'\r\ndf = pd.read_csv(corpus_path)\r\nprint('**** Amount of data ****')\r\nprint(df)\r\nprint('\\n')\r\nprint('**** Amount of data ****')\r\n#for index, row in df.iterrows():\r\n #print(row['項目名'])\r\n",
"**** Amount of data ****\n 市区町村 項目名 回答語形\n0 気仙沼市 起きない オギネー\n1 気仙沼市 来ない コネー\n2 気仙沼市 しない シネー\n3 気仙沼市 しない スネー\n4 気仙沼市 行かなかった イガナカッタ\n.. ... ... ...\n242 気仙沼市 知事:一つ仮名 [ts??z??]\n243 気仙沼市 地図:一つ仮名 [ts?z??]\n244 気仙沼市 切符(きっぷ):拍意識・文字意識 き/っ/ぷ\n245 気仙沼市 切符(きっぷ):拍意識・文字意識 きっ/ぷ\n246 気仙沼市 風船(ふうせん):拍意識・文字意識 ふ/う/せ/ん\n\n[247 rows x 3 columns]\n\n\n**** Amount of data ****\n"
]
],
[
[
"# 前処理",
"_____no_output_____"
]
],
[
[
"BATCH_SIZE = 64 # バッチサイズ\nMAX_LENGTH = 60 # シーケンスの長さ\nUSE_TPU = False # TPUを使うか\nBUFFER_SIZE = 50000",
"_____no_output_____"
],
[
"train_dataset = preprocess_utils.PreprocessData(\n mecab = MeCab.Tagger(\"-Ochasen\"),\n source_data = train_source,\n target_data = train_target,\n max_length = MAX_LENGTH,\n batch_size = BATCH_SIZE,\n test_flag = False,\n train_dataset = None,\n)\n\ntrain_dataset.preprocess_data()",
"_____no_output_____"
],
[
"if USE_TPU:\n tpu_grpc_url = \"grpc://\" + os.environ[\"COLAB_TPU_ADDR\"]\n tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu_grpc_url)\n tf.config.experimental_connect_to_cluster(tpu_cluster_resolver)\n tf.tpu.experimental.initialize_tpu_system(tpu_cluster_resolver) \n strategy = tf.distribute.experimental.TPUStrategy(tpu_cluster_resolver)\n\ntrainset = tf.data.Dataset.from_tensor_slices((train_dataset.source_vector, train_dataset.target_vector))\ntrainset = trainset.map(lambda source, target: (tf.cast(source, tf.int64), tf.cast(target, tf.int64))).shuffle(buffer_size=BUFFER_SIZE).batch(BATCH_SIZE).prefetch(buffer_size=tf.data.experimental.AUTOTUNE)\n\nif USE_TPU:\n trainset = strategy.experimental_distribute_dataset(trainset)",
"_____no_output_____"
]
],
[
[
"# モデル定義",
"_____no_output_____"
]
],
[
[
"num_layers=4 # レイヤー数\nd_model=64 # 中間層の次元数\nnum_heads=4 # Multi Head Attentionのヘッド数\ndff=2048 # Feed Forward Networkの次元数\ndropout_rate = 0.1 # ドロップアウト率\n\nsource_vocab_size = max(train_dataset.source_token.values()) + 1 # source文の語彙数\ntarget_vocab_size = max(train_dataset.target_token.values()) + 1 # target文の語彙数",
"_____no_output_____"
],
[
"# 重み初期化\ndef initialize_weight(checkpoint_path, optimizer, transformer, max_length, batch_size, use_tpu=False):\n\n if os.path.exists(checkpoint_path+'.pkl'):\n if use_tpu:\n number_of_tpu_cores = tpu_cluster_resolver.num_accelerators()['TPU']\n initialize_source, initialize_target = [[1]*max_length]*number_of_tpu_cores, [[1]*max_length]*number_of_tpu_cores\n initialize_set = tf.data.Dataset.from_tensor_slices((initialize_source, initialize_target))\n initialize_set = initialize_set.map(lambda source, target: (tf.cast(source, tf.int64), tf.cast(target, tf.int64))\n ).shuffle(buffer_size=BUFFER_SIZE).batch(batch_size).prefetch(\n buffer_size=tf.data.experimental.AUTOTUNE\n )\n initialize_set = strategy.experimental_distribute_dataset(initialize_set)\n\n for inp, tar in initialize_set:\n distributed_train_step(inp, tar)\n\n else:\n initialize_set = tf.ones([batch_size, max_length], tf.int64)\n train_step(initialize_set, initialize_set)\n \n try:\n weight_utils.load_weights_from_pickle(checkpoint_path, optimizer, transformer)\n except:\n print('Failed to load checkpoints.')\n\n else:\n print('No available checkpoints.')",
"_____no_output_____"
]
],
[
[
"# 学習実行",
"_____no_output_____"
],
[
"checkpoints/gpu/model -> /checkpoints_EX/gpu/model",
"_____no_output_____"
]
],
[
[
"# Transformer\ntransformer = model.Transformer(num_layers, d_model, num_heads, dff,\n source_vocab_size, target_vocab_size, \n pe_input=source_vocab_size, \n pe_target=target_vocab_size,\n rate=dropout_rate)\n\n# Learning Rate\nlearning_rate = model.CustomSchedule(d_model)\n\n# Optimizer\noptimizer = tf.keras.optimizers.Adam(learning_rate, beta_1=0.9, beta_2=0.98, \n epsilon=1e-9)\n\n# Loss\nloss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction='none')\n\n# Loss Function\ndef loss_function(real, pred):\n mask = tf.math.logical_not(tf.math.equal(real, 0))\n loss_ = loss_object(real, pred)\n\n mask = tf.cast(mask, dtype=loss_.dtype)\n loss_ *= mask\n return tf.reduce_mean(loss_)\n\n# Metrics\ntrain_loss = tf.keras.metrics.Mean(name='train_loss')\ntrain_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')\n\n# Checkpoint\ncheckpoint_path = \"/content/drive/My Drive/Colab Notebooks/Transformer/checkpoints_EX/gpu/model\"\n\ntrain_step_signature = [\n tf.TensorSpec(shape=(None, None), dtype=tf.int64),\n tf.TensorSpec(shape=(None, None), dtype=tf.int64),\n]\[email protected](input_signature=train_step_signature)\ndef train_step(inp, tar):\n tar_inp = tar[:, :-1]\n tar_real = tar[:, 1:]\n \n enc_padding_mask, combined_mask, dec_padding_mask = model.create_masks(inp, tar_inp)\n \n with tf.GradientTape() as tape:\n predictions, _ = transformer(inp, tar_inp, \n True, \n enc_padding_mask, \n combined_mask, \n dec_padding_mask)\n loss = loss_function(tar_real, predictions)\n\n gradients = tape.gradient(loss, transformer.trainable_variables) \n optimizer.apply_gradients(zip(gradients, transformer.trainable_variables))\n \n train_loss(loss)\n train_accuracy(tar_real, predictions)\n\n# Initialize Weight\ninitialize_weight(checkpoint_path, optimizer, transformer, MAX_LENGTH, BATCH_SIZE, use_tpu=USE_TPU)\n\nEPOCHS = 30\nbatch = 0\n\nfor epoch in range(EPOCHS):\n start = time.time()\n \n train_loss.reset_states()\n 
train_accuracy.reset_states()\n \n for inp, tar in trainset:\n train_step(inp, tar)\n \n if batch % 50 == 0:\n print ('Epoch {} Batch {} Loss {:.4f} Accuracy {:.4f}'.format(\n epoch + 1, batch, train_loss.result(), train_accuracy.result()))\n \n batch+=1\n \n if (epoch + 1) % 5 == 0:\n print('Saving checkpoint for epoch {} at {}'.format(epoch+1, checkpoint_path))\n weight_utils.save_weights_as_pickle(checkpoint_path, optimizer, transformer)\n \n print ('Epoch {} Loss {:.4f} Accuracy {:.4f}'.format(epoch + 1, \n train_loss.result(), \n train_accuracy.result()))\n\n print ('Time taken for 1 epoch: {} secs\\n'.format(time.time() - start))",
"No available checkpoints.\nEpoch 1 Batch 0 Loss 0.4056 Accuracy 0.0000\nEpoch 1 Batch 50 Loss 0.3922 Accuracy 0.0077\nEpoch 1 Batch 100 Loss 0.3736 Accuracy 0.0123\nEpoch 1 Loss 0.3676 Accuracy 0.0134\nTime taken for 1 epoch: 21.844099044799805 secs\n\nEpoch 2 Batch 150 Loss 0.3455 Accuracy 0.0169\nEpoch 2 Batch 200 Loss 0.3346 Accuracy 0.0169\nEpoch 2 Batch 250 Loss 0.3308 Accuracy 0.0169\nEpoch 2 Loss 0.3275 Accuracy 0.0170\nTime taken for 1 epoch: 7.889256954193115 secs\n\nEpoch 3 Batch 300 Loss 0.3023 Accuracy 0.0186\nEpoch 3 Batch 350 Loss 0.2892 Accuracy 0.0192\nEpoch 3 Batch 400 Loss 0.2808 Accuracy 0.0203\nEpoch 3 Loss 0.2790 Accuracy 0.0204\nTime taken for 1 epoch: 7.875979661941528 secs\n\nEpoch 4 Batch 450 Loss 0.2457 Accuracy 0.0248\nEpoch 4 Batch 500 Loss 0.2350 Accuracy 0.0258\nEpoch 4 Loss 0.2251 Accuracy 0.0269\nTime taken for 1 epoch: 7.966558933258057 secs\n\nEpoch 5 Batch 550 Loss 0.2013 Accuracy 0.0298\nEpoch 5 Batch 600 Loss 0.1910 Accuracy 0.0317\nEpoch 5 Batch 650 Loss 0.1813 Accuracy 0.0332\nSaving checkpoint for epoch 5 at /content/drive/My Drive/Colab Notebooks/Transformer/checkpoints_EX/gpu/model\nSave checkpoints\nEpoch 5 Loss 0.1764 Accuracy 0.0337\nTime taken for 1 epoch: 8.25453233718872 secs\n\nEpoch 6 Batch 700 Loss 0.1515 Accuracy 0.0378\nEpoch 6 Batch 750 Loss 0.1420 Accuracy 0.0394\nEpoch 6 Batch 800 Loss 0.1349 Accuracy 0.0406\nEpoch 6 Loss 0.1325 Accuracy 0.0409\nTime taken for 1 epoch: 8.086697101593018 secs\n\nEpoch 7 Batch 850 Loss 0.1088 Accuracy 0.0452\nEpoch 7 Batch 900 Loss 0.1033 Accuracy 0.0465\nEpoch 7 Batch 950 Loss 0.0972 Accuracy 0.0472\nEpoch 7 Loss 0.0972 Accuracy 0.0472\nTime taken for 1 epoch: 7.810199499130249 secs\n\nEpoch 8 Batch 1000 Loss 0.0764 Accuracy 0.0505\nEpoch 8 Batch 1050 Loss 0.0719 Accuracy 0.0516\nEpoch 8 Loss 0.0688 Accuracy 0.0521\nTime taken for 1 epoch: 7.882658004760742 secs\n\nEpoch 9 Batch 1100 Loss 0.0562 Accuracy 0.0555\nEpoch 9 Batch 1150 Loss 0.0547 Accuracy 0.0543\nEpoch 9 Batch 
1200 Loss 0.0511 Accuracy 0.0549\nEpoch 9 Loss 0.0498 Accuracy 0.0550\nTime taken for 1 epoch: 8.144599914550781 secs\n\nEpoch 10 Batch 1250 Loss 0.0411 Accuracy 0.0562\nEpoch 10 Batch 1300 Loss 0.0399 Accuracy 0.0567\nEpoch 10 Batch 1350 Loss 0.0380 Accuracy 0.0566\nSaving checkpoint for epoch 10 at /content/drive/My Drive/Colab Notebooks/Transformer/checkpoints_EX/gpu/model\nSave checkpoints\nEpoch 10 Loss 0.0377 Accuracy 0.0566\nTime taken for 1 epoch: 8.362977266311646 secs\n\nEpoch 11 Batch 1400 Loss 0.0318 Accuracy 0.0570\nEpoch 11 Batch 1450 Loss 0.0317 Accuracy 0.0573\nEpoch 11 Loss 0.0307 Accuracy 0.0571\nTime taken for 1 epoch: 8.18845248222351 secs\n\nEpoch 12 Batch 1500 Loss 0.0290 Accuracy 0.0612\nEpoch 12 Batch 1550 Loss 0.0268 Accuracy 0.0578\nEpoch 12 Batch 1600 Loss 0.0264 Accuracy 0.0577\nEpoch 12 Loss 0.0261 Accuracy 0.0575\nTime taken for 1 epoch: 8.074068069458008 secs\n\nEpoch 13 Batch 1650 Loss 0.0262 Accuracy 0.0573\nEpoch 13 Batch 1700 Loss 0.0248 Accuracy 0.0577\nEpoch 13 Batch 1750 Loss 0.0243 Accuracy 0.0576\nEpoch 13 Loss 0.0245 Accuracy 0.0577\nTime taken for 1 epoch: 8.125264883041382 secs\n\nEpoch 14 Batch 1800 Loss 0.0242 Accuracy 0.0581\nEpoch 14 Batch 1850 Loss 0.0232 Accuracy 0.0580\nEpoch 14 Batch 1900 Loss 0.0231 Accuracy 0.0576\nEpoch 14 Loss 0.0231 Accuracy 0.0576\nTime taken for 1 epoch: 8.105902433395386 secs\n\nEpoch 15 Batch 1950 Loss 0.0229 Accuracy 0.0575\nEpoch 15 Batch 2000 Loss 0.0226 Accuracy 0.0577\nSaving checkpoint for epoch 15 at /content/drive/My Drive/Colab Notebooks/Transformer/checkpoints_EX/gpu/model\nSave checkpoints\nEpoch 15 Loss 0.0227 Accuracy 0.0576\nTime taken for 1 epoch: 8.284334659576416 secs\n\nEpoch 16 Batch 2050 Loss 0.0222 Accuracy 0.0577\nEpoch 16 Batch 2100 Loss 0.0222 Accuracy 0.0574\nEpoch 16 Batch 2150 Loss 0.0218 Accuracy 0.0579\nEpoch 16 Loss 0.0217 Accuracy 0.0578\nTime taken for 1 epoch: 8.095520257949829 secs\n\nEpoch 17 Batch 2200 Loss 0.0215 Accuracy 0.0582\nEpoch 17 Batch 2250 
Loss 0.0218 Accuracy 0.0572\nEpoch 17 Batch 2300 Loss 0.0219 Accuracy 0.0576\nEpoch 17 Loss 0.0218 Accuracy 0.0575\nTime taken for 1 epoch: 7.916942358016968 secs\n\nEpoch 18 Batch 2350 Loss 0.0199 Accuracy 0.0586\nEpoch 18 Batch 2400 Loss 0.0205 Accuracy 0.0580\nEpoch 18 Loss 0.0205 Accuracy 0.0579\nTime taken for 1 epoch: 8.108551740646362 secs\n\nEpoch 19 Batch 2450 Loss 0.0206 Accuracy 0.0584\nEpoch 19 Batch 2500 Loss 0.0208 Accuracy 0.0581\nEpoch 19 Batch 2550 Loss 0.0209 Accuracy 0.0577\nEpoch 19 Loss 0.0207 Accuracy 0.0577\nTime taken for 1 epoch: 8.028493165969849 secs\n\nEpoch 20 Batch 2600 Loss 0.0211 Accuracy 0.0563\nEpoch 20 Batch 2650 Loss 0.0208 Accuracy 0.0575\nEpoch 20 Batch 2700 Loss 0.0204 Accuracy 0.0577\nSaving checkpoint for epoch 20 at /content/drive/My Drive/Colab Notebooks/Transformer/checkpoints_EX/gpu/model\nSave checkpoints\nEpoch 20 Loss 0.0202 Accuracy 0.0578\nTime taken for 1 epoch: 8.122268915176392 secs\n\nEpoch 21 Batch 2750 Loss 0.0200 Accuracy 0.0570\nEpoch 21 Batch 2800 Loss 0.0200 Accuracy 0.0577\nEpoch 21 Batch 2850 Loss 0.0200 Accuracy 0.0579\nEpoch 21 Loss 0.0200 Accuracy 0.0579\nTime taken for 1 epoch: 8.118796586990356 secs\n\nEpoch 22 Batch 2900 Loss 0.0199 Accuracy 0.0577\nEpoch 22 Batch 2950 Loss 0.0198 Accuracy 0.0580\nEpoch 22 Loss 0.0200 Accuracy 0.0577\nTime taken for 1 epoch: 7.864755868911743 secs\n\nEpoch 23 Batch 3000 Loss 0.0220 Accuracy 0.0584\nEpoch 23 Batch 3050 Loss 0.0198 Accuracy 0.0579\nEpoch 23 Batch 3100 Loss 0.0198 Accuracy 0.0579\nEpoch 23 Loss 0.0198 Accuracy 0.0578\nTime taken for 1 epoch: 8.238301515579224 secs\n\nEpoch 24 Batch 3150 Loss 0.0205 Accuracy 0.0590\nEpoch 24 Batch 3200 Loss 0.0197 Accuracy 0.0582\nEpoch 24 Batch 3250 Loss 0.0194 Accuracy 0.0580\nEpoch 24 Loss 0.0195 Accuracy 0.0579\nTime taken for 1 epoch: 8.257582187652588 secs\n\nEpoch 25 Batch 3300 Loss 0.0199 Accuracy 0.0576\nEpoch 25 Batch 3350 Loss 0.0196 Accuracy 0.0580\nSaving checkpoint for epoch 25 at /content/drive/My 
Drive/Colab Notebooks/Transformer/checkpoints_EX/gpu/model\nSave checkpoints\nEpoch 25 Loss 0.0197 Accuracy 0.0579\nTime taken for 1 epoch: 8.398127555847168 secs\n\nEpoch 26 Batch 3400 Loss 0.0206 Accuracy 0.0662\nEpoch 26 Batch 3450 Loss 0.0198 Accuracy 0.0580\nEpoch 26 Batch 3500 Loss 0.0197 Accuracy 0.0577\nEpoch 26 Loss 0.0198 Accuracy 0.0578\nTime taken for 1 epoch: 8.083030939102173 secs\n\nEpoch 27 Batch 3550 Loss 0.0193 Accuracy 0.0578\nEpoch 27 Batch 3600 Loss 0.0201 Accuracy 0.0577\nEpoch 27 Batch 3650 Loss 0.0199 Accuracy 0.0577\nEpoch 27 Loss 0.0197 Accuracy 0.0577\nTime taken for 1 epoch: 8.034573316574097 secs\n\nEpoch 28 Batch 3700 Loss 0.0191 Accuracy 0.0579\nEpoch 28 Batch 3750 Loss 0.0191 Accuracy 0.0584\nEpoch 28 Batch 3800 Loss 0.0191 Accuracy 0.0581\nEpoch 28 Loss 0.0191 Accuracy 0.0579\nTime taken for 1 epoch: 8.070529460906982 secs\n\nEpoch 29 Batch 3850 Loss 0.0201 Accuracy 0.0582\nEpoch 29 Batch 3900 Loss 0.0195 Accuracy 0.0580\nEpoch 29 Loss 0.0193 Accuracy 0.0581\nTime taken for 1 epoch: 7.996587514877319 secs\n\nEpoch 30 Batch 3950 Loss 0.0183 Accuracy 0.0553\nEpoch 30 Batch 4000 Loss 0.0192 Accuracy 0.0572\nEpoch 30 Batch 4050 Loss 0.0191 Accuracy 0.0575\nSaving checkpoint for epoch 30 at /content/drive/My Drive/Colab Notebooks/Transformer/checkpoints_EX/gpu/model\nSave checkpoints\nEpoch 30 Loss 0.0192 Accuracy 0.0577\nTime taken for 1 epoch: 8.247257471084595 secs\n\n"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
4a219a72cdc398b32a7609135c88abf95c4a2f53
| 285,987 |
ipynb
|
Jupyter Notebook
|
fundamentals_2018.9/distributions/example.ipynb
|
topseer/APL_Great
|
7ae3bd06e4d520d023bc9b992c88b6e3c758d551
|
[
"MIT"
] | null | null | null |
fundamentals_2018.9/distributions/example.ipynb
|
topseer/APL_Great
|
7ae3bd06e4d520d023bc9b992c88b6e3c758d551
|
[
"MIT"
] | null | null | null |
fundamentals_2018.9/distributions/example.ipynb
|
topseer/APL_Great
|
7ae3bd06e4d520d023bc9b992c88b6e3c758d551
|
[
"MIT"
] | null | null | null | 310.181128 | 54,088 | 0.926399 |
[
[
[
"%matplotlib inline",
"_____no_output_____"
],
[
"import numpy as np\nimport scipy.stats as stats\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport random\nimport statsmodels.api as sm\n\nsns.set(style=\"whitegrid\")",
"_____no_output_____"
]
],
[
[
"# Example\n\nWe're now in a position to return to our housing data for King County, Washington to make some of these more abstract concepts concrete.",
"_____no_output_____"
]
],
[
[
"data = pd.read_csv(\"../resources/data/kc_house_data.csv\")",
"_____no_output_____"
]
],
[
[
"## Waterfront \nLet's look at a few variables from that data set. The first one we'll look at is `waterfront`. If you remember, waterfront indicates whether or not the property is on the water. There are two possible outcomes and so this makes the Bernoulli distribution a good model for this data.",
"_____no_output_____"
]
],
[
[
"data[\"waterfront\"].value_counts().sort_index()",
"_____no_output_____"
]
],
[
[
"To set the context, remember that, overall, we have a \"home sales process\" that we're interested in. The property having a waterfront is a binary feature of each home sale and we're going to model it using a Bernoulli distribution.\n\nThe single parameter of the Bernoulli distribution is $p$, the probability of \"success\". In this case, \"success\" means \"is waterfront\". We can estimate $p$ using the Method of Moments:",
"_____no_output_____"
]
],
[
[
"p = np.mean(data[\"waterfront\"])\nprint(\"p = \", p)",
"p = 0.007541757275713691\n"
]
],
[
[
"So there's a 0.7% chance that the next house sold is a water front property. Note that we can't say anything about, say, the next house to come up for sale because the data only covers houses sold and not houses offered for sale (people may take their houses off the market if they don't sell or if they decided not to sell).\n\nAlso notice how we went from a descriptive statistic (0.7% of the homes sold have a water front) to a model (there is a 0.7% probability that the next home sold will have a water front).\n\nBecause we have only limited data from the data generating process, this estimate actually has some uncertainty associated with it. Is it really 0.7% or is it 0.6% or 1.0%? We will address this issue in the next chapter. For now, we're going to take our models at face value.\n\nBy turning to modeling, we've opened up a lot more interesting questions we can ask though. How does `waterfront` affect `view`, $P(view|waterfront)$?",
"_____no_output_____"
]
],
[
[
"frequencies = pd.crosstab( data[\"waterfront\"], data[\"view\"]).apply(lambda r: r/r.sum(), axis=1)\nprint(frequencies)\n\nsns.heatmap( frequencies)",
"view 0 1 2 3 4\nwaterfront \n0 0.908578 0.015431 0.044522 0.022890 0.008578\n1 0.000000 0.006135 0.049080 0.116564 0.828221\n"
]
],
[
[
"This gives us two *multinomial* distributions, $P(view|waterfront=0)$ and $P(view|waterfront=1)$. Our parameters are $p_0$, $p_1$, $p_2$, $p_3$, and $p_4$ for each value of waterfront. Because the sum the parameters must equal one, we don't need to estimate the $p_4$'s directly.\n\nIf we have $waterfront=0$, then there's a 90.9% probability you have a \"0\" view. However, if you have a $waterfront=1$, then there's a 0.0% probability you have a \"0\" view and an 82.8% probability you have the best view, \"4\".\n\nSo we're pretty much guaranteed a good view if the property is water front. What about the reverse?",
"_____no_output_____"
]
],
[
[
"frequencies = pd.crosstab( data[\"view\"], data[\"waterfront\"]).apply(lambda r: r/r.sum(), axis=1)\nprint(frequencies)\n\nsns.heatmap( frequencies)",
"waterfront 0 1\nview \n0 1.000000 0.000000\n1 0.996988 0.003012\n2 0.991693 0.008307\n3 0.962745 0.037255\n4 0.576803 0.423197\n"
]
],
[
[
"Here we have 5 Bernoulli distributions. Given a value for view, we have some probability of being waterfront. If the view is \"4\", then the probability of waterfront is 42%. Isn't that interesting? While having waterfront nearly guarantees the best view, having the best view doesn't guarantee by a long shot (it's nearly a toss up) having waterfront property.",
"_____no_output_____"
],
[
"## Bedrooms\n\nNow let's look at bedrooms in a house. In the EDA chapter, we treated this variable as both a categorical and a numerical variable. However, it seems like \"bedrooms per house\" makes the Poisson distribution an obvious choice. Using the Poisson distribution will also be good because it lets us estimate the probabilities of room counts we haven't seen. Let's start with that.\n\nLet's get our basic descriptive statistics:",
"_____no_output_____"
]
],
[
[
"data[\"bedrooms\"].describe()",
"_____no_output_____"
]
],
[
[
"According to the previous section, the Method of Moments estimator for the $\\lambda$ parameter of the Poisson distribution is $m_1$.",
"_____no_output_____"
]
],
[
[
"from scipy.stats import poisson",
"_____no_output_____"
],
[
"proportions = data[\"bedrooms\"].value_counts(normalize=True).sort_index()\nxs = range( len( proportions))\nwidth = 1/1.5\n\nlamb = np.mean(data[\"bedrooms\"]) # m1\nys = [poisson.pmf( x, lamb, 0) for x in xs]\n\nfigure = plt.figure(figsize=(10, 6))\n\naxes = figure.add_subplot(1, 1, 1)\naxes.bar(xs, proportions, width, color=\"dimgray\", align=\"center\")\naxes.set_xlabel(\"Bedrooms\")\naxes.set_xticks(xs)\naxes.set_xticklabels(proportions.axes[0])\naxes.set_title( \"Relative Frequency of bedrooms\")\naxes.set_ylabel( \"Percent\")\naxes.xaxis.grid(False)\naxes.plot( xs, ys, color=\"darkred\", marker=\"o\")\n\nplt.show()\nplt.close()",
"_____no_output_____"
]
],
[
[
"Our first problem is that pesky 33 room house. Aside from that, the Poisson distribution is not a good model for this data. We can see that it severely underestimates the number of 3 and 4 bedroom houses and overestimates the number of 1, 2, and 5 bedroom houses.\n\nNote that this is our general criteria for picking models: does it work. We never ask \"is the data normally distributed\" because data isn't normally distributed. Normal distributions don't exist out there in the real world. The question is always about modeling, is this a good model?\n\nAnd that's a good time to note that a multinomial model probably is the better model here:",
"_____no_output_____"
]
],
[
[
"data[\"bedrooms\"].value_counts(normalize=True).sort_index()",
"_____no_output_____"
]
],
[
[
"It's also worth noting that means and rates such as these may be all that you need to do to answer your question or solve your problem. I have often mentioned that in the beginning, such models are all that many organizations need to start.",
"_____no_output_____"
],
[
"## Living Space Square Footage\n\nNow we turn our attention to a continuous, numerical variable: `sqft_living`:",
"_____no_output_____"
]
],
[
[
"data[\"sqft_living\"].describe()",
"_____no_output_____"
]
],
[
[
"So we have our first model. The mean is 2079 and we can set our expectation that \"on average\" (which we now know is a way of saying \"to minimize our error\") a home that sells next month is likely to have 2079 square feet of living space. Of course, it won't be exact but over the course of all those predictions, our error will be minimized.\n\nIn many ways, this is the difference between merely describing data (and just reporting the values) and using data to build models and predicting future values.\n\nIf an accountant tells you we had $23,000,000 in purchases last month. That's descriptive. If you take that number and use it as an estimate for next month, that's a model.",
"_____no_output_____"
],
[
"We might like to ask more interesting questions than, what is the average square footage of the next house likely to be. We could ask a question like, what is the probability of a house with over 3,000 square feet. This is one of the ways in which distributional modeling is useful. We've already seen this to some degree. What's the probability of a house with 6 rooms? 1.25%.\n\nBut first we need to pick a model.\n\nEven before we look at the histogram, the \"square foot\" should give us pause for thought. It's unlikely that anything involving *squares* is going to result in a Normal distribution because the Normal distribution is usually associated with additive processes.",
"_____no_output_____"
]
],
[
[
"figure = plt.figure(figsize=(10, 6))\n\naxes = figure.add_subplot(1, 1, 1)\naxes.hist(data[ \"sqft_living\"], color=\"DimGray\", density=True)\naxes.set_xlabel( \"sqft\")\naxes.set_ylabel( \"Density\")\naxes.set_title(\"Density Histogram of sqft_living; default bins\")\n\nplt.show()\nplt.close()",
"_____no_output_____"
]
],
[
[
"Definitely not normally distributed. Let's do two things. First, we want smaller bins so we can see the detail in the data. Second, let's plot a Normal distribution on top of the histogram.",
"_____no_output_____"
]
],
[
[
"from scipy.stats import norm",
"_____no_output_____"
],
[
"figure = plt.figure(figsize=(10,6))\n\naxes = figure.add_subplot(1, 1, 1)\nn, bins, patches = axes.hist(data[ \"sqft_living\"], color=\"DimGray\", density=True,bins=20, alpha=0.75)\naxes.set_xlabel( \"sqft\")\naxes.set_ylabel( \"Density\")\naxes.set_title(\"Density Histogram of sqft_living with Normal plot\")\n\nxs = [(b2 + b1)/2 for b1, b2 in zip(bins, bins[1:])] \n\nmean = np.mean(data[\"sqft_living\"])\nstd = np.std(data[\"sqft_living\"])\nys = [norm.pdf( k, loc=mean, scale=std) for k in xs]\naxes.plot(xs, ys, color=\"darkred\")\n\nplt.show()\nplt.close()",
"_____no_output_____"
]
],
[
[
"Notice that we have to estimate the Normal distribution at the middle of the bins instead of the bin edges.\n\nIn general, this is probably a poor model. It strongly overestimates high square footage homes and underestimates low square footage homes. For what it's worth, it is often suggested that you look at *cumulative distributions*, not probability distributions.",
"_____no_output_____"
]
],
[
[
"figure = plt.figure(figsize=(20, 8))\n\nmn = np.min(data[\"sqft_living\"])\nmx = np.max(data[\"sqft_living\"])\nmean = np.mean( data[ \"sqft_living\"])\nstd = np.std( data[ \"sqft_living\"])\n\naxes = figure.add_subplot(1, 2, 1)\n\nvalues, base = np.histogram(data[ \"sqft_living\"], bins=11, density=True)\ncumulative = np.cumsum(values)\naxes.plot(base[:-1], cumulative, color=\"steelblue\")\naxes.set_xlim((mn, mx))\n\nsampled_data = [mean + r * std for r in np.random.standard_normal(10000)]\nvalues2, base = np.histogram(sampled_data, bins=base, density=True)\ncumulative2 = np.cumsum(values2)\naxes.plot( base[:-1], cumulative2, color=\"firebrick\")\naxes.set_xlim((np.min( data[ \"sqft_living\"]), np.max( data[ \"sqft_living\"])))\naxes.set_xlabel( \"Empirical v. Theoretical: Normal Distribution\")\n\naxes = figure.add_subplot(1, 2, 2)\n\ndifferences = cumulative2 - cumulative\naxes.plot(base[:-1], differences, color='firebrick')\naxes.set_xlim((mn, mx))\naxes.hlines(0, 0, 14000, linestyles=\"dotted\")\naxes.set_xlabel( \"Empirical v. Theoretical: Normal Distribution, Difference\")\n\nplt.show()\nplt.close()",
"_____no_output_____"
]
],
[
[
"First, it's worth noting that in order to get data from the theoretical distribution I resorted to Monte Carlo simulation:\n\n```\nsampled_data = [mean + r * std for r in np.random.standard_normal(10000)]\nvalues2, base = np.histogram(sampled_data, bins=base, density=True)\n```\n\nThis isn't the only way to do this but it shows you that there are alternative approaches.\n\nSecond, we have taken to heart what we learned in the Visualization chapter and we have also plotted the *difference* between the curves because that's actually what we're interested in. While we can see there are differences between the blue and red lines in the left chart, the differences are obvious in the right chart.\n\nYou will often see \"PP\" or \"QQ\" plots mentioned. They're not my favorite but a QQ-plot plots quantiles of data. For a particular parameterization of a distribution, a certain percent of the data should appear in each quantile. If the reference distribution and empirical distributions are largely the same, they will have the same percentages per quantile.\n\nIf you plot the empirical quantiles against the theoretical quantiles (as in a scatter plot), they should appear on or near the $x = y$ line if the reference distribution is a good match for your data.",
"_____no_output_____"
]
],
[
[
"figure = plt.figure(figsize=(6, 6))\n\naxes = figure.add_subplot(1, 1, 1)\nstats.probplot(data[ \"sqft_living\"], dist=\"norm\", plot=axes)\n\nplt.show()\nplt.close()",
"_____no_output_____"
]
],
[
[
"The answer is still \"no\".\n\nLet's see if we can do better. In the discussion about the Normal distribution we noted that we sometimes need to use a Log Normal distribution and based on our EDA from the previous chapter, this is probably where we should have started.",
"_____no_output_____"
]
],
[
[
"data[\"log_sqft_living\"] = data[ \"sqft_living\"].apply(lambda x: np.log10(x))",
"_____no_output_____"
],
[
"figure = plt.figure(figsize=(20, 8))\n\nmn = np.min(data[\"log_sqft_living\"])\nmx = np.max(data[\"log_sqft_living\"])\nmean = np.mean( data[ \"log_sqft_living\"])\nstd = np.std( data[ \"log_sqft_living\"])\n\naxes = figure.add_subplot(1, 2, 1)\n\nvalues, base = np.histogram(data[ \"log_sqft_living\"], bins=11, density=True)\ncumulative = np.cumsum(values)\naxes.plot(base[:-1], cumulative, color=\"steelblue\")\naxes.set_xlim((mn, mx))\n\nsampled_data = [mean + r * std for r in np.random.standard_normal(10000)]\nvalues2, base = np.histogram(sampled_data, bins=base, density=True)\ncumulative2 = np.cumsum(values2)\naxes.plot( base[:-1], cumulative2, color=\"firebrick\")\naxes.set_xlim((np.min( data[ \"log_sqft_living\"]), np.max( data[ \"log_sqft_living\"])))\naxes.set_xlabel( \"Empirical v. Theoretical: Normal Distribution\")\n\naxes = figure.add_subplot(1, 2, 2)\n\ndifferences = cumulative2 - cumulative\naxes.plot(base[:-1], differences, color='firebrick')\naxes.set_xlim((mn, mx))\naxes.hlines(0, 0, 14000, linestyles=\"dotted\")\naxes.set_xlabel( \"Empirical v. Theoretical: Normal Distribution, Difference\")\n\nplt.show()\nplt.close()",
"_____no_output_____"
]
],
[
[
"Unfortunately, changing to the log scale, changed the density scale and we can't compare what the error actually is here on the right. On the left, this fit looks perfect. Let's switch back to a PDF (probability density function):",
"_____no_output_____"
]
],
[
[
"figure = plt.figure(figsize=(10,6))\n\naxes = figure.add_subplot(1, 1, 1)\nn, bins, patches = axes.hist(data[ \"log_sqft_living\"], color=\"DimGray\", density=True,bins=20, alpha=0.75)\naxes.set_xlabel( \"sqft\")\naxes.set_ylabel( \"Density\")\naxes.set_title(\"Density Histogram of log_sqft_living with Normal plot\")\n\nxs = [(b2 + b1)/2 for b1, b2 in zip(bins, bins[1:])] \n\nmean = np.mean(data[\"log_sqft_living\"])\nstd = np.std(data[\"log_sqft_living\"])\nys = [norm.pdf( k, loc=mean, scale=std) for k in xs]\naxes.plot(xs, ys, color=\"darkred\")\n\nplt.show()\nplt.close()",
"_____no_output_____"
]
],
[
[
"So what can we do with this model?\n\nSuppose we want to know the probability of a house that sells in the next month has 3000 or more feet.\n\n1\\. What is the log of 3000?",
"_____no_output_____"
]
],
[
[
"np.log10(3000)",
"_____no_output_____"
]
],
[
[
"2\\. We're dealing with probability densities in continuous distributions. There are three basic questions we can ask:\n1. What is the probability of x or larger (use Survival Function).\n2. What is the probability of x or less (use CDF).\n3. What is the probability of x to y (CDF - CDF).\n\nOur current question is the first one:",
"_____no_output_____"
]
],
[
[
"stats.norm.sf(np.log10(3000),mean,std)",
"_____no_output_____"
]
],
[
[
"There is a 14.2% probability that a sold home has a square footage of 3,000 or greater.\n\nWhat about less than 1,200 square feet?",
"_____no_output_____"
]
],
[
[
"stats.norm.cdf(np.log10(1200),mean,std)",
"_____no_output_____"
]
],
[
[
"There is a 13.9% probability that a sold home has square footage of 1,000 or less.\n\nWhat about between 2,000 and 3,000 square feet?",
"_____no_output_____"
]
],
[
[
"stats.norm.cdf(np.log10(3000),mean,std) - stats.norm.cdf(np.log10(2000),mean,std)",
"_____no_output_____"
]
],
[
[
"There is a 31.1% probability that the square footage of that sold home is between 2,000 and 3,000 square feet.",
"_____no_output_____"
],
[
"These kinds of distributional models are incredibly useful. I started to implement such a model at a company I worked at.\n\nWe had a workflow environment (like Oozie or Airflow) that ran jobs for us every day. Unfortunately, some of the jobs would get stuck. If you restart a job, it has to start all over again and that could mean repeating some lengthy computations. The goal was to model the duration of workflows so that you could say \"warn me if the execution time of this workflow exceeds 10%\". \n\nIn other words, the monitor could poll a workflow and determine the probability that it would take the \"time so far\" to complete. Workflows are a good example of Exponentially distributed data (actually, Shifted Exponential). They're likely to take around some amount of time (45 minutes) but less likely to take much more than that amount of time.\n\nIf the current running time was, say, 67 minutes and the probability of taking that long was less than 10%, I could be warned that a workflow might be stuck.\n\nOne advantage to models is that if you use only the empirical distribution, you may not account for values that haven't been observed. Additionally, your model is only as good as your data. \n\nOne of the criticisms of the Fukushima Daiichi reactor in Japan was that it was built to withstand the largest recorded earthquake in Japan. That's using the empirical distribution. However, a model of earthquakes suggested a stronger earthquake was possible and there had been larger earthquakes recorded elsewhere in the world. Both of these suggest that the empirical distribution was a bad modeling choice.\n\nFinally, we reiterate that our model parameters are all based on a single sample. We'll see shortly how to deal with that issue using Bayesian inference.",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
4a21afe6a44972ed112f81b7fde430e2cc92c528
| 7,045 |
ipynb
|
Jupyter Notebook
|
Array/0925/954. Array of Doubled Pairs.ipynb
|
YuHe0108/Leetcode
|
90d904dde125dd35ee256a7f383961786f1ada5d
|
[
"Apache-2.0"
] | 1 |
2020-08-05T11:47:47.000Z
|
2020-08-05T11:47:47.000Z
|
Array/0925/954. Array of Doubled Pairs.ipynb
|
YuHe0108/LeetCode
|
b9e5de69b4e4d794aff89497624f558343e362ad
|
[
"Apache-2.0"
] | null | null | null |
Array/0925/954. Array of Doubled Pairs.ipynb
|
YuHe0108/LeetCode
|
b9e5de69b4e4d794aff89497624f558343e362ad
|
[
"Apache-2.0"
] | null | null | null | 24.632867 | 103 | 0.411356 |
[
[
[
"说明:\n 给定具有偶数长度的整数A的数组,当且仅当有可能重新排序以使\n A [2 * i + 1] = 2 * A [2 * i] 当 0 <= i < len(A )/ 2。\n\n要求:重新排序,idx是奇数的位置是idx是偶数位置的二倍。\n\nExample 1:\n Input: [3,1,3,6]\n Output: false\n\nExample 2:\n Input: [2,1,2,6]\n Output: false\n\nExample 3:\n Input: [4,-2,2,-4]\n Output: true\n Explanation: We can take two groups, [-2,-4] and [2,4] to form [-2,-4,2,4] or [2,4,-2,-4].\n\nExample 4:\n Input: [1,2,4,16,8,4]\n Output: false\n\nNote:\n 1、0 <= A.length <= 30000\n 2、A.length is even\n 3、-100000 <= A[i] <= 100000",
"_____no_output_____"
]
],
[
[
"# i 可能的取值:0、2、4、6、len(A)\nfrom collections import Counter\n\nclass Solution:\n def canReorderDoubled(self, A):\n if not A: return True\n a_freq = Counter(A)\n seen = set()\n for a in A:\n if a in seen: continue\n if a_freq[a] == 0: \n seen.add(a)\n continue\n if a_freq[a * 2] >= a_freq[a] and a * 2 not in seen:\n a_freq[a * 2] -= a_freq[a]\n elif a % 2 == 0 and a_freq[a // 2] >= a_freq[a] and a // 2 not in seen:\n a_freq[a // 2] -= a_freq[a]\n else:\n return False\n return True",
"_____no_output_____"
],
[
"from collections import Counter\n\nclass Solution:\n def canReorderDoubled(self, A):\n if not A: return True\n a_freq = Counter(A)\n \n for a in sorted(a_freq.keys(), key=abs):\n if a_freq[a] == 0: \n continue\n if a == 0 and a_freq[0] % 2 == 0:\n a_freq[0] = 0\n continue\n if a_freq[a * 2] > 0:\n min_val = min(a_freq[a * 2], a_freq[a])\n a_freq[a * 2] -= min_val\n a_freq[a] -= min_val\n return all(not v for v in a_freq.values())",
"_____no_output_____"
],
[
"solution = Solution()\nsolution.canReorderDoubled([-6,2,-6,4,-3,8,3,2,-2,6,1,-3,-4,-4,-8,4])",
"Counter({-6: 2, 2: 2, 4: 2, -3: 2, -4: 2, 8: 1, 3: 1, -2: 1, 6: 1, 1: 1, -8: 1})\n"
],
[
"from collections import Counter\n\nclass Solution:\n def canReorderDoubled(self, A):\n a_freq = Counter(A)\n for n in sorted(a_freq.keys(), key=abs):\n double = 2 * n\n while a_freq[n] > 0 and a_freq[double] > 0:\n double = 2 * n\n a_freq[n] -= 1\n a_freq[double] -= 1\n return all(not v for v in a_freq.values())",
"_____no_output_____"
],
[
"solution = Solution()\nsolution.canReorderDoubled([-6,2,-6,4,-3,8,3,2,-2,6,1,-3,-4,-4,-8,4])",
"Counter({-6: 0, 2: 0, 4: 0, -3: 0, 8: 0, 3: 0, -2: 0, 6: 0, 1: 0, -4: 0, -8: 0})\n"
],
[
"if 0:\n print(2)",
"_____no_output_____"
],
[
"class Solution:\n def canReorderDoubled(self, A: List[int]) -> bool:\n c = Counter(A)\n for n in sorted(c.keys(), key=abs):\n while c[n] > 0 and c[(double := 2 * n)] > 0:\n c[n] -= 1\n c[double] -= 1\n return all(not v for v in c.values())",
"_____no_output_____"
],
[
"from collections import Counter\n\nclass Solution:\n def canReorderDoubled(self, A):\n if not A: return True\n a_freq = Counter(A)\n \n for a in sorted(a_freq.keys(), key=abs):\n if a_freq[a] == 0: \n continue\n if a == 0 and a_freq[0] % 2 == 0:\n a_freq[0] = 0\n continue\n if a_freq[a * 2] > 0:\n min_val = min(a_freq[a * 2], a_freq[a])\n a_freq[a * 2] -= min_val\n a_freq[a] -= min_val\n return all(not v for v in a_freq.values())",
"_____no_output_____"
],
[
"solution = Solution()\nsolution.canReorderDoubled([-6,2,-6,4,-3,8,3,2,-2,6,1,-3,-4,-4,-8,4])",
"1\n2\n-2\n-3\n3\n4\n-4\n-6\n6\n8\n-8\n"
]
]
] |
[
"raw",
"code"
] |
[
[
"raw"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a21b36a753ca7fa204a1f9b9ded0136d2cff595
| 200,034 |
ipynb
|
Jupyter Notebook
|
Recommendations_with_IBM.ipynb
|
a-atef/Recommendation-Systems-IBM-Articles
|
53904622c4f2a2ede47f1d7f5867989cd113c5d5
|
[
"MIT"
] | 1 |
2020-10-19T21:15:43.000Z
|
2020-10-19T21:15:43.000Z
|
Recommendations_with_IBM.ipynb
|
a-atef/Recommendation-Systems-IBM-Articles
|
53904622c4f2a2ede47f1d7f5867989cd113c5d5
|
[
"MIT"
] | null | null | null |
Recommendations_with_IBM.ipynb
|
a-atef/Recommendation-Systems-IBM-Articles
|
53904622c4f2a2ede47f1d7f5867989cd113c5d5
|
[
"MIT"
] | null | null | null | 77.143849 | 46,784 | 0.752007 |
[
[
[
"# Recommendations with IBM\n\nIn this notebook, you will be putting your recommendation skills to use on real data from the IBM Watson Studio platform. \n\n\nYou may either submit your notebook through the workspace here, or you may work from your local machine and submit through the next page. Either way assure that your code passes the project [RUBRIC](https://review.udacity.com/#!/rubrics/2322/view). **Please save regularly.**\n\nBy following the table of contents, you will build out a number of different methods for making recommendations that can be used for different situations. \n\n\n## Table of Contents\n\nI. [Exploratory Data Analysis](#Exploratory-Data-Analysis)<br>\nII. [Rank Based Recommendations](#Rank)<br>\nIII. [User-User Based Collaborative Filtering](#User-User)<br>\nIV. [Content Based Recommendations (EXTRA - NOT REQUIRED)](#Content-Recs)<br>\nV. [Matrix Factorization](#Matrix-Fact)<br>\nVI. [Extras & Concluding](#conclusions)\n\nAt the end of the notebook, you will find directions for how to submit your work. Let's get started by importing the necessary libraries and reading in the data.",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport project_tests as t\nimport pickle\nimport seaborn as sns\nfrom scipy import stats\nimport nltk\nfrom nltk.tokenize import word_tokenize\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.corpus import stopwords\n\n%matplotlib inline\nnltk.download('punkt')\nnltk.download('wordnet')\nnltk.download('stopwords')\nnltk.download('abc')\n\ndf = pd.read_csv('data/user-item-interactions.csv')\ndf_content = pd.read_csv('data/articles_community.csv')\ndel df['Unnamed: 0']\ndel df_content['Unnamed: 0']\n\n# Show df to get an idea of the data\ndf.head()",
"[nltk_data] Downloading package punkt to /root/nltk_data...\n[nltk_data] Package punkt is already up-to-date!\n[nltk_data] Downloading package wordnet to /root/nltk_data...\n[nltk_data] Package wordnet is already up-to-date!\n[nltk_data] Downloading package stopwords to /root/nltk_data...\n[nltk_data] Package stopwords is already up-to-date!\n[nltk_data] Downloading package abc to /root/nltk_data...\n[nltk_data] Package abc is already up-to-date!\n"
],
[
"# inspect the first row\ndf.iloc[0]['title']",
"_____no_output_____"
],
[
"# show df_content to get an idea of the data\ndf_content.head()",
"_____no_output_____"
],
[
"print(df_content.iloc[0])",
"doc_body Skip navigation Sign in SearchLoading...\\r\\n\\r...\ndoc_description Detect bad readings in real time using Python ...\ndoc_full_name Detect Malfunctioning IoT Sensors with Streami...\ndoc_status Live\narticle_id 0\nName: 0, dtype: object\n"
]
],
[
[
"### <a class=\"anchor\" id=\"Exploratory-Data-Analysis\">Part I : Exploratory Data Analysis</a>\n\nUse the dictionary and cells below to provide some insight into the descriptive statistics of the data.\n\n`1.` What is the distribution of how many articles a user interacts with in the dataset? Provide a visual and descriptive statistics to assist with giving a look at the number of times each user interacts with an article. ",
"_____no_output_____"
]
],
[
[
"# group by email\ndef group_by_title(df, column = \"email\"):\n \"\"\"group the user-item interactions dataframe by column.\n \n Args:\n df (Dataframe): a dataframe object \n column (string): column to group by \n per column.\n \n Returns:\n Dataframe: dataframe of user article interaction counts sorted in descending order\n \n \"\"\"\n \n df_title_counts = df.groupby(['email']).size().reset_index(name='counts')\n df_title_counts = df_title_counts.sort_values(by=['counts'], ascending=False)\n return df_title_counts\n\nuser_article_counts = group_by_title(df)\nuser_article_counts.head()",
"_____no_output_____"
],
[
"print( \"The number of articles are {} and users interacted {} times these articles\" \\\n .format(user_article_counts.shape[0], user_article_counts[\"counts\"].sum()))",
"The number of articles are 5148 and users interacted 45976 times these articles\n"
],
[
"def histogram (df, column = \"counts\", title=\"Distribution of user articles interactions\"):\n \"\"\"Create a distribution of user article interactions.\n \n Args:\n df (Dataframe): a dataframe object \n column (string): column that holds article counts \n title (string): the title of the distribution chart\n \n Returns:\n figure: a matplotlib distribution figure of article counts\n \n \"\"\"\n \n sns.set(color_codes=True)\n plt.figure(figsize=(15,8))\n sns.distplot(df[column], kde=False, hist_kws=dict(edgecolor=\"k\", linewidth=2))\n plt.xlabel(title)\n plt.ylabel('Frequency');\nhistogram(user_article_counts)",
"_____no_output_____"
],
[
"# fill in the median and maximum number of user_article interactios below\nmedian_val = user_article_counts[\"counts\"].median() # 50% of individuals interact with 3 number of articles or fewer.\nmax_views_by_user = user_article_counts[\"counts\"].max() # The maximum number of user-article interactions by any 1 user is 364.",
"_____no_output_____"
]
],
[
[
"`2.` Explore and remove duplicate articles from the **df_content** dataframe. ",
"_____no_output_____"
]
],
[
[
"# find and explore duplicate articles\ndf_content.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 1056 entries, 0 to 1055\nData columns (total 5 columns):\ndoc_body 1042 non-null object\ndoc_description 1053 non-null object\ndoc_full_name 1056 non-null object\ndoc_status 1056 non-null object\narticle_id 1056 non-null int64\ndtypes: int64(1), object(4)\nmemory usage: 41.3+ KB\n"
],
[
"# get duplicate articles\ndf_content[df_content.duplicated(subset=\"article_id\")]",
"_____no_output_____"
],
[
"# remove any rows that have the same article_id - only keep the first\ndf_content_clean = df_content.drop_duplicates(subset=\"article_id\")",
"_____no_output_____"
],
[
"assert df_content_clean.shape[0] + 5 == df_content.shape[0]",
"_____no_output_____"
]
],
[
[
"`3.` Use the cells below to find:\n\n**a.** The number of unique articles that have an interaction with a user. \n**b.** The number of unique articles in the dataset (whether they have any interactions or not).<br>\n**c.** The number of unique users in the dataset. (excluding null values) <br>\n**d.** The number of user-article interactions in the dataset.",
"_____no_output_____"
]
],
[
[
"# the number of unique articles that have an interaction with a user.\ndf.article_id.nunique()",
"_____no_output_____"
],
[
"# the number of unique articles in the dataset (whether they have any interactions or not).\ndf_content_clean.article_id.nunique()",
"_____no_output_____"
],
[
"# the number of unique users in the dataset. (excluding null values) \ndf.email.nunique()",
"_____no_output_____"
],
[
"# the number of user-article interactions in the dataset.\ndf.shape[0]",
"_____no_output_____"
],
[
"unique_articles = 714 # The number of unique articles that have at least one interaction\ntotal_articles = 1051 # The number of unique articles on the IBM platform\nunique_users = 5148 # The number of unique users\nuser_article_interactions = 45993 # The number of user-article interactions",
"_____no_output_____"
]
],
[
[
"`4.` Use the cells below to find the most viewed **article_id**, as well as how often it was viewed. After talking to the company leaders, the `email_mapper` function was deemed a reasonable way to map users to ids. There were a small number of null values, and it was found that all of these null values likely belonged to a single user (which is how they are stored using the function below).",
"_____no_output_____"
]
],
[
[
"# most viewed article\narticle_counts = df.groupby(['article_id']).count()\narticle_counts['email'].max()",
"_____no_output_____"
],
[
"article_counts.sort_values(\"email\", ascending=False).iloc[0,:]",
"_____no_output_____"
],
[
"most_viewed_article_id = \"1429.0\" # the most viewed article in the dataset as a string with one value following the decimal \nmax_views = 937 # the most viewed article in the dataset was viewed how many times?",
"_____no_output_____"
],
[
"## No need to change the code here - this will be helpful for later parts of the notebook\n# Run this cell to map the user email to a user_id column and remove the email column\n\ndef email_mapper():\n coded_dict = dict()\n cter = 1\n email_encoded = []\n \n for val in df['email']:\n if val not in coded_dict:\n coded_dict[val] = cter\n cter+=1\n \n email_encoded.append(coded_dict[val])\n return email_encoded\n\nemail_encoded = email_mapper()\ndel df['email']\ndf['user_id'] = email_encoded\n\n# show header\ndf.head()",
"_____no_output_____"
],
[
"## If you stored all your results in the variable names above, \n## you shouldn't need to change anything in this cell\n\nsol_1_dict = {\n '`50% of individuals have _____ or fewer interactions.`': median_val,\n '`The total number of user-article interactions in the dataset is ______.`': user_article_interactions,\n '`The maximum number of user-article interactions by any 1 user is ______.`': max_views_by_user,\n '`The most viewed article in the dataset was viewed _____ times.`': max_views,\n '`The article_id of the most viewed article is ______.`': most_viewed_article_id,\n '`The number of unique articles that have at least 1 rating ______.`': unique_articles,\n '`The number of unique users in the dataset is ______`': unique_users,\n '`The number of unique articles on the IBM platform`': total_articles\n}\n\n# Test your dictionary against the solution\nt.sol_1_test(sol_1_dict)",
"It looks like you have everything right here! Nice job!\n"
]
],
[
[
"### <a class=\"anchor\" id=\"Rank\">Part II: Rank-Based Recommendations</a>\n\nUnlike in the earlier lessons, we don't actually have ratings for whether a user liked an article or not. We only know that a user has interacted with an article. In these cases, the popularity of an article can really only be based on how often an article was interacted with.\n\n`1.` Fill in the function below to return the **n** top articles ordered with most interactions as the top. Test your function using the tests below.",
"_____no_output_____"
]
],
[
[
"df.head()",
"_____no_output_____"
],
[
"def get_top_articles(n, df=df):\n '''\n INPUT:\n n - (int) the number of top articles to return\n df - (pandas dataframe) df as defined at the top of the notebook \n \n OUTPUT:\n top_articles - (list) A list of the top 'n' article titles \n \n '''\n # Your code here\n top_articles = df.groupby(['article_id', 'title']).size()\\\n .reset_index(name='counts').sort_values('counts', ascending=False)[:n].title.tolist()\n return top_articles # Return the top article titles from df (not df_content)\n\ndef get_top_article_ids(n, df=df):\n '''\n INPUT:\n n - (int) the number of top articles to return\n df - (pandas dataframe) df as defined at the top of the notebook \n \n OUTPUT:\n top_articles - (list) A list of the top 'n' article ids \n \n '''\n # Your code here\n top_articles = df.groupby(\"article_id\").count()[\"title\"].sort_values(ascending=False).index[:n].astype('str')\n return top_articles.tolist() # Return the top article ids",
"_____no_output_____"
],
[
"print(get_top_articles(10))\nprint(get_top_article_ids(10))",
"['use deep learning for image classification', 'insights from new york car accident reports', 'visualize car data with brunel', 'use xgboost, scikit-learn & ibm watson machine learning apis', 'predicting churn with the spss random tree algorithm', 'healthcare python streaming application demo', 'finding optimal locations of new store using decision optimization', 'apache spark lab, part 1: basic concepts', 'analyze energy consumption in buildings', 'gosales transactions for logistic regression model']\n['1429.0', '1330.0', '1431.0', '1427.0', '1364.0', '1314.0', '1293.0', '1170.0', '1162.0', '1304.0']\n"
],
[
"# Test your function by returning the top 5, 10, and 20 articles\ntop_5 = get_top_articles(5)\ntop_10 = get_top_articles(10)\ntop_20 = get_top_articles(20)\n\n# Test each of your three lists from above\nt.sol_2_test(get_top_articles)",
"Your top_5 looks like the solution list! Nice job.\nYour top_10 looks like the solution list! Nice job.\nYour top_20 looks like the solution list! Nice job.\n"
]
],
[
[
"### <a class=\"anchor\" id=\"User-User\">Part III: User-User Based Collaborative Filtering</a>\n\n\n`1.` Use the function below to reformat the **df** dataframe to be shaped with users as the rows and articles as the columns. \n\n* Each **user** should only appear in each **row** once.\n\n\n* Each **article** should only show up in one **column**. \n\n\n* **If a user has interacted with an article, then place a 1 where the user-row meets for that article-column**. It does not matter how many times a user has interacted with the article, all entries where a user has interacted with an article should be a 1. \n\n\n* **If a user has not interacted with an item, then place a zero where the user-row meets for that article-column**. \n\nUse the tests to make sure the basic structure of your matrix matches what is expected by the solution.",
"_____no_output_____"
]
],
[
[
"# create the user-article matrix with 1's and 0's\ndef create_user_item_matrix(df):\n '''\n INPUT:\n df - pandas dataframe with article_id, title, user_id columns\n \n OUTPUT:\n user_item - user item matrix \n \n Description:\n Return a matrix with user ids as rows and article ids on the columns with 1 values where a user interacted with \n an article and a 0 otherwise\n '''\n # Fill in the function here\n # unstack the user-item interaction dataframe\n user_item = df.drop_duplicates().groupby(['user_id', 'article_id']).size().unstack()\n \n # fill missing values with 0\n user_item = user_item.fillna(0)\n \n # convert int\n user_item = user_item.astype('int')\n \n return user_item # return the user_item matrix \n\nuser_item = create_user_item_matrix(df)",
"_____no_output_____"
],
[
"## Tests: You should just need to run this cell. Don't change the code.\nassert user_item.shape[0] == 5149, \"Oops! The number of users in the user-article matrix doesn't look right.\"\nassert user_item.shape[1] == 714, \"Oops! The number of articles in the user-article matrix doesn't look right.\"\nassert user_item.sum(axis=1)[1] == 36, \"Oops! The number of articles seen by user 1 doesn't look right.\"\nprint(\"You have passed our quick tests! Please proceed!\")",
"You have passed our quick tests! Please proceed!\n"
]
],
[
[
"`2.` Complete the function below which should take a user_id and provide an ordered list of the most similar users to that user (from most similar to least similar). The returned result should not contain the provided user_id, as we know that each user is similar to him/herself. Because the results for each user here are binary, it (perhaps) makes sense to compute similarity as the dot product of two users. \n\nUse the tests to test your function.",
"_____no_output_____"
]
],
[
[
"def find_similar_users(user_id, user_item=user_item):\n '''\n INPUT:\n user_id - (int) a user_id\n user_item - (pandas dataframe) matrix of users by articles: \n 1's when a user has interacted with an article, 0 otherwise\n \n OUTPUT:\n similar_users - (list) an ordered list where the closest users (largest dot product users)\n are listed first\n \n Description:\n Computes the similarity of every pair of users based on the dot product\n Returns an ordered\n \n '''\n # compute similarity of each user to the provided user\n user_similr = user_item.loc[user_id,:].dot(user_item.T)\n \n # sort by similarity\n user_similr = user_similr.sort_values(ascending=False)\n \n # create list of just the ids\n # remove the own user's id\n most_similar_users = user_similr.loc[~(user_similr.index==user_id)].index.values.tolist()\n \n return most_similar_users # return a list of the users in order from most to least similar\n",
"_____no_output_____"
],
[
"# Do a spot check of your function\nprint(\"The 10 most similar users to user 1 are: {}\".format(find_similar_users(1)[:10]))\nprint(\"The 5 most similar users to user 3933 are: {}\".format(find_similar_users(3933)[:5]))\nprint(\"The 3 most similar users to user 46 are: {}\".format(find_similar_users(46)[:3]))",
"The 10 most similar users to user 1 are: [3933, 23, 3782, 203, 4459, 131, 3870, 46, 4201, 5041]\nThe 5 most similar users to user 3933 are: [1, 23, 3782, 4459, 203]\nThe 3 most similar users to user 46 are: [4201, 23, 3782]\n"
]
],
[
[
"`3.` Now that you have a function that provides the most similar users to each user, you will want to use these users to find articles you can recommend. Complete the functions below to return the articles you would recommend to each user. ",
"_____no_output_____"
]
],
[
[
"def get_article_names(article_ids, df=df):\n '''\n INPUT:\n article_ids - (list) a list of article ids\n df - (pandas dataframe) df as defined at the top of the notebook\n \n OUTPUT:\n article_names - (list) a list of article names associated with the list of article ids \n (this is identified by the title column)\n '''\n # Your code here\n article_names = []\n \n # select articles with the same article_id and drop duplicates\n article_names = df[df['article_id'].isin(article_ids)]['title'].drop_duplicates().values.tolist()\n \n return article_names # Return the article names associated with list of article ids\n\n\ndef get_user_articles(user_id, user_item=user_item):\n '''\n INPUT:\n user_id - (int) a user id\n user_item - (pandas dataframe) matrix of users by articles: \n 1's when a user has interacted with an article, 0 otherwise\n \n OUTPUT:\n article_ids - (list) a list of the article ids seen by the user\n article_names - (list) a list of article names associated with the list of article ids \n (this is identified by the doc_full_name column in df_content)\n \n Description:\n Provides a list of the article_ids and article titles that have been seen by a user\n '''\n # Your code here\n user_idx = user_item.loc[user_id, :] #get all articles for this user id\n article_ids = user_idx[user_idx == 1].index.values.astype('str').tolist() #get articles user interacted with\n article_names = get_article_names(article_ids) # get article names\n \n return article_ids, article_names # return the ids and names\n\n\ndef user_user_recs(user_id, m=10):\n '''\n INPUT:\n user_id - (int) a user id\n m - (int) the number of recommendations you want for the user\n \n OUTPUT:\n recs - (list) a list of recommendations for the user\n \n Description:\n Loops through the users based on closeness to the input user_id\n For each user - finds articles the user hasn't seen before and provides them as recs\n Does this until m recommendations are found\n \n Notes:\n Users who are the same 
closeness are chosen arbitrarily as the 'next' user\n \n For the user where the number of recommended articles starts below m \n and ends exceeding m, the last items are chosen arbitrarily\n \n '''\n # Your code here\n most_similar_users = find_similar_users(user_id) # get most similar users\n user_article_ids = set(get_user_articles(user_id)[0]) # get article ids \n \n recs = []\n # create recommendations for this user\n for user_neighb in most_similar_users:\n neighb_article_ids = set(get_user_articles(user_neighb)[0])\n recs += list(set(neighb_article_ids) - set(user_article_ids))\n if len(recs) > m:\n break\n \n recs = recs[:m]\n \n return recs # return your recommendations for this user_id ",
"_____no_output_____"
],
[
"# Check Results\nget_article_names(user_user_recs(1, 10)) # Return 10 recommendations for user 1",
"_____no_output_____"
],
[
"# Test your functions here - No need to change this code - just run this cell\nassert set(get_article_names(['1024.0', '1176.0', '1305.0', '1314.0', '1422.0', '1427.0'])) == set(['using deep learning to reconstruct high-resolution audio', 'build a python app on the streaming analytics service', 'gosales transactions for naive bayes model', 'healthcare python streaming application demo', 'use r dataframes & ibm watson natural language understanding', 'use xgboost, scikit-learn & ibm watson machine learning apis']), \"Oops! Your the get_article_names function doesn't work quite how we expect.\"\nassert set(get_article_names(['1320.0', '232.0', '844.0'])) == set(['housing (2015): united states demographic measures','self-service data preparation with ibm data refinery','use the cloudant-spark connector in python notebook']), \"Oops! Your the get_article_names function doesn't work quite how we expect.\"\nassert set(get_user_articles(20)[0]) == set(['1320.0', '232.0', '844.0'])\nassert set(get_user_articles(20)[1]) == set(['housing (2015): united states demographic measures', 'self-service data preparation with ibm data refinery','use the cloudant-spark connector in python notebook'])\nassert set(get_user_articles(2)[0]) == set(['1024.0', '1176.0', '1305.0', '1314.0', '1422.0', '1427.0'])\nassert set(get_user_articles(2)[1]) == set(['using deep learning to reconstruct high-resolution audio', 'build a python app on the streaming analytics service', 'gosales transactions for naive bayes model', 'healthcare python streaming application demo', 'use r dataframes & ibm watson natural language understanding', 'use xgboost, scikit-learn & ibm watson machine learning apis'])\nprint(\"If this is all you see, you passed all of our tests! Nice job!\")",
"If this is all you see, you passed all of our tests! Nice job!\n"
]
],
[
[
"`4.` Now we are going to improve the consistency of the **user_user_recs** function from above. \n\n* Instead of arbitrarily choosing when we obtain users who are all the same closeness to a given user - choose the users that have the most total article interactions before choosing those with fewer article interactions.\n\n\n* Instead of arbitrarily choosing articles from the user where the number of recommended articles starts below m and ends exceeding m, choose articles with the articles with the most total interactions before choosing those with fewer total interactions. This ranking should be what would be obtained from the **top_articles** function you wrote earlier.",
"_____no_output_____"
]
],
[
[
"def get_top_sorted_users(user_id, df=df, user_item=user_item):\n '''\n INPUT:\n user_id - (int)\n df - (pandas dataframe) df as defined at the top of the notebook \n user_item - (pandas dataframe) matrix of users by articles: \n 1's when a user has interacted with an article, 0 otherwise\n \n \n OUTPUT:\n neighbors_df - (pandas dataframe) a dataframe with:\n neighbor_id - is a neighbor user_id\n similarity - measure of the similarity of each user to the provided user_id\n num_interactions - the number of articles viewed by the user - if a u\n \n Other Details - sort the neighbors_df by the similarity and then by number of interactions where \n highest of each is higher in the dataframe\n \n '''\n # Your code here\n colName = ['neighbor_id', 'similarity', 'num_interactions'] # column names\n neighbors_df = pd.DataFrame(columns= colName) # create dataframe to hold top users\n \n # populate the dataframe\n for id in user_item.index.values:\n if id != user_id:\n neighbor_id = id\n \n # get user to user similarity\n similarity = user_item[user_item.index == user_id].dot(user_item.loc[id].T).values[0]\n \n # get number of interactions for user ot article\n num_interactions = user_item.loc[id].values.sum()\n neighbors_df.loc[neighbor_id] = [neighbor_id, similarity, num_interactions]\n \n neighbors_df['similarity'] = neighbors_df['similarity'].astype('int')\n neighbors_df['neighbor_id'] = neighbors_df['neighbor_id'].astype('int')\n neighbors_df = neighbors_df.sort_values(by = ['similarity', 'neighbor_id'], ascending = [False, True])\n \n return neighbors_df # return the dataframe\n\n\ndef user_user_recs_part2(user_id, m=10):\n '''\n INPUT:\n user_id - (int) a user id\n m - (int) the number of recommendations you want for the user\n \n OUTPUT:\n recs - (list) a list of recommendations for the user by article id\n rec_names - (list) a list of recommendations for the user by article title\n \n Description:\n Loops through the users based on closeness to the input user_id\n 
For each user - finds articles the user hasn't seen before and provides them as recs\n Does this until m recommendations are found\n \n Notes:\n * Choose the users that have the most total article interactions \n before choosing those with fewer article interactions.\n\n * Choose articles with the articles with the most total interactions \n before choosing those with fewer total interactions. \n \n '''\n \n # Your code here\n # get similar users\n neighbours = get_top_sorted_users(user_id)\n top_similar_users = neighbours['neighbor_id'].values.tolist()\n recs = [] # recommended article Id's\n \n # get articles read by the user\n user_article_ids = list(set(get_user_articles(user_id)[0]))\n \n for neighbour_id in top_similar_users:\n recs += df[df['user_id'] == neighbour_id]['article_id'].values.tolist()\n \n recs = list(set(recs))\n \n # selecting articles not seen by User_id\n recs = [ x for x in recs if x not in user_article_ids]\n \n recs_df = df[df.article_id.isin(recs)][['article_id', 'title']].drop_duplicates().head(m) \n recs = recs_df['article_id'].values.tolist() # get ids\n rec_names = recs_df['title'].values.tolist() # get title\n \n \n return recs, rec_names",
"_____no_output_____"
],
[
"# Quick spot check - don't change this code - just use it to test your functions\nrec_ids, rec_names = user_user_recs_part2(20, 10)\nprint(\"The top 10 recommendations for user 20 are the following article ids:\")\nprint(rec_ids)\nprint()\nprint(\"The top 10 recommendations for user 20 are the following article names:\")\nprint(rec_names)",
"The top 10 recommendations for user 20 are the following article ids:\n[1430.0, 1314.0, 1429.0, 1338.0, 1276.0, 1432.0, 593.0, 1185.0, 993.0, 14.0]\n\nThe top 10 recommendations for user 20 are the following article names:\n['using pixiedust for fast, flexible, and easier data analysis and experimentation', 'healthcare python streaming application demo', 'use deep learning for image classification', 'ml optimization using cognitive assistant', 'deploy your python model as a restful api', 'visualize data with the matplotlib library', 'upload files to ibm data science experience using the command line', 'classify tumors with machine learning', 'configuring the apache spark sql context', 'got zip code data? prep it for analytics. – ibm watson data lab – medium']\n"
]
],
[
[
"`5.` Use your functions from above to correctly fill in the solutions to the dictionary below. Then test your dictionary against the solution. Provide the code you need to answer each following the comments below.",
"_____no_output_____"
]
],
[
[
"### Tests with a dictionary of results\nuser1_most_sim = get_top_sorted_users(1).iloc[0].neighbor_id # Find the user that is most similar to user 1 \nuser131_10th_sim = get_top_sorted_users(131).iloc[9].neighbor_id # Find the 10th most similar user to user 131",
"_____no_output_____"
],
[
"## Dictionary Test Here\nsol_5_dict = {\n 'The user that is most similar to user 1.': user1_most_sim, \n 'The user that is the 10th most similar to user 131': user131_10th_sim,\n}\n\nt.sol_5_test(sol_5_dict)",
"This all looks good! Nice job!\n"
]
],
[
[
"`6.` If we were given a new user, which of the above functions would you be able to use to make recommendations? Explain. Can you think of a better way we might make recommendations? Use the cell below to explain a better method for new users.",
"_____no_output_____"
],
[
"**Provide your response here.**",
"_____no_output_____"
],
[
"I will choose ```user_user_recs_part2```. It's a good start to recommend articles from the most active users and make sure these articles are the most interacted articles as well. For new users, we can ask them about their preferences, then recommend top articles that are matching this preference. Once we have more data on them, we can move to matrix factorization. ",
"_____no_output_____"
],
[
"`7.` Using your existing functions, provide the top 10 recommended articles you would provide for the a new user below. You can test your function against our thoughts to make sure we are all on the same page with how we might make a recommendation.",
"_____no_output_____"
]
],
[
[
"new_user = '0.0'\n\n# What would your recommendations be for this new user '0.0'? As a new user, they have no observed articles.\n# Provide a list of the top 10 article ids you would give to \nnew_user_recs = get_top_article_ids(10) # Your recommendations here",
"_____no_output_____"
],
[
"assert set(new_user_recs) == set(['1314.0','1429.0','1293.0','1427.0','1162.0','1364.0','1304.0','1170.0','1431.0','1330.0']), \"Oops! It makes sense that in this case we would want to recommend the most popular articles, because we don't know anything about these users.\"\n\nprint(\"That's right! Nice job!\")",
"That's right! Nice job!\n"
]
],
[
[
"### <a class=\"anchor\" id=\"Content-Recs\">Part IV: Content Based Recommendations (EXTRA - NOT REQUIRED)</a>\n\nAnother method we might use to make recommendations is to perform a ranking of the highest ranked articles associated with some term. You might consider content to be the **doc_body**, **doc_description**, or **doc_full_name**. There isn't one way to create a content based recommendation, especially considering that each of these columns hold content related information. \n\n`1.` Use the function body below to create a content based recommender. Since there isn't one right answer for this recommendation tactic, no test functions are provided. Feel free to change the function inputs if you decide you want to try a method that requires more input values. The input values are currently set with one idea in mind that you may use to make content based recommendations. One additional idea is that you might want to choose the most popular recommendations that meet your 'content criteria', but again, there is a lot of flexibility in how you might make these recommendations.\n\n### This part is NOT REQUIRED to pass this project. However, you may choose to take this on as an extra way to show off your skills.",
"_____no_output_____"
]
],
[
[
"def tokenize(x):\n '''\n Tokenize a string into words. \n \n Args:\n x(string): string to tokezine.\n \n Returns:\n (list): list of lemmatized words\n '''\n # get stop words\n stop_words = (set(stopwords.words('english')) | set(nltk.corpus.abc.words()))\n tokens = word_tokenize(x) # split each article title into individual words\n lemmatizer = WordNetLemmatizer()\n clean_tokens=[]\n for token in tokens:\n #clean each token from whitespace and punctuation, and conver to root word\n clean_token = lemmatizer.lemmatize(token).lower().strip()\n clean_tokens.append(clean_token)\n \n filtered = [word for word in clean_tokens if word not in stop_words and word.isalpha()]\n return filtered",
"_____no_output_____"
],
[
"def make_content_recs(data_id, user_id=True, m=10, df=df):\n '''\n This recommender goes through each article title and nltk library to finds the most common words\n (related to content) throughout all the articles.\n \n The recommender will look at the sums of words in the title of each article \n and based on the number of matches and popularity of an article.\n \n Args:\n data_id (str) - id of either user or article\n user_id (bool) - if true, make recs based on user\n m (int) - number of recommendations to give based on term\n Returns:\n recs (list) - list of article ids that are recommended\n rec_names (list) - list of article names that are recommended \n '''\n \n if(user_id):\n user_id = data_id\n try:\n # get past articles read by the user\n article_ids, _ = get_user_articles(user_id)\n except KeyError: # user does not exist\n print('User Doesn\\'t Exist, Recommending Top Articles')\n recs = get_top_article_ids(m)\n return recs, get_article_names(recs)\n \n else:\n article_ids = data_id\n \n title_data = df.drop_duplicates(subset='article_id') #drop duplicates \n titles = title_data[title_data.article_id.isin(list(map(float, article_ids)))].title # get article titles\n \n #tokenize the words in each article title\n title_words=[]\n tokenized = tokenize(titles.str.cat(sep=' '))\n title_words.extend(tokenized)\n \n #find the highest occuring words\n common_words = pd.value_counts(title_words).sort_values(ascending=False)[:10].index\n\n top_matches={}\n # measure of similarity: count number of occurences of each common word in other article titles\n for word in common_words:\n word_count = pd.Series(title_data.title.str.count(word).fillna(0)) #gets occurences of each word in title\n top_matches[word] = word_count\n \n # most common words\n top_matches = pd.DataFrame(top_matches) \n top_matches['top_matches'] = top_matches.sum(axis=1)\n top_matches['article_id'] = title_data.article_id.astype(float)\n \n # get most interacted with articles\n article_occurences 
= pd.DataFrame({'occurences':df.article_id.value_counts()})\n\n # sort matches by most popular articles\n top_matches = top_matches.merge(article_occurences, left_on='article_id', right_index=True)\n top_matches.sort_values(['top_matches', 'occurences'], ascending=False, inplace=True) \n \n # drop already read articles\n recs_df = top_matches[~top_matches.article_id.isin(list(map(float, article_ids)))]\n \n # get rec id and names\n recs = recs_df.article_id[:m].values.astype(str)\n rec_names = get_article_names(recs)\n \n return recs, rec_names",
"_____no_output_____"
]
],
[
[
"`2.` Now that you have put together your content-based recommendation system, use the cell below to write a summary explaining how your content based recommender works. Do you see any possible improvements that could be made to your function? Is there anything novel about your content based recommender?\n\n### This part is NOT REQUIRED to pass this project. However, you may choose to take this on as an extra way to show off your skills.\n\n\nThis content based recommender scans through previously interacted articles. The nltk library finds the most common words in the titles of each article.\n\nBased on these most common words, the recommender looks at the sums of words relevant words in the title of each article, and based on the number of matches in the titles as well as the general popularity of the article it gives back the best recommendations.\n\nIf the user has not read any articles yet, then we can't really give any content based recommendations, and just return back some of the most popular articles.\n\nThere is a lot of potential improvement and optimization for this recommender. For example one could construct a custom NLTK corpus which would filter out article words. Currently I use a combination of a couple standard NLTK corpora. Furthermore, If df_content had information for all articles we could expand this recommender to look through not only the title but also the body of the articles.",
"_____no_output_____"
],
[
"**Write an explanation of your content based recommendation system here.**",
"_____no_output_____"
],
[
                "`3.` Use your content-recommendation system to make recommendations for the below scenarios based on the comments. Again no tests are provided here, because there isn't one right answer that could be used to find these content based recommendations.\n\nWe are using the NLTK library to search for articles with similar keywords. If the user has no history yet, then no content-based recommendation is given, and we will return some of the most popular articles.\n\nWe can improve this further by looking at semantically similar keywords, not just the exact keyword. Also, if the user doesn't like a specific article because it has deep learning content, it doesn't mean that he or she will dislike every article with deep learning content. It will be interesting to augment content-based recommendation with some ML algorithm that can handle such situations. \n\n### This part is NOT REQUIRED to pass this project. However, you may choose to take this on as an extra way to show off your skills.",
"_____no_output_____"
]
],
[
[
"# make recommendations for a brand new user\nmake_content_recs('0.0', user_id=True)",
"User Doesn't Exist, Recommending Top Articles\n"
],
[
"# make a recommendations for a user who only has interacted with article id '1427.0'\nmake_content_recs(['1427.0'], user_id=False)",
"_____no_output_____"
]
],
[
[
"### <a class=\"anchor\" id=\"Matrix-Fact\">Part V: Matrix Factorization</a>\n\nIn this part of the notebook, you will build use matrix factorization to make article recommendations to the users on the IBM Watson Studio platform.\n\n`1.` You should have already created a **user_item** matrix above in **question 1** of **Part III** above. This first question here will just require that you run the cells to get things set up for the rest of **Part V** of the notebook. ",
"_____no_output_____"
]
],
[
[
"# Load the matrix here\nuser_item_matrix = pd.read_pickle('user_item_matrix.p')",
"_____no_output_____"
],
[
"# quick look at the matrix\nuser_item_matrix.head()",
"_____no_output_____"
]
],
[
[
"`2.` In this situation, you can use Singular Value Decomposition from [numpy](https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.linalg.svd.html) on the user-item matrix. Use the cell to perform SVD, and explain why this is different than in the lesson.",
"_____no_output_____"
]
],
[
[
"# Perform SVD on the User-Item Matrix Here\nu, s, vt = np.linalg.svd(user_item_matrix) # use the built in to get the three matrices",
"_____no_output_____"
],
[
"print(\"Number of Nans in the users to item interactions matrix is: {}\".format(np.isnan(user_item_matrix).sum().sum()))\nprint(\"Number of Nans in the users to latent features matrix is: {}\".format(np.isnan(u).sum().sum()))\nprint(\"Number of Nans in the segma matrix is: {}\".format(np.isnan(s).sum().sum()))\nprint(\"Number of Nans in the items to latent features matrix is: {}\".format(np.isnan(vt).sum().sum()))",
"Number of Nans in the users to item interactions matrix is: 0\nNumber of Nans in the users to latent features matrix is: 0\nNumber of Nans in the segma matrix is: 0\nNumber of Nans in the items to latent features matrix is: 0\n"
]
],
[
[
"**Provide your response here.**\n\n",
"_____no_output_____"
],
[
                "We can use Singular Value Decomposition because **there are no missing values (NaNs) in our data.**",
"_____no_output_____"
],
[
"`3.` Now for the tricky part, how do we choose the number of latent features to use? Running the below cell, you can see that as the number of latent features increases, we obtain a lower error rate on making predictions for the 1 and 0 values in the user-item matrix. Run the cell below to get an idea of how the accuracy improves as we increase the number of latent features.",
"_____no_output_____"
]
],
[
[
"num_latent_feats = np.arange(10,700+10,20)\nsum_errs = []\n\nfor k in num_latent_feats:\n # restructure with k latent features\n s_new, u_new, vt_new = np.diag(s[:k]), u[:, :k], vt[:k, :]\n \n # take dot product\n user_item_est = np.around(np.dot(np.dot(u_new, s_new), vt_new))\n \n # compute error for each prediction to actual value\n diffs = np.subtract(user_item_matrix, user_item_est)\n \n # total errors and keep track of them\n err = np.sum(np.sum(np.abs(diffs)))\n sum_errs.append(err)\n \nplt.figure(figsize=(15,10))\nplt.plot(num_latent_feats, 1 - np.array(sum_errs)/df.shape[0]);\nplt.xlabel('Number of Latent Features');\nplt.ylabel('Accuracy');\nplt.title('Accuracy vs. Number of Latent Features');",
"_____no_output_____"
]
],
[
[
"`4.` From the above, we can't really be sure how many features to use, because simply having a better way to predict the 1's and 0's of the matrix doesn't exactly give us an indication of if we are able to make good recommendations. Instead, we might split our dataset into a training and test set of data, as shown in the cell below. \n\nUse the code from question 3 to understand the impact on accuracy of the training and test sets of data with different numbers of latent features. Using the split below: \n\n* How many users can we make predictions for in the test set? \n* How many users are we not able to make predictions for because of the cold start problem?\n* How many articles can we make predictions for in the test set? \n* How many articles are we not able to make predictions for because of the cold start problem?",
"_____no_output_____"
]
],
[
[
"df_train = df.head(40000)\ndf_test = df.tail(5993)\n\ndef create_test_and_train_user_item(df_train, df_test):\n '''\n INPUT:\n df_train - training dataframe\n df_test - test dataframe\n \n OUTPUT:\n user_item_train - a user-item matrix of the training dataframe \n (unique users for each row and unique articles for each column)\n user_item_test - a user-item matrix of the testing dataframe \n (unique users for each row and unique articles for each column)\n test_idx - all of the test user ids\n test_arts - all of the test article ids\n \n '''\n # Your code here\n # create user item matrix for the train dataset\n user_item_train = create_user_item_matrix(df_train)\n \n # create the test dataset\n user_item_test = create_user_item_matrix(df_test)\n \n # get the ids of the train dataset and test dataset\n train_idx = set(user_item_train.index)\n test_idx = set(user_item_test.index)\n \n # get shared rows\n shared_rows = train_idx.intersection(test_idx)\n \n # get columns in train and test datasets\n train_arts = set(user_item_train.columns)\n test_arts = set(user_item_test.columns)\n \n # get shared columns\n shared_cols = train_arts.intersection(test_arts)\n \n # Creating new user-item matrix for tets with common values\n user_item_test = user_item_test.ix[shared_rows, shared_cols]\n \n return user_item_train, user_item_test, test_idx, test_arts\n\nuser_item_train, user_item_test, test_idx, test_arts = create_test_and_train_user_item(df_train, df_test)",
"/opt/conda/lib/python3.6/site-packages/ipykernel_launcher.py:41: DeprecationWarning: \n.ix is deprecated. Please use\n.loc for label based indexing or\n.iloc for positional indexing\n\nSee the documentation here:\nhttp://pandas.pydata.org/pandas-docs/stable/indexing.html#ix-indexer-is-deprecated\n"
],
[
"print(user_item_test.shape[0])\nprint(len(test_idx) - user_item_test.shape[0])\nprint(user_item_test.shape[1])\nprint(len(test_arts) - user_item_test.shape[1])",
"20\n662\n574\n0\n"
],
[
"# Replace the values in the dictionary below\na = 662 \nb = 574 \nc = 20 \nd = 0 \n\n\nsol_4_dict = {\n 'How many users can we make predictions for in the test set?': c, \n 'How many users in the test set are we not able to make predictions for because of the cold start problem?': a, \n 'How many movies can we make predictions for in the test set?': b,\n 'How many movies in the test set are we not able to make predictions for because of the cold start problem?': d\n}\n\n# this should be article not movies. it was bugging me and wasted some time on it\nt.sol_4_test(sol_4_dict)",
"Awesome job! That's right! All of the test movies are in the training data, but there are only 20 test users that were also in the training set. All of the other users that are in the test set we have no data on. Therefore, we cannot make predictions for these users using SVD.\n"
]
],
[
[
"`5.` Now use the **user_item_train** dataset from above to find U, S, and V transpose using SVD. Then find the subset of rows in the **user_item_test** dataset that you can predict using this matrix decomposition with different numbers of latent features to see how many features makes sense to keep based on the accuracy on the test data. This will require combining what was done in questions `2` - `4`.\n\nUse the cells below to explore how well SVD works towards making predictions for recommendations on the test data. ",
"_____no_output_____"
]
],
[
[
"# fit SVD on the user_item_train matrix\nu_train, s_train, vt_train = np.linalg.svd(user_item_train) # fit svd similar to above then use the cells below",
"_____no_output_____"
],
[
"# Use these cells to see how well you can use the training \n# decomposition to predict on test data\ndef svd_algorithm(u_train, s_train, vt_train):\n \"\"\" Return the results of the svd algorithm.\n \n Args:\n u_train (np.array): user item interaction matrix \n s_train (np.array): sigma matrix\n vt_train (np.array): v transpose matrix\n \n Returns:\n Dataframe: dataframe of user article interaction counts sorted in descending order\n \n \"\"\"\n\n num_latent_feats = np.arange(10,700+10,20)\n sum_errs_train = []\n sum_errs_test = []\n all_errs = []\n\n for k in num_latent_feats:\n # ge u_test and vt_test\n row_idxs = user_item_train.index.isin(test_idx)\n col_idxs = user_item_train.columns.isin(test_arts)\n u_test = u_train[row_idxs, :]\n vt_test = vt_train[:, col_idxs]\n \n # split data \n s_train_lat, u_train_lat, vt_train_lat = np.diag(s_train[:k]), u_train[:, :k], vt_train[:k, :]\n u_test_lat, vt_test_lat = u_test[:, :k], vt_test[:k, :]\n\n # dot product:\n user_item_train_preds = np.around(np.dot(np.dot(u_train_lat, s_train_lat), vt_train_lat))\n user_item_test_preds = np.around(np.dot(np.dot(u_test_lat, s_train_lat), vt_test_lat))\n all_errs.append(1 - ((np.sum(user_item_test_preds)+np.sum(np.sum(user_item_test))) \\\n /(user_item_test.shape[0]*user_item_test.shape[1])))\n\n # calculate the error of each prediction\n diffs_train = np.subtract(user_item_train, user_item_train_preds)\n diffs_test = np.subtract(user_item_test, user_item_test_preds)\n\n # get total Error\n err_train = np.sum(np.sum(np.abs(diffs_train)))\n err_test = np.sum(np.sum(np.abs(diffs_test)))\n\n sum_errs_train.append(err_train)\n sum_errs_test.append(err_test)\n \n # plot accuracy for train and test vs number of latent features\n plt.figure(figsize=(15,10))\n\n # latent features and training\n plt.plot(num_latent_feats, 1 - np.array(sum_errs_train)/(user_item_train.shape[0]*user_item_test.shape[1]), label='Train', color='darkred')\n\n # latent features and testing\n 
plt.plot(num_latent_feats, 1 - np.array(sum_errs_test)/(user_item_test.shape[0]*user_item_test.shape[1]), label='Test', color='darkblue')\n\n plt.plot(num_latent_feats, all_errs, label='Total Error', color = \"orange\")\n plt.xlabel('Number of Latent Features')\n plt.ylabel('Accuracy')\n plt.legend();",
"_____no_output_____"
],
[
"# call the svd algorithm\nsvd_algorithm(u_train, s_train, vt_train)",
"_____no_output_____"
]
],
[
[
"`6.` Use the cell below to comment on the results you found in the previous question. Given the circumstances of your results, discuss what you might do to determine if the recommendations you make with any of the above recommendation systems are an improvement to how users currently find articles? ",
"_____no_output_____"
],
[
                "**Your response here.**\n\n- Test accuracy decreases as the number of latent features increases for the testing dataset.\n- In this project, only 20 users had records of old interactions.\n- To solve the cold-start problem, we can deploy a rank-based or a content-based recommendation method.",
"_____no_output_____"
],
[
"<a id='conclusions'></a>\n### Extras\nUsing your workbook, you could now save your recommendations for each user, develop a class to make new predictions and update your results, and make a flask app to deploy your results. These tasks are beyond what is required for this project. However, from what you learned in the lessons, you certainly capable of taking these tasks on to improve upon your work here!\n\n\n## Conclusion\n\n> Congratulations! You have reached the end of the Recommendations with IBM project! \n\n> **Tip**: Once you are satisfied with your work here, check over your report to make sure that it is satisfies all the areas of the [rubric](https://review.udacity.com/#!/rubrics/2322/view). You should also probably remove all of the \"Tips\" like this one so that the presentation is as polished as possible.\n\n\n## Directions to Submit\n\n> Before you submit your project, you need to create a .html or .pdf version of this notebook in the workspace here. To do that, run the code cell below. If it worked correctly, you should get a return code of 0, and you should see the generated .html file in the workspace directory (click on the orange Jupyter icon in the upper left).\n\n> Alternatively, you can download this report as .html via the **File** > **Download as** submenu, and then manually upload it into the workspace directory by clicking on the orange Jupyter icon in the upper left, then using the Upload button.\n\n> Once you've done this, you can submit your project by clicking on the \"Submit Project\" button in the lower right here. This will create and submit a zip file with this .ipynb doc and the .html or .pdf version you created. Congratulations! ",
"_____no_output_____"
]
],
[
[
"from subprocess import call\ncall(['python', '-m', 'nbconvert', 'Recommendations_with_IBM.ipynb'])",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
]
] |
4a21cd77be33e0a975e988c119829dbe0fa64192
| 591,544 |
ipynb
|
Jupyter Notebook
|
Part01CleaningData.ipynb
|
radviandeploy/jakpartment_deploy
|
1bcae8f585dd86bdc6e54822e655f0bca6d8d690
|
[
"Apache-2.0"
] | null | null | null |
Part01CleaningData.ipynb
|
radviandeploy/jakpartment_deploy
|
1bcae8f585dd86bdc6e54822e655f0bca6d8d690
|
[
"Apache-2.0"
] | null | null | null |
Part01CleaningData.ipynb
|
radviandeploy/jakpartment_deploy
|
1bcae8f585dd86bdc6e54822e655f0bca6d8d690
|
[
"Apache-2.0"
] | 3 |
2020-11-28T19:26:29.000Z
|
2021-11-20T09:47:46.000Z
| 151.367451 | 99,564 | 0.862161 |
[
[
[
"# Part 1: Data Wrangling",
"_____no_output_____"
],
[
"## Introduction\n\nThis project is a self-made end to end machine learning project in which I scrape a website called 'Jendela 360'. The scraped dataset is saved in a csv file named 'Apartment Data Raw'. The dataset contains the details of apartment units available to be rented in Jakarta and its surrounding (Jabodetabek region) on December 2nd, 2020. The data discussed here might not be up-to-date. \n\nProblem Statement of this project:\n\"Based on the scraped data of apartments in Jakarta and its surrounding, the writer aims to construct a machine learning model to predict the annual rent price of apartment units. If possible, the writer aims to find which features/factors have the most impact on an apartment unit's annual rent price.\"\n\nIn the first notebook, we are going to load the raw dataset and conduct data wrangling to draw insights and clean the data. Our goal is to have a cleaned dataset at the end of this notebook, so we can use the cleaned data to create and test regression models in the second notebook.\n\nLast but not least, this project is non-profit and made for learning purposes only.",
"_____no_output_____"
],
[
"## Importing Packages",
"_____no_output_____"
]
],
[
[
"# Essentials\nimport numpy as np\nimport pandas as pd\nimport datetime\nimport random\n\n# Plots\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"## Importing Dataset",
"_____no_output_____"
]
],
[
[
"raw_df = pd.read_csv('Web Scraping/Apartment Dataset Raw.csv')\n#We are going to save the unaltered dataset as raw_df, and use the dataframe 'df' to do the next data wrangling operations\n\ndf = raw_df\ndf.head()",
"_____no_output_____"
],
[
"df = df.rename({'Unnamed: 0' : 'Index'}, axis = 'columns')",
"_____no_output_____"
]
],
[
[
"## Data Cleaning",
"_____no_output_____"
],
[
"### Raw Data Shape and Column Description",
"_____no_output_____"
]
],
[
[
"print(df.columns)",
"Index(['Index', 'URL', 'Unit_Name', 'Unit_ID', 'Apt_Name', 'No_Rooms',\n 'Bathroom', 'Street', 'Locality', 'Region', 'Longitude', 'Latitude',\n 'Furnished', 'Area', 'Floor', 'Tower', 'AC', 'Water_Heater',\n 'Dining_Set', 'Electricity', 'Bed', 'Access_Card', 'Kitchen', 'Fridge',\n 'Washing_Machine', 'TV', 'ATM', 'TV_Cable', 'Grocery', 'Internet',\n 'Swim_Pool', 'Laundry', 'Security', 'Basketball', 'Multipurpose_Room',\n 'Gym', 'Jogging', 'Tennis', 'Restaurant', 'Playground',\n 'Total_Facilities', 'Currency', 'Annual_Price', 'Monthly_Price',\n 'Deposit_Currency', 'Deposit_Charge', 'Service_Currency',\n 'Service_Charge'],\n dtype='object')\n"
],
[
"df.shape",
"_____no_output_____"
]
],
[
[
"Each row represents a unique unit of apartment which was displayed on Jendela 360 for rent on 18th October 2020. We have 5339 rows and 47 columns. The columns represent various characteristics of each unit, and is described as follows.\n\nThe following columns describe the identification data of each unit (location, name, etc).\n* Index: the index of each row (self-ecplanatory) starting at 0.\n* URL: the URL each apartment unit page on Jendela 360 website.\n* Unit_Name: the apartment unit name on its page.\n* Unit_ID: the ID of each page (the last seven characters of the URL). Unique for each apartment unit.\n* Apt_Name: the apartment building name of the unit. \n* Street: the street address of the unit.\n* Locality: the local district of the unit.\n* Region: the city of the unit.\n* Longitude and Latitude: the geographical longitude and latitude coordinate of the unit\n* Floor: the floor location of the unit.\n* Tower: the name of the tower in which the unit is located in.\n\nThe following columns describe the facilities of each apartment unit. The two columns which houses numerical (quantitative) data about each apartment unit's facilities are:\n* No_Rooms: the number of bedrooms in each apartment unit.\n* Area: the area in meter suqared of each apartment unit.\n\nThe other columns which describe the facilities of each unit are categorical in nature. The value of each column is '1' if the facility is present, and '0' if the facility is not present. These columns are:\n* Furnished (1 represents that the unit is fully furnished, and vice versa)\n* AC\n* Water_Heater\n* Dining_Set\n* Electricity \n* Bed\n* Access_Card\n* Kitchen\n* Fridge\n* Washing_Machine\n* TV\n* ATM\n* TV_Cable\n* Grocery \n* Internet\n* Swim_Pool (swimming pool)\n* Laundry\n* Security\n* Basketball (basketball field)\n* Multipurpose_room\n* Gym\n* Jogging (jogging track)\n* Tennis (tennis field)\n* Restaurant\n* Playground\n\nThe following columns describe the fee of each unit. 
The only fee that each apartment has is the annual rent price. Not all apartment units are available to be rented on a monthly term. There are also cases where the deposit and service charges are not listed. Furthermore, it will be very easy to predict the annual price if we know the monthly price, as we just need to multiply it by 12. That's why we are going to remove every fee column in the dataset and only take the annual rent price (in rupiah) as the dependent variable of our model.\n\n* Currency: the currency unit of the listed price.\n* Monthly_Price: the monthly payment fee if the tenant wishes to rent it on monthly term.\n* Annual_Price: the annual payment fee if the tenant wishes to rent it on yearly term.\n* Deposit_Currency: the currency unit of the listed deposit charge.\n* Deposit_Charge: the initial deposit charge.\n* Service_Currency: the currency unit of the service charge.\n* Service_Charge: the service charge of the unit.",
"_____no_output_____"
],
[
"### Omiting ERROR Rows\n\nThe web scraper uses a ```try:...except:``` block to keep on reading and scraping new pages even if the current iteration raises an error. This is done so the scraping process could be automated, and if a web page raises an error, we don't have to restart the scraping process from the beginning again. If a page raises an error, the whole row (except the URL) will be filled with the string 'ERROR'. The best way to find 'ERROR' rows is to find which rows that have an 'ERROR' Apt_Name column, as that is the features that exists in all apartment unit web pages.\n\nIn this step, we are going to remove all 'ERROR' rows.",
"_____no_output_____"
]
],
[
[
"df.shape",
"_____no_output_____"
],
[
"df = df[df.Apt_Name != 'ERROR']",
"_____no_output_____"
],
[
"df = df.reset_index(drop = True, inplace=False)\ndf.shape",
"_____no_output_____"
]
],
[
[
"We can see that there are 18 rows which are omitted. These rows are the 'ERROR' rows.",
"_____no_output_____"
],
[
"### Identifying the Dependent/Outcome Variable",
"_____no_output_____"
],
[
"Referring to the initial problem statement of this project, we hereby decide that the annual rent price of the apartment will be our dependent variable for the regression model. Furthermore, we should not look at the values from monthly price, deposit charge, and service charge, as we would like to predict the annual rent price only using the apartment unit's identification data (location) and facilities. \n\nAfter deciding which variable will be our outcome variable, we should make sure that the annual price data is in the same currency unit. If the currency of the annual rent price is in dollars, we have to convert it to Rupiah.\n\nThe assumption used is that 1 USD = 14,700 IDR.",
"_____no_output_____"
]
],
[
[
"df.Currency.value_counts()",
"_____no_output_____"
]
],
[
[
"We see that there are 5200 apartment unit rent prices which are listed in Rupiah, 57 prices which are listed in US Dollars. We need to convert the price of these 57 apartment units from USD to IDR. To convert it, we need to multiply the Annual_Price value by 14700 if the value of Currency equals to 'USD'. However, before doing any of that, we need to make sure that the values in Annual_Price columns are read as numbers by pandas. ",
"_____no_output_____"
]
],
[
[
"df.Annual_Price",
"_____no_output_____"
]
],
[
[
"As we can see, the 'Annual_Price' has the data type of object. This means we need to convert it to float first, before multiplying it by 14700 if it is in USD to convert it properly. ",
"_____no_output_____"
]
],
[
[
"Rupiah_Annual_Price = list()\ncurrency_changed = 0\nfor i, price in enumerate(df.Annual_Price):\n if df.Currency[i] == 'USD':\n Rupiah_Annual_Price.append(float(price)*14700)\n currency_changed += 1\n else:\n Rupiah_Annual_Price.append(float(price))\n \ndf['Rupiah_Annual_Price'] = Rupiah_Annual_Price\nprint(currency_changed)",
"52\n"
]
],
[
[
"The currency_changed counter is used to tell us how many currency conversion has been done, and we are glad to see that there are 57 currency conversions, which is the same number of 'USD' occurences in the 'Currency' column of our dataset.\n\nNext, we are going to remove the columns which are no longer needed ('Currency', 'Annual_Price' 'Monthly_Price', 'Deposit_Currency', 'Deposit_Charge', 'Service_Currency', 'Service_Charge'). \n\nWe are then renaming the 'Rupiah_Annual_Price' to 'AnnualPrice'.",
"_____no_output_____"
]
],
[
[
"df = df.drop(['Currency', 'Annual_Price', 'Monthly_Price', 'Deposit_Currency', 'Deposit_Charge', 'Service_Currency', \n 'Service_Charge'], axis = 'columns')\ndf = df.rename({'Rupiah_Annual_Price':'AnnualPrice'}, axis = 'columns')",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
]
],
[
[
"## Exploratory Data Analysis",
"_____no_output_____"
]
],
[
[
"df.columns",
"_____no_output_____"
]
],
[
[
"In this step, we are going to do some data exploration to gain more insights on our dataset. First, we'll drop columns which we think might not be insightful for our model. We'll drop the 'Street' and 'Tower' column as it's quite difficult to parse and does not supply us with any insightful information. The 'Street' column is irrelevant as we have 'Locality', 'Region', as well as 'Longitude' & 'Latitude' column to draw geospatial insights form. The 'Tower' column is dropped because it's the name of the tower of each unit, and each apartment complex has different tower names. We suspect that the 'Unit_Name' and 'Apt_Name' might be dropped too, but we'll inspect them in a little bit to see if there are any insights we can draw from those columns. \n\nNote: We'll keep the 'URL' and 'Unit_ID' until we finish exploring the data in case we want to check on specific apartment units.",
"_____no_output_____"
]
],
[
[
"df = df.drop(['Street', 'Tower'], axis = 'columns')",
"_____no_output_____"
]
],
[
[
"Next, we are going to inspect the 'Unit_Name' and 'Apt_Name' columns.",
"_____no_output_____"
]
],
[
[
"df[['Apt_Name', 'Unit_Name']].head()",
"_____no_output_____"
]
],
[
[
"It seems that the 'Apt_Name' column just indicates the overall name of our Apartment complex, while the 'Unit_Name' mentions the number of bedrooms, and in some case, the furnished status of the apartment. Interestingly, the furnished status in 'Unit_Name' are divided into three levels: 'Non Furnished', 'Semi Furnished', and 'Fully Furnished'. However, in our 'Furnished' column, there are only two levels: 'Non Furnished' and 'Fully Furnished'.\n\nWe can add a new level to our 'Furnished' feature by creating a 'Semi Furnished' level if the 'Unit_Name' of a particular row has the word 'semi' in it. We'll create a new column called 'FurnishedNew' for this feature.",
"_____no_output_____"
]
],
[
[
"FurnishedNew = list()\nfor i in range(len(df['Index'])):\n if df.Furnished[i] == '1':\n FurnishedNew.append('Full')\n elif df.Furnished[i] == '0':\n if 'semi' in df.Unit_Name[i].lower():\n FurnishedNew.append('Semi')\n else:\n FurnishedNew.append('Non')\ndf['FurnishedNew'] = FurnishedNew",
"_____no_output_____"
],
[
"df.FurnishedNew.value_counts()",
"_____no_output_____"
]
],
[
[
"We'll see if this new feature is better than the existing 'Furnished' column. If this feature makes the model worse, then we'll simply use the two level 'Furnished' feature. We'll then drop the 'Apt_Name' and 'Unit_Name' column.",
"_____no_output_____"
]
],
[
[
"df = df.drop(['Unit_Name', 'Apt_Name'], axis = 'columns')",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
]
],
[
[
"Next, we are going to analyse each column and see if it is a good feature for our model or not. We will be plotting each feature against the predicted value, the 'AnnualPrice'. While there are other ways to perform feature selection which are relatively more automated, the writer chooses to do this to gain more insights personally on the dataset.",
"_____no_output_____"
],
[
"#### Number of Bedrooms",
"_____no_output_____"
]
],
[
[
"bedroom_df = df[['URL','No_Rooms', 'AnnualPrice']]\nbedroom_df.No_Rooms.value_counts()",
"_____no_output_____"
]
],
[
[
"The apartment units in our dataset have 0 to 6 'number of bedrooms'. What does '0' number of bedrooms mean? During the scraping process, the writer discovered that studio apartment units are written as having '0' number of bedrooms in the ```.json``` schema of the web page. We can then use ```df.groupby``` to see the average annual rent price of each category.",
"_____no_output_____"
]
],
[
[
"avg_no_rooms = bedroom_df.groupby('No_Rooms')['AnnualPrice'].mean().reset_index().rename({'AnnualPrice':'Average Annual Price'}, axis = 'columns')\nprint(avg_no_rooms)\navg_no_rooms.plot(x = 'No_Rooms', y = 'Average Annual Price', kind = 'bar', figsize = [5,5])",
" No_Rooms Average Annual Price\n0 0 7.622950e+08\n1 1 8.501359e+07\n2 2 1.028243e+08\n3 3 2.196271e+08\n4 4 4.380461e+08\n5 6 1.000000e+09\n"
]
],
[
[
"First column and we're already greeted with a surprise. Why is the studio apartment unit's average price higher than the average price of apartment units with 6 bedrooms? This is why exploring our dataset manually, or the way I prefer to say it - 'personally', is important. This data does not match our common sense, and we need to investigate it. The first thing to do in this situation is try to check for outliers.",
"_____no_output_____"
]
],
[
[
"studio_check = bedroom_df['No_Rooms'] == '0'\nsns.boxplot(x = bedroom_df[studio_check].AnnualPrice)",
"_____no_output_____"
]
],
[
[
"First, we filter the 'AnnualPrice' column by 'No_Rooms' category. After selecting the annual rent prices of apartment units which are studio-typed, we can draw the boxplot using seaborn and we see there are two outliers. Let's check these out.",
"_____no_output_____"
]
],
[
[
"bedroom_df[studio_check].sort_values(by=['AnnualPrice'], ascending=False).head(5)",
"_____no_output_____"
]
],
[
[
"After sorting by Annual Price, the top two apartment units have prices that are clearly beyond what's 'the norm' for studio apartment units. Using ```pd.set_option('display.max_colwidth', None)```, we can get the URL for these two apartment units, and then see for ourselves on their respective pages.",
"_____no_output_____"
]
],
[
[
"pd.set_option('display.max_colwidth', None)\nbedroom_df[studio_check].sort_values(by=['AnnualPrice'], ascending=False).head(2).URL",
"_____no_output_____"
]
],
[
[
"Upon looking at the first link, we see that this 25 meter squared, studio apartment, is priced at fifty four million dollars. I think we can see the problem here. There are a few pages in which the currency used is wrong. Even apartments with 6 bedrooms are not priced at fifty four million dollars a year. This unit's price should be fifty four million rupiah. \n\nThe second unit in question also shares the problem. This time, the studio apartment is priced at thirty million dollars. We first need to clean this mess before we continue exploring the other columns. Let's also check if other numbers of bedrooms share the same issue.",
"_____no_output_____"
]
],
[
[
"br2_check = bedroom_df['No_Rooms'] == '2'\nsns.boxplot(x = bedroom_df[br2_check].AnnualPrice)",
"_____no_output_____"
],
[
"bedroom_df[br2_check].sort_values(by=['AnnualPrice'], ascending=False).head(5)",
"_____no_output_____"
]
],
[
[
"Turns out the problem isn't unique to studio apartments. We have to solve this issue first then, and unfortunately this can only be done in a relatively manual manner (checking the URL one by one). I'll get back after resolving this issue.",
"_____no_output_____"
],
[
"#### Finding and Fixing Outliers based on Number of Bedrooms",
"_____no_output_____"
],
[
"Create boolean identifiers",
"_____no_output_____"
]
],
[
[
"studio_check = bedroom_df['No_Rooms'] == '0'\nbr1_check = bedroom_df['No_Rooms'] == '1'\nbr2_check = bedroom_df['No_Rooms'] == '2'\nbr3_check = bedroom_df['No_Rooms'] == '3'\nbr4_check = bedroom_df['No_Rooms'] == '4'\nbr5_check = bedroom_df['No_Rooms'] == '5'\nbr6_check = bedroom_df['No_Rooms'] == '6'",
"_____no_output_____"
]
],
[
[
"Fix for No_Rooms = '0' (Studio-Type)",
"_____no_output_____"
]
],
[
[
"bedroom_df[studio_check].sort_values(by=['AnnualPrice'], ascending=False).head(5)",
"_____no_output_____"
],
[
"df.loc[df.Unit_ID == 'sgpa014', 'AnnualPrice'] = 54000000",
"_____no_output_____"
],
[
"df.loc[df.Unit_ID == 'pgva007', 'AnnualPrice'] = 30000000",
"_____no_output_____"
]
],
[
[
"Fix for No_Rooms = '1' (One Bedroom)",
"_____no_output_____"
]
],
[
[
"bedroom_df[br1_check].sort_values(by=['AnnualPrice'], ascending=False).head(5)",
"_____no_output_____"
],
[
"sns.boxplot(x = bedroom_df[br1_check].AnnualPrice)",
"_____no_output_____"
]
],
[
[
"I think the rent prices for 1 bedroom apartment units are skewed to the right. None of the five highest apartment units (of one bedroom) have annual rent prices displayed in dollars. However, we're going to remove one point which is the highest priced apartment unit as it's quite far from the rest of the data points. ",
"_____no_output_____"
]
],
[
[
"i = df[((df.Unit_ID == 'frrb001'))].index\ndf = df.drop(i)",
"_____no_output_____"
]
],
[
[
"Fix for 'No_Rooms' = '2' (Two Bedrooms)",
"_____no_output_____"
]
],
[
[
"bedroom_df[br2_check].sort_values(by=['AnnualPrice'], ascending=False).head(5)",
"_____no_output_____"
],
[
"sns.boxplot(x = bedroom_df[br2_check].AnnualPrice)",
"_____no_output_____"
],
[
"df.loc[df.Unit_ID == 'blmc009', 'AnnualPrice'] = 50400000",
"_____no_output_____"
]
],
[
[
"Fix for 'No_Rooms' = '3' (Three Bedrooms)",
"_____no_output_____"
]
],
[
[
"bedroom_df[br3_check].sort_values(by=['AnnualPrice'], ascending=False).head(5)",
"_____no_output_____"
],
[
"sns.boxplot(x = bedroom_df[br3_check].AnnualPrice)",
"_____no_output_____"
]
],
[
[
"It turns out that the highest bedroom price is still in Rupiah. However, the rightmost data point is considerably far away from the rest of the data points, and we'll consider it as an outlier to be removed.",
"_____no_output_____"
]
],
[
[
"i = df[((df.Unit_ID == 'esdd002'))].index\ndf = df.drop(i)",
"_____no_output_____"
]
],
[
[
"Fix for 'No_Rooms' = '4' (Four Bedrooms)",
"_____no_output_____"
]
],
[
[
"bedroom_df[br4_check].sort_values(by=['AnnualPrice'], ascending=False).head(5)",
"_____no_output_____"
],
[
"sns.boxplot(x = bedroom_df[br4_check].AnnualPrice)",
"_____no_output_____"
]
],
[
[
"Although there seems to be two outliers, upon further checking they don't seem to be a case of misused currency. However, those two rightmost points are considerably far away from the rest of the other data points, and we'll consider them as outliers to be removed. These two prices are even higher than apartment units with 6 bedrooms, and do not represent the norm. ",
"_____no_output_____"
]
],
[
[
"i = df[((df.Unit_ID == 'pkrf001'))].index\ndf = df.drop(i)\ni = df[((df.Unit_ID == 'ppre001'))].index\ndf = df.drop(i)",
"_____no_output_____"
]
],
[
[
"Fix for 'No_Rooms' = '5' (Five Bedrooms)",
"_____no_output_____"
]
],
[
[
"bedroom_df[br5_check].sort_values(by=['AnnualPrice'], ascending=False).head(5)",
"_____no_output_____"
]
],
[
[
"There are no apartment units in our dataset which has five bedrooms. We are not going to remove anything for now.",
"_____no_output_____"
],
[
"Fix for 'No_Rooms' = '6' (Six Bedrooms)",
"_____no_output_____"
]
],
[
[
"bedroom_df[br6_check].sort_values(by=['AnnualPrice'], ascending=False).head(5)",
"_____no_output_____"
]
],
[
[
"There is only one apartment unit with six bedrooms. We are not going to remove anything for now - however, we might combine the units with 4, 5, and 6 into one category.",
"_____no_output_____"
]
],
[
[
"br456_check = (bedroom_df.No_Rooms== '4') | (bedroom_df.No_Rooms == '5') | (bedroom_df.No_Rooms == '6')",
"_____no_output_____"
],
[
"sns.boxplot(x = bedroom_df[br456_check].AnnualPrice)",
"_____no_output_____"
]
],
[
[
"#### Checking on the Updated Dataframe for No_Rooms Feature",
"_____no_output_____"
]
],
[
[
"New_No_Rooms = list()\nfor i, br_no in enumerate(df.No_Rooms):\n br_float = int(br_no)\n if br_float >= 4:\n New_No_Rooms.append(4)\n else:\n New_No_Rooms.append(br_float)\n \ndf.drop(['No_Rooms'], axis = 'columns')\ndf['No_Rooms'] = New_No_Rooms",
"_____no_output_____"
],
[
"bedroom_df_updated = df[['URL','No_Rooms', 'AnnualPrice']]\navg_no_rooms = bedroom_df_updated.groupby('No_Rooms')['AnnualPrice'].mean().reset_index().rename({'AnnualPrice':'Average Annual Price'}, axis = 'columns')\nprint(avg_no_rooms)\navg_no_rooms.plot(x = 'No_Rooms', y = 'Average Annual Price', kind = 'bar', figsize = [5,5])",
" No_Rooms Average Annual Price\n0 0 4.913592e+07\n1 1 8.462027e+07\n2 2 1.027738e+08\n3 3 2.181231e+08\n4 4 3.530135e+08\n"
],
[
"sns.boxplot(x = \"No_Rooms\", y = 'AnnualPrice', data = df)",
"_____no_output_____"
]
],
[
[
"There we go. Now it made sense - the more bedrooms an apartment unit has, the higher the annual rent price. However, there are no apartment units which are priced way above the other units in the same category. Through evaluating outliers and checking on the source data, we have 'cleaned' the 'No_Rooms' feature for now. \n\nThe last step taken for this feature column is grouping the categories of '4', '5', and '6'. There are only 3 units out of our more than 5000 rows which have 5 and 6 bedrooms, and that is not quite representative. \n\nNow, we might ask, why is the new category (of units with 4 and more bedrooms) given the value '4'? Shouldn't it be '4 and more'? \n\nYes. It represents the number of bedrooms of 4 and more. However, this categorical variable will be treated as an ordinal variable in the machine learning model. That's why we have to keep the values as integers. We'll just have to keep in mind later when writing the final report, that the number '4' in the No_Rooms feature not only represents units with 4 bedrooms, but also units with more than 4 bedrooms. ",
"_____no_output_____"
],
[
"#### Analyzing Location Feature Columns",
"_____no_output_____"
],
[
"The next part of our features to be discussed are the columns which describe where our unit is on the map. There are four columns being discussed here - two which are categorical ('Locality' and 'Region'), as well as two continuous columns ('Longitude' and 'Latitude'). First let's look at the 'Region' columns.",
"_____no_output_____"
]
],
[
[
"df.Region.value_counts()",
"_____no_output_____"
]
],
[
[
"Whoa. Turns out the scraped pages also include apartment units from outside Jakarta and its surroundings. To stay true to our problem statement, we'll remove regions outside 'Jabodetabek'. ",
"_____no_output_____"
]
],
[
[
"df = df[(df.Region == 'Jakarta Selatan') | (df.Region == 'Jakarta Barat') | (df.Region == 'Jakarta Pusat') | (df.Region == 'Jakarta Timur') | (df.Region == 'Jakarta Utara') | (df.Region == 'Tangerang') | (df.Region == 'Bekasi') | (df.Region == 'Depok') | (df.Region == 'Bogor')]",
"_____no_output_____"
]
],
[
[
"Let's visualize the data using a boxplot again. Now, we're investigating if differences in regions affect annual rent price.",
"_____no_output_____"
]
],
[
[
"dims = (12,8)\nfig, ax = plt.subplots(figsize=dims)\nsns.boxplot(x = \"Region\", y = 'AnnualPrice', data = df, ax=ax)",
"_____no_output_____"
],
[
"JakBar = df['Region'] == 'Jakarta Barat'\ndf[JakBar][['URL', 'AnnualPrice']].sort_values(by = ['AnnualPrice'], ascending=False).head(1)",
"_____no_output_____"
]
],
[
[
"From the visualization, we can see that the region in DKI Jakarta with the highest average annual rent price is 'Jakarta Selatan', followed by 'Jakarta Pusat', 'Jakarta Barat', 'Jakarta Utara', and 'Jakarta Timur' consecutively. Regions outside Jakarta have lower average prices than regions inside Jakarta. This distribution makes sense, as it is quite a common knowledge for Jakartans to know that the region with the highest property price in Jakarta is 'Jakarta Selatan'. \n\nThere seems to be an outlier in 'Jakarta Barat', but upon further checking - it's the only unit with 6 bedrooms, so the price reflects more of its number of rooms than its region. We will not remove this data point for now.",
"_____no_output_____"
],
[
"There are a few options on how we are going to use the locations columns in our model:\n\nOption 1: Uses one hot encoding on Region. This seems to be the go-to-solution if we wishes to make location a categorical variable. We'll divide the area into six major Regions - West, North, South, East, Center Jakarta, and outside Jakarta (we group Bogor, Depok, Tangerang, and Bekasi into one Region).\n\nOption 2: Uses one hot encoding on Locality. There are over 90 different local districts in this data set, and one hot encoding would mean that we'll have 90+ extra feature columns of zeros and ones. Furthermore, a lot of these local districts have only one apartment unit.\n\nOption 3: Uses the 'Longitude' and 'Latitude' column as continuous variables. This could be the case if we notice a pattern on the longitude and latitude data. We could also do clustering algorithm on longitude and latitude data. \n\nWe'll look into the 'Longitude' and 'Latitude' columns first.",
"_____no_output_____"
]
],
[
[
"print(df.Longitude.dtype)\nprint(df.Latitude.dtype)",
"object\nobject\n"
]
],
[
[
"It seems that these two columns are classified as 'object' and not 'float' by Pandas. We need to transform them first.",
"_____no_output_____"
]
],
[
[
"df = df.reset_index(drop = True, inplace=False)\nLongitude_Float = list()\nLatitude_Float = list()\n\nfor i in range(len(df.Index)):\n Longitude_Float.append(float(df.Longitude[i]))\n Latitude_Float.append(float(df.Latitude[i]))\n\ndf.drop(['Longitude', 'Latitude'], axis = 'columns')\ndf['Longitude'] = Longitude_Float\ndf['Latitude'] = Latitude_Float",
"_____no_output_____"
],
[
"df.Longitude.plot()",
"_____no_output_____"
]
],
[
[
"After converting both columns to float, let's visualize each column to analyze if there are any outliers. As the geographical location chosen for this project is quite close to each other, there shouldn't be any outliers. The 'Longitude' dataset makes sense: all our apartment units have Longitude between 106.6 until 107.2.",
"_____no_output_____"
]
],
[
[
"df.Latitude.plot()",
"_____no_output_____"
]
],
[
[
"The 'Latitude' feature column, however, seems to have yet another issue related to an error in data entry. Most of the apartment units have Latitude around -6, which makes sense, as Jakarta (and its surrounding) are located slightly beneath the Equator. However, there are a few data points which have a latitude of 6. This is suspicious as it could very well be a case of forgetting to add '-' (the negative sign) during the data entry process for these apartment units. For now, let's assume this to be the case, and put a negative value on the latitude feature of these apartment units.",
"_____no_output_____"
]
],
[
[
"Latitude_fixed = [la if la<0 else -1*la for la in df.Latitude]\ndf.drop(['Latitude'], axis = 'columns')\ndf['Latitude'] = Latitude_fixed",
"_____no_output_____"
],
[
"df.Latitude.plot()",
"_____no_output_____"
]
],
[
[
"This distribution made more sense as the value of 'Latitude' ranges from -6.6 to -6.1, not a big margin, and the three data points with the lowest 'Latitude' seems to be apartment units outside Jakarta (maybe in Bogor/Depok).",
"_____no_output_____"
],
[
"#### Analyzing Furnished Status Feature Column\n\nNow, let's visualize and take a look at the two columns describing the furnished status of each apartment unit - the original 'Furnished', and our newly created 'FurnishedNew'. ",
"_____no_output_____"
]
],
[
[
"fig, (ax1, ax2) = plt.subplots(1, 2)\nsns.scatterplot(x = \"Furnished\", y = 'AnnualPrice', data = df, hue = 'No_Rooms', ax=ax1)\nsns.scatterplot(x = \"FurnishedNew\", y = 'AnnualPrice', data = df, hue = 'No_Rooms', ax=ax2)\nfig.tight_layout()",
"_____no_output_____"
]
],
[
[
"There are two takeaways from this: first, the discrepancy between non-furnished and fully furnished apartment units' prices doesn't seem to be that big. Second, our new column, 'FeatureNew', shows that semi-furnished apartments have lower prices compared to non-furnished ones. \n\nWhat should we make of this? Our new feature column doesn't seem to work well - this might be because not all apartment units which are semi-furnished write that they are 'semi-furnished' in their page name. The population of 'semi-furnished' apartments may be much more than what was being labeled as 'Semi'. This explains two things: why adding an extra category doesn't work well, and why the discrepancy between '0' and '1' is not that far away from each other.\n\nThis could indicate that 'Furnished' is not a good predictor for AnnualPrice, but we'll decide it later in the next feature engineering section.",
"_____no_output_____"
],
[
"#### Analyzing Floor Position of Apartment Units\n\nThe feature column we're looking at this section is the 'Floor' column. We'll see if there are differences in annual rent price between units with different floor positions.",
"_____no_output_____"
]
],
[
[
"sns.boxplot(x = \"Floor\", y = 'AnnualPrice', data = df)",
"_____no_output_____"
]
],
[
[
"Not only does the discrepancy among all floor locations seem to be minuscule, we also have quite a few apartment units with no labels of their floor location. For now, let's not use this categorical variable in our model.",
"_____no_output_____"
]
],
[
[
"df = df.drop(['Floor'], axis = 'columns')\ndf = df.reset_index(drop = True, inplace=False)",
"_____no_output_____"
]
],
[
[
"#### Analyzing Area of Units to AnnualPrice",
"_____no_output_____"
]
],
[
[
"Area_Float = list()\n\nfor i in range(len(df.Index)):\n Area_Float.append(float(df.Area[i]))\n\ndf.drop(['Area'], axis = 'columns')\ndf['Area'] = Area_Float",
"_____no_output_____"
],
[
"dims = (8,5)\nfig, ax = plt.subplots(figsize=dims)\nsns.scatterplot(x = \"Area\", y = 'AnnualPrice', data = df, hue = 'No_Rooms', ax=ax)",
"_____no_output_____"
]
],
[
[
"Based on the above plot, we can see that the general trend is that AnnualPrice increases as Area increases. We also see that as number of bedrooms increases, area also increases. However, there are a few data points which are scattered far from the others that we need to investigate. They could be outliers and we should remove them.",
"_____no_output_____"
]
],
[
[
"df[['URL', 'Area']].sort_values(by=['Area'], ascending = False).head(12)",
"_____no_output_____"
]
],
[
[
"There are six apartment units with areas above 500 meter squared. That's a huge apartment unit - two of them even reaches more than seven thousand meter squared. These units are not what in most people's mind when they're looking to rent an apartment unit - as these units come in the form of condominium or penthouse. We'll be removing these six units from our data set. In the deployment stage of this machine learning model, we'll limit the maximum Area to be 350 meter squared, as that is already a very big apartment unit.",
"_____no_output_____"
]
],
[
[
"i = df[((df.Unit_ID == 'tacc001'))].index\ndf = df.drop(i)\n\ni = df[((df.Unit_ID == 'tacc002'))].index\ndf = df.drop(i)\n\ni = df[((df.Unit_ID == 'kmvd027'))].index\ndf = df.drop(i)\n\ni = df[((df.Unit_ID == 'mqrd023'))].index\ndf = df.drop(i)\n\ni = df[((df.Unit_ID == 'csbe001'))].index\ndf = df.drop(i)\n\ni = df[((df.Unit_ID == 'stme008'))].index\ndf = df.drop(i)",
"_____no_output_____"
],
[
"df = df.reset_index(drop = True, inplace=False)\ndf.shape",
"_____no_output_____"
],
[
"dims = (8,5)\nfig, ax = plt.subplots(figsize=dims)\nsns.scatterplot(x = \"Area\", y = 'AnnualPrice', data = df, hue = 'No_Rooms', ax=ax)",
"_____no_output_____"
]
],
[
[
"This visualization made more sense as there are no far outliers. However, something we notice is that there are some apartment units which are listed as having 0 Area. We'll simply remove these units as it's impossible for apartment units to have 0 meter squared of area. We'll consider than 20 meter squared is the minimum apartment unit area. \n\nWe'll be also removing apartments with more than 250 meter squared because they don't seem to be that common, and most people's preferences are apartments below 200 meter squared.",
"_____no_output_____"
]
],
[
[
"df = df[df['Area']>20]\ndf = df[df['Area']<250]",
"_____no_output_____"
],
[
"df.columns",
"_____no_output_____"
]
],
[
[
"#### Checking Categorical Facility Features\n\nOur last sets of features are the facilities that each unit has. During the web scraping process, I added a column in which it counts how many of these features that the unit has, and store them in a column called 'Total_Facilities'. Let's first take a look at this column, before diving into other facilities one-by-one.",
"_____no_output_____"
]
],
[
[
"Facilities_Int = list()\nfor i, count in enumerate(df.Total_Facilities):\n Facilities_Int.append(int(count))\n\ndf.drop(['Total_Facilities'], axis = 'columns')\ndf['Total_Facilities'] = Facilities_Int\n\nsns.boxplot(x=\"Total_Facilities\", data = df)",
"_____no_output_____"
],
[
"sns.scatterplot(x=\"Total_Facilities\", y = \"AnnualPrice\", data = df)",
"_____no_output_____"
]
],
[
[
"It seems that most apartment units have at least 10 facilities. The more facilities a unit has, the higher its rent price is. Let's take a look at the units which has features less than 10, and see if they actually have less than 10 features, or there are some errors here.",
"_____no_output_____"
]
],
[
[
"df[['URL', 'Total_Facilities', 'AnnualPrice', 'Furnished']].sort_values(by = ['Total_Facilities'], ascending = True).head(10)",
"_____no_output_____"
]
],
[
[
"The apartment units with low Total_Facilities tend to be Non-Furnished units. However, there's an oddball here - the unit with 0 'Total_Facilities' is a fully-furnished unit! Upon further investigation, based on the photos of the room, there are indeed facilities and it might be some errors in inputing the data (or the unit owner/seller does not describe the facilities fully). We are going to remove that unit from our dataset. As for the other fully-furnished unit with only 3 total facilities, the page and pictures show that it is indeed quite a blank unit. There are beds and sofas - but there is no fancy facilities like TV or Internet. ",
"_____no_output_____"
]
],
[
[
"i = df[((df.Unit_ID == 'spsa001'))].index\ndf = df.drop(i)\ndf = df.reset_index(drop = True, inplace=False)",
"_____no_output_____"
]
],
[
[
"Next, we are going to draw boxplots of each facilities. To recall, if a facility is present in a unit, it will has value '1', if not, it will have the value of '0'. We would like to see if the presence of these facilities impact the annual rent price of apartment units. We'll remove facilities whose existence (or inexistence) does not impact the annual rent price. ",
"_____no_output_____"
]
],
[
[
"fig, (ax1, ax2) = plt.subplots(1, 2)\nsns.scatterplot(x = \"Furnished\", y = 'AnnualPrice', data = df, hue = 'No_Rooms', ax=ax1)\nsns.scatterplot(x = \"FurnishedNew\", y = 'AnnualPrice', data = df, hue = 'No_Rooms', ax=ax2)\nfig.tight_layout()",
"_____no_output_____"
],
[
"fig, ((ax1, ax2, ax3, ax4), (ax5, ax6, ax7, ax8),\n (ax9, ax10, ax11, ax12), (ax13, ax14, ax15, ax16),\n (ax17, ax18, ax19, ax20), (ax21, ax22, ax23, ax24)) = plt.subplots(6, 4, figsize = (15,25))\nfig.suptitle('Facilities and AnnualPrice Visualization')\nfor i, ax in enumerate(fig.get_axes()):\n column = df.columns[i+11]\n sns.boxplot(x = column, y = 'AnnualPrice', data = df, ax = ax)",
"_____no_output_____"
]
],
[
[
"Based on the visualization above, for each facilities, the trend is clear - the presence of facilities affects the unit annual rent price positively. This proves to be quite troublesome when we want to do feature selection - we don't know which facility is less important than the other. We'll keep most facilities for the most part, but we'll reomve two of them right away - 'Electricity' and 'Access Card'. Why? Because most apartment units have them - it's not a 'facility' anymore - it is a necessity. There are 300-400 apartments which are listed as having no 'Electricity', but it doesn't really make sense. We do this because we are thinking about the deployment phase of our model. Our future users won't choose to have an apartment unit without 'Electricity' or 'Access Card'.\n\nThis concludes our first part. To recap, we have:\n- removed uninsightful columns\n- checked and removed outliers\n- fixed abnormal data (latitude and misused currency)\n- visualize features\n\nWe also now have a rough understanding on the annual rent price of apartment units in Jakarta: the most expensive apartments are usually found at Jakarta Selatan - and the more area a unit occupies, the more bedrooms & facilities it has, the higher its annual rent price is. \n\nIn the next part, we are going to:\n- scale numerical features\n- split the dataset into testing and training set\n- create and evaluate baseline model\n- conduct feature engineering based on the feedback gained on baseline model\n- test new models and decide which model is the best",
"_____no_output_____"
]
],
[
[
"df.to_csv('Cleaned Apartment Data.csv')",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a21d5ea22dd9b25116fb23ac6f21bd1c4c5b0e6
| 19,908 |
ipynb
|
Jupyter Notebook
|
samples/week02/03-shape-example.ipynb
|
digitalideation/digcre_h2101
|
57a4565ee78550d0448be4719c1f24dc077088bc
|
[
"MIT"
] | null | null | null |
samples/week02/03-shape-example.ipynb
|
digitalideation/digcre_h2101
|
57a4565ee78550d0448be4719c1f24dc077088bc
|
[
"MIT"
] | null | null | null |
samples/week02/03-shape-example.ipynb
|
digitalideation/digcre_h2101
|
57a4565ee78550d0448be4719c1f24dc077088bc
|
[
"MIT"
] | null | null | null | 40.463415 | 4,786 | 0.683695 |
[
[
[
"# Reference\n\nTo run this code you will need to install [Matplotlib](https://matplotlib.org/users/installing.html) and [Numpy](https://www.scipy.org/install.html)\n\nIf you like to run the example locally follow the instructions provided on [Keras website](https://keras.io/#installation)\n\nIt's __strongly__ suggested to use a Python environments manager such as [Conda](https://conda.io/docs/) or some kind of [VirutalEnv](#)\n\n[](https://colab.research.google.com/github/digitalideation/digcre_h2101/blob/master/samples/week02/03-shape-example.ipynb)\n\n---",
"_____no_output_____"
],
[
"# A second look at a neural network\n\nLet's try to adapt the shape classification model built with the `toyNN` in js before.\n\nWe first need to create a dataset _manually_ to do we will define a `draw_shape` function that will help generating some random shape",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport math\nimport random\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# 0 = rectangle, 1 = triangle, 2 = ellipse\n# return shape\ndef draw_shape(max_size, type):\n \n # Random size and fixed coordinate\n# s = math.floor(random.randrange(1, max_size-4))\n# x = math.floor(max_size/2)\n# y = math.floor(max_size/2)\n\n # Not so random size and random coordinate\n s = int(random.randrange(max_size/2, max_size))\n x = int(random.randrange(int(s/2), max_size-int(s/2)))\n y = int(random.randrange(int(s/2), max_size-int(s/2)))\n\n type = type%3\n \n if type == 0:\n art = plt.Rectangle((x-s/2, y-s/2), s, s, color='r')\n\n if type == 1:\n verts = [\n (x-s/2, y-s/2),\n (x, y+s/2),\n (x+s/2, y-s/2)\n ]\n art = plt.Polygon(verts, color='r')\n\n if type == 2:\n art = plt.Circle((x, y), s/2, color='r')\n \n return art",
"_____no_output_____"
]
],
[
[
"We also define a helper function that convert a matplotlib figure to a np array",
"_____no_output_____"
]
],
[
[
"# https://stackoverflow.com/a/7821917\ndef fig2rgb_array(fig):\n fig.canvas.draw()\n buf = fig.canvas.tostring_rgb()\n ncols, nrows = fig.canvas.get_width_height()\n return np.frombuffer(buf, dtype=np.uint8).reshape(nrows, ncols, 3)",
"_____no_output_____"
]
],
[
[
"Let's test the function see if it works as expected",
"_____no_output_____"
]
],
[
[
"# Image and dataset size we are going to use\nimage_size = 48\ndataset_size = 5000\n\n# Create plot's figure and axes\n# https://stackoverflow.com/a/638443\nfig = plt.figure(figsize=(1,1), dpi=image_size)\nax = fig.add_subplot(111)\n\n# Setting for the axes\nax.set_xlim(0,image_size)\nax.set_ylim(0,image_size)\n# ax.axis('off')\n\n# Draw a random shape\nart = draw_shape(image_size,random.randint(0,2))\n# Add the shape to the plot\n# https://stackoverflow.com/a/29184075\nplt.gcf().gca().add_artist(art)\n# gcf() means Get Current Figure\n# gca() means Get Current Axis\n\n# convert the figure to an array\ndata = fig2rgb_array(fig)\nprint(data.shape)",
"(48, 48, 3)\n"
]
],
[
[
"Let's create a loop that will generate a small dataset for us",
"_____no_output_____"
]
],
[
[
"def generate_dataset(image_size, dataset_size):\n\n # Those variable will contain the images and associated labels\n images = np.zeros((dataset_size, image_size, image_size, 3))\n labels = np.zeros((dataset_size))\n \n # The plot figure we will use to generate the shapes\n fig = plt.figure(figsize=(1,1), dpi=image_size)\n\n for i in range(dataset_size):\n \n # Clear the figure\n fig.clf()\n \n # Recreate the axes\n ax = fig.add_subplot(111)\n ax.set_xlim(0, image_size)\n ax.set_ylim(0, image_size)\n ax.axis('off')\n \n # Define label\n label = i%3\n art = draw_shape(image_size, label)\n plt.gcf().gca().add_artist(art)\n \n # Add values to the arrays\n images[i] = fig2rgb_array(fig)\n labels[i] = label\n \n return images, labels\n\n# Generate our dataset\nimages, labels = generate_dataset(image_size, dataset_size)\nprint(images.shape)\nprint(labels.shape)",
"(5000, 48, 48, 3)\n(5000,)\n"
]
],
[
[
"Eventually we can save our dataset for later, since it takes quite some time to generate it 😉",
"_____no_output_____"
]
],
[
[
"np.save('datasets/shape-example-shapes1.npy', images)\nnp.save('datasets/shape-example-labels1.npy', labels)",
"_____no_output_____"
]
],
[
[
"If we need to load it we can then use the following code",
"_____no_output_____"
]
],
[
[
"images = np.load('datasets/shape-example-shapes1.npy')\nlabels = np.load('datasets/shape-example-labels1.npy')",
"_____no_output_____"
]
],
[
[
"We split our dataset manually in training and testing set",
"_____no_output_____"
]
],
[
[
"# Define the size of the training set, here we use 80% of the total samples for training\ntrain_size = int(dataset_size*.8)\n\n# TODO: We should shuffle the dataset\n\n# Split the dataset into train and test dataset\ntrain_images, test_images = images[:train_size], images[train_size:]\ntrain_labels, test_labels = labels[:train_size], labels[train_size:]\n\n# Verify the data\nprint(train_images.shape)\nprint(train_labels.shape)\n\n# sample_images = []\n# for label, image in list(zip(train_labels, train_images))[:10]:\n# fig1, ax1 = plt.subplots()\n# ax1.axis('off')\n# plt.title(label)\n# fig1.add_subplot(111).imshow(image/255)\n\nfull_image = np.concatenate(train_images[:12]/255, axis=1)\nplt.figure(figsize=(16,4))\nplt.imshow(full_image)",
"(4000, 48, 48, 3)\n(4000,)\n"
]
],
[
[
"Now we can create our model",
"_____no_output_____"
]
],
[
[
"from tensorflow import keras\nfrom tensorflow.keras import layers\nmodel = keras.Sequential([\n layers.Dense(512, activation=\"sigmoid\"),\n layers.Dense(3, activation=\"softmax\")\n])",
"_____no_output_____"
]
],
[
[
"And compile it",
"_____no_output_____"
]
],
[
[
"model.compile(optimizer='adam',\n loss='categorical_crossentropy',\n metrics=['accuracy'])",
"_____no_output_____"
]
],
[
[
"Before training, we will preprocess our data by reshaping it into the shape that the network expects, and scaling it so that all values are in the `[0, 1]` interval. Then we also need to categorically encode the labels.",
"_____no_output_____"
]
],
[
[
"# Reshape data\ntrain_images = train_images.reshape((len(train_images), 3 * image_size * image_size))\ntrain_images = train_images.astype('float32') / 255\n\ntest_images = test_images.reshape((len(test_images), 3 * image_size * image_size))\ntest_images = test_images.astype('float32') / 255\n\n# Encode to categorical\nfrom tensorflow.keras.utils import to_categorical\n\ntrain_labels = to_categorical(train_labels)\ntest_labels = to_categorical(test_labels)",
"_____no_output_____"
]
],
[
[
"Then we can start the training",
"_____no_output_____"
]
],
[
[
"model.fit(train_images, train_labels, epochs=100, batch_size=128)",
"_____no_output_____"
],
[
"test_loss, test_acc = model.evaluate(test_images, test_labels)",
"1000/1000 [==============================] - 0s 173us/step\n"
],
[
"print('test_acc:', test_acc)",
"test_acc: 0.9850000143051147\n"
],
[
"for label, image in list(zip(test_labels, test_images)):\n prediction = model.predict(np.array([image,]))\n if not prediction.argmax() == label.argmax():\n image = image.reshape(48, 48, 3)\n fig1, ax1 = plt.subplots()\n ax1.axis('off')\n plt.title('predicted:' + str(prediction.argmax()))\n fig1.add_subplot(111).imshow(image) ",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
4a21dc8aedc9d25948fd3e139d5db6586c449b9b
| 7,791 |
ipynb
|
Jupyter Notebook
|
parrot/examples/.ipynb_checkpoints/indexing-checkpoint.ipynb
|
pytrec/pytrec
|
d93c5132e3803917ccdd69dd3160253d1b6b3536
|
[
"Apache-2.0"
] | 14 |
2019-07-21T16:39:43.000Z
|
2021-06-21T09:13:28.000Z
|
parrot/examples/.ipynb_checkpoints/indexing-checkpoint.ipynb
|
pytrec/pytrec
|
d93c5132e3803917ccdd69dd3160253d1b6b3536
|
[
"Apache-2.0"
] | null | null | null |
parrot/examples/.ipynb_checkpoints/indexing-checkpoint.ipynb
|
pytrec/pytrec
|
d93c5132e3803917ccdd69dd3160253d1b6b3536
|
[
"Apache-2.0"
] | 1 |
2019-05-16T17:55:36.000Z
|
2019-05-16T17:55:36.000Z
| 48.092593 | 1,439 | 0.628802 |
[
[
[
"import os,sys\nmodule_path = os.path.abspath(os.path.join('/Users/tu/Desktop/trec/Parrot/venv/src'))\nsys.path.append(module_path)\nfrom parrot.indexing import *\n\nindex_folder = \"/Users/tu/Desktop/trec/task1/ap90-15/\"\ndoc_folder = \"/Users/tu/Desktop/trec/collection/disk3/\"\ndoc_patterns = [\"AP90[0-9]{4}\\.Z\"]\n\nindex = build_index(doc_folder, doc_patterns, index_folder)\n\n",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code"
]
] |
4a21ecc08b36fa01601a38451282be8626de99ee
| 27,449 |
ipynb
|
Jupyter Notebook
|
notebooks/spacy.ipynb
|
datascisteven/Automated-Hate-Tweet-Detection
|
ae4029f877f68ae0e8502e13edd31705f1fd066b
|
[
"MIT"
] | 2 |
2021-05-24T15:27:10.000Z
|
2022-03-23T04:06:36.000Z
|
notebooks/spacy.ipynb
|
datascisteven/Automated-Hate-Tweet-Detection
|
ae4029f877f68ae0e8502e13edd31705f1fd066b
|
[
"MIT"
] | null | null | null |
notebooks/spacy.ipynb
|
datascisteven/Automated-Hate-Tweet-Detection
|
ae4029f877f68ae0e8502e13edd31705f1fd066b
|
[
"MIT"
] | null | null | null | 39.156919 | 706 | 0.419032 |
[
[
[
"!python3 -m spacy download en_core_web_sm\n\n",
"Collecting en-core-web-sm==3.1.0\n Downloading https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.1.0/en_core_web_sm-3.1.0-py3-none-any.whl (13.6 MB)\n\u001b[K |████████████████████████████████| 13.6 MB 4.4 MB/s \n\u001b[?25hRequirement already satisfied: spacy<3.2.0,>=3.1.0 in /Users/examsherpa/opt/anaconda3/envs/nlp-env/lib/python3.8/site-packages (from en-core-web-sm==3.1.0) (3.1.0)\nRequirement already satisfied: blis<0.8.0,>=0.4.0 in /Users/examsherpa/opt/anaconda3/envs/nlp-env/lib/python3.8/site-packages (from spacy<3.2.0,>=3.1.0->en-core-web-sm==3.1.0) (0.7.4)\nRequirement already satisfied: jinja2 in /Users/examsherpa/opt/anaconda3/envs/nlp-env/lib/python3.8/site-packages (from spacy<3.2.0,>=3.1.0->en-core-web-sm==3.1.0) (3.0.1)\nRequirement already satisfied: pathy>=0.3.5 in /Users/examsherpa/opt/anaconda3/envs/nlp-env/lib/python3.8/site-packages (from spacy<3.2.0,>=3.1.0->en-core-web-sm==3.1.0) (0.6.0)\nRequirement already satisfied: cymem<2.1.0,>=2.0.2 in /Users/examsherpa/opt/anaconda3/envs/nlp-env/lib/python3.8/site-packages (from spacy<3.2.0,>=3.1.0->en-core-web-sm==3.1.0) (2.0.5)\nRequirement already satisfied: thinc<8.1.0,>=8.0.7 in /Users/examsherpa/opt/anaconda3/envs/nlp-env/lib/python3.8/site-packages (from spacy<3.2.0,>=3.1.0->en-core-web-sm==3.1.0) (8.0.7)\nRequirement already satisfied: wasabi<1.1.0,>=0.8.1 in /Users/examsherpa/opt/anaconda3/envs/nlp-env/lib/python3.8/site-packages (from spacy<3.2.0,>=3.1.0->en-core-web-sm==3.1.0) (0.8.2)\nRequirement already satisfied: spacy-legacy<3.1.0,>=3.0.7 in /Users/examsherpa/opt/anaconda3/envs/nlp-env/lib/python3.8/site-packages (from spacy<3.2.0,>=3.1.0->en-core-web-sm==3.1.0) (3.0.8)\nRequirement already satisfied: setuptools in /Users/examsherpa/opt/anaconda3/envs/nlp-env/lib/python3.8/site-packages (from spacy<3.2.0,>=3.1.0->en-core-web-sm==3.1.0) (57.1.0)\nRequirement already satisfied: preshed<3.1.0,>=3.0.2 in 
/Users/examsherpa/opt/anaconda3/envs/nlp-env/lib/python3.8/site-packages (from spacy<3.2.0,>=3.1.0->en-core-web-sm==3.1.0) (3.0.5)\nRequirement already satisfied: numpy>=1.15.0 in /Users/examsherpa/opt/anaconda3/envs/nlp-env/lib/python3.8/site-packages (from spacy<3.2.0,>=3.1.0->en-core-web-sm==3.1.0) (1.19.3)\nRequirement already satisfied: requests<3.0.0,>=2.13.0 in /Users/examsherpa/opt/anaconda3/envs/nlp-env/lib/python3.8/site-packages (from spacy<3.2.0,>=3.1.0->en-core-web-sm==3.1.0) (2.25.1)\nRequirement already satisfied: murmurhash<1.1.0,>=0.28.0 in /Users/examsherpa/opt/anaconda3/envs/nlp-env/lib/python3.8/site-packages (from spacy<3.2.0,>=3.1.0->en-core-web-sm==3.1.0) (1.0.5)\nRequirement already satisfied: pydantic!=1.8,!=1.8.1,<1.9.0,>=1.7.4 in /Users/examsherpa/opt/anaconda3/envs/nlp-env/lib/python3.8/site-packages (from spacy<3.2.0,>=3.1.0->en-core-web-sm==3.1.0) (1.8.2)\nRequirement already satisfied: srsly<3.0.0,>=2.4.1 in /Users/examsherpa/opt/anaconda3/envs/nlp-env/lib/python3.8/site-packages (from spacy<3.2.0,>=3.1.0->en-core-web-sm==3.1.0) (2.4.1)\nRequirement already satisfied: packaging>=20.0 in /Users/examsherpa/opt/anaconda3/envs/nlp-env/lib/python3.8/site-packages (from spacy<3.2.0,>=3.1.0->en-core-web-sm==3.1.0) (21.0)\nRequirement already satisfied: typer<0.4.0,>=0.3.0 in /Users/examsherpa/opt/anaconda3/envs/nlp-env/lib/python3.8/site-packages (from spacy<3.2.0,>=3.1.0->en-core-web-sm==3.1.0) (0.3.2)\nRequirement already satisfied: tqdm<5.0.0,>=4.38.0 in /Users/examsherpa/opt/anaconda3/envs/nlp-env/lib/python3.8/site-packages (from spacy<3.2.0,>=3.1.0->en-core-web-sm==3.1.0) (4.61.2)\nRequirement already satisfied: catalogue<2.1.0,>=2.0.4 in /Users/examsherpa/opt/anaconda3/envs/nlp-env/lib/python3.8/site-packages (from spacy<3.2.0,>=3.1.0->en-core-web-sm==3.1.0) (2.0.4)\nRequirement already satisfied: pyparsing>=2.0.2 in /Users/examsherpa/opt/anaconda3/envs/nlp-env/lib/python3.8/site-packages (from 
packaging>=20.0->spacy<3.2.0,>=3.1.0->en-core-web-sm==3.1.0) (2.4.7)\nRequirement already satisfied: smart-open<6.0.0,>=5.0.0 in /Users/examsherpa/opt/anaconda3/envs/nlp-env/lib/python3.8/site-packages (from pathy>=0.3.5->spacy<3.2.0,>=3.1.0->en-core-web-sm==3.1.0) (5.1.0)\nRequirement already satisfied: typing-extensions>=3.7.4.3 in /Users/examsherpa/opt/anaconda3/envs/nlp-env/lib/python3.8/site-packages (from pydantic!=1.8,!=1.8.1,<1.9.0,>=1.7.4->spacy<3.2.0,>=3.1.0->en-core-web-sm==3.1.0) (3.7.4.3)\nRequirement already satisfied: urllib3<1.27,>=1.21.1 in /Users/examsherpa/opt/anaconda3/envs/nlp-env/lib/python3.8/site-packages (from requests<3.0.0,>=2.13.0->spacy<3.2.0,>=3.1.0->en-core-web-sm==3.1.0) (1.26.6)\nRequirement already satisfied: idna<3,>=2.5 in /Users/examsherpa/opt/anaconda3/envs/nlp-env/lib/python3.8/site-packages (from requests<3.0.0,>=2.13.0->spacy<3.2.0,>=3.1.0->en-core-web-sm==3.1.0) (2.10)\nRequirement already satisfied: certifi>=2017.4.17 in /Users/examsherpa/opt/anaconda3/envs/nlp-env/lib/python3.8/site-packages (from requests<3.0.0,>=2.13.0->spacy<3.2.0,>=3.1.0->en-core-web-sm==3.1.0) (2021.5.30)\nRequirement already satisfied: chardet<5,>=3.0.2 in /Users/examsherpa/opt/anaconda3/envs/nlp-env/lib/python3.8/site-packages (from requests<3.0.0,>=2.13.0->spacy<3.2.0,>=3.1.0->en-core-web-sm==3.1.0) (4.0.0)\nRequirement already satisfied: click<7.2.0,>=7.1.1 in /Users/examsherpa/opt/anaconda3/envs/nlp-env/lib/python3.8/site-packages (from typer<0.4.0,>=0.3.0->spacy<3.2.0,>=3.1.0->en-core-web-sm==3.1.0) (7.1.2)\nRequirement already satisfied: MarkupSafe>=2.0 in /Users/examsherpa/opt/anaconda3/envs/nlp-env/lib/python3.8/site-packages (from jinja2->spacy<3.2.0,>=3.1.0->en-core-web-sm==3.1.0) (2.0.1)\nInstalling collected packages: en-core-web-sm\nSuccessfully installed en-core-web-sm-3.1.0\n\u001b[38;5;2m✔ Download and installation successful\u001b[0m\nYou can now load the package via spacy.load('en_core_web_sm')\n"
],
[
"import spacy \nimport en_core_web_sm \nnlp = en_core_web_sm.load()",
"_____no_output_____"
],
[
"import pickle\n# Load: insert variable to load pickle into\ntrain = pickle.load(open('../extra/pickle/train_bal.pickle', 'rb'))\nvalid = pickle.load(open('../extra/pickle/val_bal.pickle', 'rb'))",
"_____no_output_____"
],
[
"train.head()",
"_____no_output_____"
],
[
"train.tweet[0]",
"_____no_output_____"
],
[
"import pandas as pd\ns = train.tweet[0]\ndoc = nlp(s)\n\nrows = []\nfor token in doc:\n rows.append([token, token.lemma_, token.is_stop])\n\ndf = pd.DataFrame(rows, columns=['token', 'lemma', 'stopwords'])\ndf",
"_____no_output_____"
],
[
"import re\n\nre_token_match = spacy.tokenizer._get_regex_pattern(nlp.Defaults.token_match)\nre_token_match = f\"({re_token_match}|#\\\\w+)\"\nnlp.tokenizer.token_match = re.compile(re_token_match).match",
"_____no_output_____"
],
[
"doc = nlp(s)\n\nrows = []\nfor token in doc:\n rows.append([token, token.lemma_, token.is_stop])\n\ndf = pd.DataFrame(rows, columns=['token', 'lemma', 'stopwords'])\ndf",
"_____no_output_____"
],
[
"def preprocess(s, nlp, features):\n s = s.lower()\n doc = nlp(s)\n lemmas = []\n for token in doc:\n lemmas.sppend(token.lemma_)\n features |= set(lemmas)\n freq = {\"#\":0,\"@\":0,\"URL\":0}\n for words in lemmas:\n freq[str(words)] = 0\n for token in doc:\n if '#' in str(token): freq['#'] += 1\n if \"@\" in str(token): freq['@'] += 1\n if \"https://\" in str(token): freq['URL'] += 1\n freq[str(token.lemma_)] += 1\n\n return features, freq",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a21f005b3d1cf74dfa50cfcb66d9599c81ea250
| 503,854 |
ipynb
|
Jupyter Notebook
|
doc/nb/ELG_SNR.ipynb
|
sdss/desimodel
|
1ab52f51a172500f8a10e762c88b9929898e1b20
|
[
"BSD-3-Clause"
] | 2 |
2017-07-18T19:22:38.000Z
|
2021-12-17T16:02:01.000Z
|
doc/nb/ELG_SNR.ipynb
|
sdss/desimodel
|
1ab52f51a172500f8a10e762c88b9929898e1b20
|
[
"BSD-3-Clause"
] | 134 |
2016-02-07T03:48:48.000Z
|
2022-02-21T17:50:09.000Z
|
doc/nb/ELG_SNR.ipynb
|
sdss/desimodel
|
1ab52f51a172500f8a10e762c88b9929898e1b20
|
[
"BSD-3-Clause"
] | 3 |
2017-07-12T21:36:19.000Z
|
2022-01-11T16:15:44.000Z
| 513.61264 | 154,952 | 0.928616 |
[
[
[
"# ELG Signal-to-Noise Calculations",
"_____no_output_____"
],
[
"This notebook provides a standardized calculation of the DESI emission-line galaxy (ELG) signal-to-noise (SNR) figure of merit, for tracking changes to simulation inputs and models. See the accompanying technical note [DESI-3977](https://desi.lbl.gov/DocDB/cgi-bin/private/ShowDocument?docid=3977) for details.",
"_____no_output_____"
]
],
[
[
"%pylab inline",
"Populating the interactive namespace from numpy and matplotlib\n"
],
[
"import astropy.table\nimport astropy.cosmology\nimport astropy.io.fits as fits\nimport astropy.units as u",
"_____no_output_____"
]
],
[
[
"Parts of this notebook assume that the [desimodel package](https://github.com/desihub/desimodel) is installed (both its git and svn components) and its `data/` directory is accessible via the `$DESIMODEL` environment variable:",
"_____no_output_____"
]
],
[
[
"import os.path\nassert 'DESIMODEL' in os.environ\nassert os.path.exists(os.path.join(os.getenv('DESIMODEL'), 'data', 'spectra', 'spec-sky.dat'))",
"_____no_output_____"
]
],
[
[
"Document relevant version numbers:",
"_____no_output_____"
]
],
[
[
"import desimodel\nimport specsim",
"_____no_output_____"
],
[
"print(f'Using desimodel {desimodel.__version__}, specsim {specsim.__version__}')",
"Using desimodel 0.9.6.dev431, specsim 0.13.dev793\n"
]
],
[
[
"## ELG Spectrum",
"_____no_output_____"
],
[
"All peaks are assumed to have the same log-normal rest lineshape specified by a velocity dispersion $\\sigma_v$, total flux $F_0$ and central wavelength $\\lambda_0$ as:\n$$\nf(\\lambda; F_0, \\lambda_0) = \\frac{F_0}{\\sqrt{2\\pi}\\,\\lambda\\,\\sigma_{\\log}}\\, \\exp\\left[\n-\\frac{1}{2}\\left( \\frac{\\log_{10}\\lambda - \\log_{10}\\lambda_0}{\\sigma_{\\log}}\\right)^2\\right]\\; ,\n$$\nwhere\n$$\n\\sigma_{\\log} \\equiv \\frac{\\sigma_v}{c \\log 10} \\; .\n$$",
"_____no_output_____"
],
[
"We use the pretabulated spectrum in `$DESIMODEL/data/spectra/spec-elg-o2flux-8e-17-average-line-ratios.dat` described in Section 2.3 of DESI-867-v1,\nwhich consists of only the following emission lines:\n - \\[OII](3727A) and \\[OII](3730A)\n - H-beta\n - \\[OIII](4960A) and \\[OIII](5008A)\n - H-alpha\n\nNote that H-alpha is never observable for $z > 0.5$, as is always the case for DESI ELG targets.\nContinuum is omitted since we are primarily interested in how well the \\[OII] doublet can be identified and measured.\nAll lines are assumed to have the same velocity dispersion of 70 km/s.",
"_____no_output_____"
]
],
[
[
"elg_spec = astropy.table.Table.read(\n os.path.join(os.environ['DESIMODEL'], 'data', 'spectra', 'spec-elg-o2flux-8e-17-average-line-ratios.dat'),\n format='ascii')\nelg_wlen0 = elg_spec['col1'].data\nelg_flux0 = 1e-17 * elg_spec['col2'].data",
"_____no_output_____"
]
],
[
[
"## DESI ELG Sample",
"_____no_output_____"
],
[
"Look up the expected redshift distribution of DESI ELG targets from `$DESIMODEL/data/targets/nz_elg.dat`. Note that the [OII] doublet falls off the spectrograph around z = 1.63.",
"_____no_output_____"
]
],
[
[
"def get_elg_nz():\n # Read the nz file from $DESIMODEL.\n full_name = os.path.join(os.environ['DESIMODEL'], 'data', 'targets', 'nz_elg.dat')\n table = astropy.table.Table.read(full_name, format='ascii')\n\n # Extract the n(z) histogram into numpy arrays.\n z_lo, z_hi = table['col1'], table['col2']\n assert np.all(z_hi[:-1] == z_lo[1:])\n z_edge = np.hstack((z_lo, [z_hi[-1]]))\n nz = table['col3']\n \n # Trim to bins where n(z) > 0.\n non_zero = np.where(nz > 0)[0]\n lo, hi = non_zero[0], non_zero[-1] + 1\n nz = nz[lo: hi]\n z_edge = z_edge[lo: hi + 1]\n \n return nz, z_edge",
"_____no_output_____"
],
[
"elg_nz, elg_z_edge = get_elg_nz()",
"_____no_output_____"
]
],
[
[
"Calculate n(z) weights corresponding to an array of ELG redshifts:",
"_____no_output_____"
]
],
[
[
"def get_nz_weight(z):\n \"\"\"Calculate n(z) weights corresponding to input z values.\n \"\"\"\n nz = np.zeros_like(z)\n idx = np.digitize(z, elg_z_edge)\n sel = (idx > 0) & (idx <= len(elg_nz))\n nz[sel] = elg_nz[idx[sel] - 1]\n return nz",
"_____no_output_____"
]
],
[
[
"Sample random redshifts from n(z):",
"_____no_output_____"
]
],
[
[
"def generate_elg_z(n=100, seed=123):\n cdf = np.cumsum(elg_nz)\n cdf = np.hstack(([0], cdf / cdf[-1]))\n gen = np.random.RandomState(seed)\n return np.interp(gen.rand(n), cdf, elg_z_edge)\n \nz=generate_elg_z(n=20000)\nplt.hist(z, bins=elg_z_edge, histtype='stepfilled')\nplt.xlim(elg_z_edge[0], elg_z_edge[-1])\nprint(f'Mean ELG redshift is {np.mean(z):.3f}')",
"Mean ELG redshift is 1.003\n"
]
],
[
[
"Define a background cosmology for the angular-diameter distance used to scale galaxy angular sizes:",
"_____no_output_____"
]
],
[
[
"LCDM = astropy.cosmology.Planck15",
"_____no_output_____"
]
],
[
[
"Generate random ELG profiles for each target. The mean half-light radius is 0.45\" and scales with redshift.",
"_____no_output_____"
]
],
[
[
"def generate_elg_profiles(z, seed=123, verbose=False):\n \"\"\"ELG profiles are assumed to be disk (Sersic n=1) only.\n \"\"\"\n gen = np.random.RandomState(seed)\n nsrc = len(z)\n source_fraction = np.zeros((nsrc, 2))\n source_half_light_radius = np.zeros((nsrc, 2))\n source_minor_major_axis_ratio = np.zeros((nsrc, 2))\n source_position_angle = 360. * gen.normal(size=(nsrc, 2))\n # Precompute cosmology scale factors.\n angscale = (\n LCDM.angular_diameter_distance(1.0) /\n LCDM.angular_diameter_distance(z)).to(1).value\n if verbose:\n print(f'mean n(z) DA(1.0)/DA(z) = {np.mean(angscale):.3f}')\n # Disk only with random size and ellipticity.\n source_fraction[:, 0] = 1.\n source_half_light_radius[:, 0] = 0.427 * np.exp(0.25 * gen.normal(size=nsrc)) * angscale\n source_minor_major_axis_ratio[:, 0] = np.minimum(0.99, 0.50 * np.exp(0.15 * gen.normal(size=nsrc)))\n if verbose:\n print(f'mean HLR = {np.mean(source_half_light_radius[:, 0]):.3f}\"')\n return dict(\n source_fraction=source_fraction,\n source_half_light_radius=source_half_light_radius,\n source_minor_major_axis_ratio=source_minor_major_axis_ratio,\n source_position_angle=source_position_angle)",
"_____no_output_____"
]
],
[
[
"Diagnostic plot showing the assumed ELG population (Figure 1 of DESI-3977):",
"_____no_output_____"
]
],
[
[
"def plot_elg_profiles(save=None):\n z = generate_elg_z(50000)\n sources = generate_elg_profiles(z, verbose=True)\n fig, ax = plt.subplots(2, 2, figsize=(8, 6))\n ax = ax.flatten()\n ax[0].hist(sources['source_minor_major_axis_ratio'][:, 0], range=(0,1), bins=25)\n ax[0].set_xlabel('ELG minor/major axis ratio')\n ax[0].set_xlim(0, 1)\n ax[1].hist(z, bins=np.arange(0.6, 1.8, 0.1))\n ax[1].set_xlim(0.6, 1.7)\n ax[1].set_xlabel('ELG redshift')\n ax[2].hist(sources['source_half_light_radius'][:, 0], bins=25)\n ax[2].set_xlabel('ELG half-light radius [arcsec]')\n ax[2].set_xlim(0.1, 1.1)\n ax[3].scatter(z, sources['source_half_light_radius'][:, 0], s=0.5, alpha=0.5)\n ax[3].set_xlabel('ELG redshift')\n ax[3].set_ylabel('ELG half-light radius [arcsec]')\n ax[3].set_xlim(0.6, 1.7)\n ax[3].set_ylim(0.1, 1.1)\n plt.tight_layout()\n if save:\n plt.savefig(save)\n \nplot_elg_profiles(save='elg-sample.png')",
"mean n(z) DA(1.0)/DA(z) = 1.021\nmean HLR = 0.450\"\n"
]
],
[
[
"## Simulated SNR",
"_____no_output_____"
],
[
"Given an initialized simulator object, step through different redshifts and calculate the SNR recorded by all fibers for a fixed ELG spectrum. Save the results to a FITS file that can be used by `plot_elg_snr()`.",
"_____no_output_____"
]
],
[
[
"def calculate_elg_snr(simulator, save, description,\n z1=0.6, z2=1.65, dz=0.002, zref=1.20,\n seed=123, wlen=elg_wlen0, flux=elg_flux0):\n \"\"\"Calculate the ELG [OII] SNR as a function of redshift.\n \n Parameters\n ----------\n simulator : specsim.simulator.Simulator\n Instance of an initialized Simulator object to use. Each fiber will\n be simulated independently to study variations across the focal plane.\n save : str\n Filename to use for saving FITS results.\n description : str\n Short description for the saved file header, also used for plots later.\n z1 : float\n Minimum ELG redshift to calculate.\n z2 : float\n Maximum ELG redshift to calculate.\n dz : float\n Spacing of equally spaced grid to cover [z1, z2]. z2 will be increased\n by up to dz if necessary.\n zref : float\n Reference redshift used to save signal, noise and fiberloss. Must be\n on the grid specified by (z1, z2, dz).\n seed : int or None\n Random seed used to generate fiber positions and galaxy profiles.\n wlen : array\n 1D array of N rest wavelengths in Angstroms.\n flux : array\n 1D array of N corresponding rest fluxes in erg / (s cm2 Angstrom).\n \"\"\"\n zooms = (3715., 3742.), (4850., 4875.), (4950., 5020.)\n gen = np.random.RandomState(seed=seed)\n \n # Generate random focal plane (x,y) positions for each fiber in mm units.\n nfibers = simulator.num_fibers\n focal_r = np.sqrt(gen.uniform(size=nfibers)) * simulator.instrument.field_radius\n phi = 2 * np.pi * gen.uniform(size=nfibers)\n xy = (np.vstack([np.cos(phi), np.sin(phi)]) * focal_r).T\n\n # Build the grid of redshifts to simulate.\n nz = int(np.ceil((z2 - z1) / dz)) + 1\n z2 = z1 + (nz - 1) * dz\n z_grid = np.linspace(z1, z2, nz)\n iref = np.argmin(np.abs(z_grid - zref))\n assert np.abs(zref - z_grid[iref]) < 1e-5, 'zref not in z_grid'\n snr2 = np.zeros((4, nz, simulator.num_fibers))\n \n # Initialize the results.\n hdus = fits.HDUList()\n hdus.append(fits.PrimaryHDU(\n header=fits.Header({'SEED': seed, 'NFIBERS': nfibers, 
'DESCRIBE': description})))\n \n # Zero-pad the input spectrum if necessary.\n wlo = 0.99 * desi.simulated['wavelength'][0] / (1 + z2)\n if wlen[0] > wlo:\n wlen = np.hstack([[wlo], wlen])\n flux = np.hstack([[0.], flux])\n\n # Simulate the specified rest-frame flux.\n simulator.source.update_in(\n 'ELG [OII] doublet', 'elg',\n wlen * u.Angstrom, flux * u.erg/(u.s * u.cm**2 * u.Angstrom), z_in=0.)\n\n # Simulate each redshift.\n for i, z in enumerate(z_grid):\n # Redshift the ELG spectrum.\n simulator.source.update_out(z_out=z)\n source_flux = np.tile(simulator.source.flux_out, [nfibers, 1])\n # Generate source profiles for each target at this redshift. Since the seed is\n # fixed, only the redshift scaling of the HLR will change.\n sources = generate_elg_profiles(np.full(nfibers, z), seed=seed)\n # Simulate each source.\n simulator.simulate(source_fluxes=source_flux, focal_positions=xy, **sources)\n # Calculate the quadrature sum of SNR in each camera, by fiber.\n for output in simulator.camera_output:\n rest_wlen = output['wavelength'] / (1 + z)\n # Loop over emission lines.\n for j, (lo, hi) in enumerate(zooms):\n sel = (rest_wlen >= lo) & (rest_wlen < hi)\n if not np.any(sel):\n continue\n # Sum SNR2 over pixels.\n pixel_snr2 = output['num_source_electrons'][sel] ** 2 / output['variance_electrons'][sel]\n snr2[j, i] += pixel_snr2.sum(axis=0)\n if i == iref:\n # Save the fiberloss fraction and total variance tabulated on the simulation grid.\n table = astropy.table.Table(meta={'ZREF': zref})\n sim = simulator.simulated\n table['WLEN'] = sim['wavelength'].data\n table['FLUX'] = sim['source_flux'].data\n table['FIBERLOSS'] = sim['fiberloss'].data\n table['NSRC'] = sim['num_source_electrons_b'] + sim['num_source_electrons_r'] + sim['num_source_electrons_z']\n table['SKYVAR'] = sim['num_sky_electrons_b'] + sim['num_sky_electrons_r'] + sim['num_sky_electrons_z']\n table['NOISEVAR'] = (\n sim['read_noise_electrons_b'] ** 2 + sim['read_noise_electrons_r'] ** 2 + 
sim['read_noise_electrons_z'] ** 2 +\n sim['num_dark_electrons_b'] + sim['num_dark_electrons_r'] + sim['num_dark_electrons_z'])\n hdus.append(fits.table_to_hdu(table))\n hdus[-1].name = 'REF'\n \n # Calculate the n(z) weighted mean SNR for [OII], using the median over fibers at each redshift.\n snr_oii = np.median(np.sqrt(snr2[0]), axis=-1)\n wgt = get_nz_weight(z_grid)\n snr_oii_eff = np.sum(snr_oii * wgt) / np.sum(wgt)\n print(f'n(z)-weighted effective [OII] SNR = {snr_oii_eff:.3f}')\n\n # Save the SNR vs redshift arrays for each emission line.\n table = astropy.table.Table(meta={'SNREFF': snr_oii_eff})\n table['Z'] = z_grid\n table['ZWGT'] = wgt\n table['SNR_OII'] = np.sqrt(snr2[0])\n table['SNR_HBETA'] = np.sqrt(snr2[1])\n table['SNR_OIII'] = np.sqrt(snr2[2])\n hdus.append(fits.table_to_hdu(table))\n hdus[-1].name = 'SNR'\n\n hdus.writeto(save, overwrite=True)",
"_____no_output_____"
]
],
[
[
"Calculate flux limits in bins of redshift, to compare with SRD L3.1.3:",
"_____no_output_____"
]
],
[
[
"def get_flux_limits(z, snr, nominal_flux=8., nominal_snr=7., ax=None):\n fluxlim = np.zeros_like(snr)\n nonzero = snr > 0\n fluxlim[nonzero] = nominal_flux * (nominal_snr / snr[nonzero])\n bins = np.linspace(0.6, 1.6, 6)\n nlim = len(bins) - 1\n medians = np.empty(nlim)\n for i in range(nlim):\n sel = (z >= bins[i]) & (z < bins[i + 1])\n medians[i] = np.median(fluxlim[sel])\n if ax is not None:\n zmid = 0.5 * (bins[1:] + bins[:-1])\n dz = 0.5 * (bins[1] - bins[0])\n ax.errorbar(zmid, medians, xerr=dz, color='b', fmt='o', zorder=10, capsize=3)\n return fluxlim, medians",
"_____no_output_____"
]
],
[
[
"Plot a summary of the results saved by `calculate_elg_snr()`. Shaded bands show the 5-95 percentile range, with the median drawn as a solid curve. The fiberloss in the lower plot is calculated at the redshift `zref` specified in `calculate_elg_snr()` (since the ELG size distribution is redshift dependent).",
"_____no_output_____"
]
],
[
[
"def plot_elg_snr(name, save=True):\n \"\"\"Plot a summary of results saved by calculate_elg_snr().\n \n Parameters\n ----------\n name : str\n Name of the FITS file saved by calculate_elg_snr().\n \"\"\"\n hdus = fits.open(name)\n hdr = hdus[0].header\n nfibers = hdr['NFIBERS']\n description = hdr['DESCRIBE']\n \n fig, axes = plt.subplots(2, 1, figsize=(8, 6))\n plt.suptitle(description, fontsize=14)\n\n snr_table = astropy.table.Table.read(hdus['SNR'])\n snr_oii_eff = snr_table.meta['SNREFF']\n ref_table = astropy.table.Table.read(hdus['REF'])\n zref = ref_table.meta['ZREF']\n \n ax = axes[0]\n color = 'rgb'\n labels = '[OII]', 'H$\\\\beta$', '[OIII]'\n z_grid = snr_table['Z'].data\n for i, tag in enumerate(('SNR_OII', 'SNR_HBETA', 'SNR_OIII')):\n snr = snr_table[tag].data\n snr_q = np.percentile(snr, (5, 50, 95), axis=-1)\n ax.fill_between(z_grid, snr_q[0], snr_q[2], color=color[i], alpha=0.25, lw=0)\n ax.plot(z_grid, snr_q[1], c=color[i], ls='-', label=labels[i])\n ax.plot([], [], 'k:', label='n(z)')\n ax.legend(ncol=4)\n ax.set_xlabel('ELG redshift')\n ax.set_ylabel(f'Total signal-to-noise ratio')\n ax.axhline(7, c='k', ls='--')\n rhs = ax.twinx()\n rhs.plot(z_grid, snr_table['ZWGT'], 'k:')\n rhs.set_yticks([])\n ax.set_xlim(z_grid[0], z_grid[-1])\n ax.set_ylim(0, 12)\n rhs.set_ylim(0, None)\n ax.text(0.02, 0.03, f'n(z)-wgtd [OII] SNR={snr_oii_eff:.3f}',\n fontsize=12, transform=ax.transAxes)\n \n # Calculate the median [OII] flux limits.\n _, fluxlim = get_flux_limits(z_grid, np.median(snr_table['SNR_OII'], axis=-1))\n\n # Print latex-format results for DESI-3977 Table 2.\n print(f'&{snr_oii_eff:7.3f}', end='')\n for m in fluxlim:\n print(f' &{m:5.1f}', end='')\n print(' \\\\\\\\')\n\n ax = axes[1]\n wlen = ref_table['WLEN'].data\n dwlen = wlen[1] - wlen[0]\n sky_q = np.percentile(ref_table['SKYVAR'].data, (5, 50, 95), axis=-1)\n sky_q[sky_q > 0] = 1 / sky_q[sky_q > 0]\n ax.fill_between(wlen, sky_q[0], sky_q[2], color='b', alpha=0.5, lw=0)\n ax.plot([], [], 
'b-', label='sky ivar')\n ax.plot(wlen, sky_q[1], 'b.', ms=0.25, alpha=0.5)\n noise_q = np.percentile(ref_table['NOISEVAR'].data, (5, 50, 95), axis=-1)\n noise_q[noise_q > 0] = 1 / noise_q[noise_q > 0]\n ax.fill_between(wlen, noise_q[0], noise_q[2], color='r', alpha=0.25, lw=0)\n ax.plot(wlen, noise_q[1], c='r', ls='-', label='noise ivar')\n floss_q = np.percentile(ref_table['FIBERLOSS'].data, (5, 50, 95), axis=-1)\n ax.plot([], [], 'k-', label='fiberloss')\n rhs = ax.twinx()\n rhs.fill_between(wlen, floss_q[0], floss_q[2], color='k', alpha=0.25, lw=0)\n rhs.plot(wlen, floss_q[1], 'k-')\n rhs.set_ylim(0.2, 0.6)\n rhs.yaxis.set_major_locator(matplotlib.ticker.MultipleLocator(0.1))\n rhs.set_ylabel('Fiberloss')\n ax.set_xlabel('Wavelength [A]')\n ax.set_ylabel(f'Inverse Variance / {dwlen:.1f}A')\n ax.set_xlim(wlen[0], wlen[-1])\n ax.set_ylim(0, 0.25)\n ax.legend(ncol=3)\n \n plt.subplots_adjust(wspace=0.1, top=0.95, bottom=0.08, left=0.10, right=0.92)\n \n if save:\n base, _ = os.path.splitext(name)\n plot_name = base + '.png'\n plt.savefig(plot_name)\n print(f'Saved {plot_name}')",
"_____no_output_____"
]
],
[
[
"## Examples",
"_____no_output_____"
],
[
"Demonstrate this calculation for the baseline DESI configuration with 100 fibers:",
"_____no_output_____"
]
],
[
[
"import specsim.simulator",
"_____no_output_____"
],
[
"desi = specsim.simulator.Simulator('desi', num_fibers=100)",
"_____no_output_____"
]
],
[
[
"**NOTE: the next cell takes about 15 minutes to run.**",
"_____no_output_____"
]
],
[
[
"%time calculate_elg_snr(desi, save='desimodel-0.9.6.fits', description='desimodel 0.9.6')",
"n(z)-weighted effective [OII] SNR = 6.764\nCPU times: user 12min 31s, sys: 3min 32s, total: 16min 4s\nWall time: 16min 5s\n"
]
],
[
[
"Plot the results (Figure 2 of DESI-3977):",
"_____no_output_____"
]
],
[
[
"plot_elg_snr('desimodel-0.9.6.fits')",
"& 6.764 & 9.5 & 7.9 & 8.4 & 7.3 & 8.0 \\\\\nSaved desimodel-0.9.6.png\n"
]
],
[
[
"Check that the results with GalSim are compatible with those using the (default) fastsim mode of fiberloss calculations:",
"_____no_output_____"
]
],
[
[
"desi.instrument.fiberloss_method = 'galsim'",
"_____no_output_____"
]
],
[
[
"**NOTE: the next cell takes about 30 minutes to run.**",
"_____no_output_____"
]
],
[
[
"%time calculate_elg_snr(desi, save='desimodel-0.9.6-galsim.fits', description='desimodel 0.9.6 (galsim)')",
"n(z)-weighted effective [OII] SNR = 6.572\nCPU times: user 25min 1s, sys: 3min 44s, total: 28min 45s\nWall time: 28min 47s\n"
],
[
"plot_elg_snr('desimodel-0.9.6-galsim.fits')",
"& 6.572 & 9.9 & 8.1 & 8.6 & 7.5 & 8.2 \\\\\nSaved desimodel-0.9.6-galsim.png\n"
]
],
[
[
"This comparison shows that the \"fastsim\" fiberloss fractions are about 1% (absolute) higher than \"galsim\", leading to a slight increase in signal and therefore SNR. The reason for this increase is that \"fastsim\" assumes a fixed minor / major axis ratio of 0.7 while our ELG population has a distribution of ratios with a median of 0.5. The weighted [OII] SNR values are 6.764 (fastsim) and 6.572 (galsim), which agree at the few percent level.\n\nWe use GalSim fiberloss calculations consistently in Figure 2 and Table 2 of DESI-3977.",
"_____no_output_____"
],
[
"### CDR Comparison",
"_____no_output_____"
],
[
"Compare with the CDR forecasts based on desimodel 0.3.1 and documented in DESI-867, using data from this [FITS file](https://desi.lbl.gov/svn/docs/technotes/spectro/elg-snr/trunk/data/elg_snr2_desimodel-0-3-1.fits):",
"_____no_output_____"
]
],
[
[
"desi867 = astropy.table.Table.read('elg_snr2_desimodel-0-3-1.fits', hdu=1)",
"_____no_output_____"
]
],
[
[
"Check that we can reproduce the figures from DESI-867:",
"_____no_output_____"
]
],
[
[
"def desi_867_fig1():\n z = desi867['Z']\n snr_all = np.sqrt(desi867['SNR2'])\n snr_oii = np.sqrt(desi867['SNR2_OII'])\n fig = plt.figure(figsize=(6, 5))\n plt.plot(z, snr_all, 'k-', lw=1, label='all lines')\n plt.plot(z, snr_oii, 'r-', lw=1, label='[OII] only')\n plt.legend(fontsize='large')\n plt.axhline(7, c='b', ls='--')\n plt.ylim(0, 22)\n plt.xlim(z[0], z[-1])\n plt.xticks([0.5, 1.0, 1.5])\n plt.xlabel('Redshift')\n plt.ylabel('S/N')\n \ndesi_867_fig1()",
"_____no_output_____"
],
[
"def desi_867_fig2():\n z = desi867['Z']\n snr_all = np.sqrt(desi867['SNR2'])\n snr_oii = np.sqrt(desi867['SNR2_OII'])\n flux_limit_all, _ = get_flux_limits(z, snr_all)\n flux_limit_oii, medians = get_flux_limits(z, snr_oii)\n fig = plt.figure(figsize=(6, 5))\n plt.plot(z, flux_limit_all, 'k-', lw=1, label='all lines')\n plt.plot(z, flux_limit_oii, 'r-', lw=1, label='[OII] only')\n plt.legend(loc='upper right', fontsize='large')\n _, _ = get_flux_limits(z, snr_oii, ax=plt.gca())\n plt.ylim(0, 40)\n plt.xlim(z[0], z[-1])\n plt.xticks([0.5, 1.0, 1.5])\n plt.xlabel('Redshift')\n plt.ylabel('[OII] Flux limit ($10^{-17}$ ergs cm$^{-2}$ s$^{-1}$)')\n \ndesi_867_fig2()",
"_____no_output_____"
]
],
[
[
"Print a summary for Table 2 of DESI-3977:",
"_____no_output_____"
]
],
[
[
"def cdr_summary():\n z = desi867['Z']\n snr_oii = np.sqrt(desi867['SNR2_OII'])\n wgt = get_nz_weight(z)\n snreff = np.sum(wgt * snr_oii) / wgt.sum()\n _, medians = get_flux_limits(z, snr_oii)\n print(f'0.3.1 (CDR) & {snreff:6.3f}', end='')\n for m in medians:\n print(f' &{m:5.1f}', end='')\n print(' \\\\\\\\')\n \ncdr_summary()",
"0.3.1 (CDR) & 6.262 & 10.1 & 8.6 & 8.9 & 7.6 & 8.3 \\\\\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a21fddfa75eb0a5a7763f25d6099514b3154535
| 4,292 |
ipynb
|
Jupyter Notebook
|
ui-tests/tests/notebooks/scattergl_update.ipynb
|
meeseeksmachine/bqplot
|
d8fae93274422e72b7ecf1f464d8d8197103a28d
|
[
"Apache-2.0"
] | null | null | null |
ui-tests/tests/notebooks/scattergl_update.ipynb
|
meeseeksmachine/bqplot
|
d8fae93274422e72b7ecf1f464d8d8197103a28d
|
[
"Apache-2.0"
] | null | null | null |
ui-tests/tests/notebooks/scattergl_update.ipynb
|
meeseeksmachine/bqplot
|
d8fae93274422e72b7ecf1f464d8d8197103a28d
|
[
"Apache-2.0"
] | null | null | null | 21.786802 | 126 | 0.508621 |
[
[
[
"from bqplot import *\n\nimport numpy as np\nimport pandas as pd\n\n# Test data\n\nnp.random.seed(0)\nprice_data = pd.DataFrame(np.cumsum(np.random.randn(150, 2).dot([[1.0, -0.8], [-0.8, 1.0]]), axis=0) + 100,\n columns=['Security 1', 'Security 2'], index=pd.date_range(start='01-01-2007', periods=150))\nsize = 100\nx_data = range(size)\ny_data = np.cumsum(np.random.randn(size) * 100.0)\nord_keys = np.array(['A', 'B', 'C', 'D', 'E', 'F'])\nordinal_data = np.random.randint(5, size=size)\n\nsymbols = ['Security 1', 'Security 2']\n\ndates_all = price_data.index.values\ndates_all_t = dates_all[1:]\nsec1_levels = np.array(price_data[symbols[0]].values.flatten())\nlog_sec1 = np.log(sec1_levels)\nsec1_returns = log_sec1[1:] - log_sec1[:-1]\n\nsec2_levels = np.array(price_data[symbols[1]].values.flatten())\n\n# First draw\n\nsc_x = DateScale()\nsc_y = LinearScale()\n\nscatt = ScatterGL(x=dates_all, y=sec2_levels, scales={'x': sc_x, 'y': sc_y})\nax_x = Axis(scale=sc_x, label='Date')\nax_y = Axis(scale=sc_y, orientation='vertical', tick_format='0.0f', label='Security 2')\n\nfig = Figure(marks=[scatt], axes=[ax_x, ax_y])\nfig",
"_____no_output_____"
],
[
"scatt.opacities = [0.3, 0.5, 1.]",
"_____no_output_____"
],
[
"scatt.colors = ['green', 'red', 'blue']",
"_____no_output_____"
],
[
"# Doesn't work?\n# scatt.y = -scatt.y",
"_____no_output_____"
],
[
"# Doesn't work?\n# scatt.default_size = 70",
"_____no_output_____"
],
[
"scatt.scales = dict(x=sc_x, y=sc_y, color=ColorScale())\nscatt.color = scatt.y",
"_____no_output_____"
],
[
"scatt.marker = 'square'",
"_____no_output_____"
],
[
"scatt.stroke = 'black'",
"_____no_output_____"
],
[
"scatt.stroke = None",
"_____no_output_____"
],
[
"scatt.fill = False",
"_____no_output_____"
],
[
"scatt.fill = True",
"_____no_output_____"
],
[
"scatt.selected_style = dict(fill='green')\nscatt.unselected_style = dict(fill='red')\nscatt.selected = list(range(50, 60))",
"_____no_output_____"
],
[
"scatt.selected = None",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a21fe337bae3d1a83b614ad9d83ecbaf8065c16
| 22,728 |
ipynb
|
Jupyter Notebook
|
nbs/06_cli.ipynb
|
ucsc-vama/chisel_nbdev
|
7fb7c80ac3777cdd3fab64538d5f8908f2e83c05
|
[
"Apache-2.0"
] | null | null | null |
nbs/06_cli.ipynb
|
ucsc-vama/chisel_nbdev
|
7fb7c80ac3777cdd3fab64538d5f8908f2e83c05
|
[
"Apache-2.0"
] | null | null | null |
nbs/06_cli.ipynb
|
ucsc-vama/chisel_nbdev
|
7fb7c80ac3777cdd3fab64538d5f8908f2e83c05
|
[
"Apache-2.0"
] | null | null | null | 34.966154 | 491 | 0.591297 |
[
[
[
"#hide\n#default_exp cli\nfrom nbdev.showdoc import show_doc",
"_____no_output_____"
]
],
[
[
"# Command line functions\n\n> Console commands added by the nbdev library",
"_____no_output_____"
]
],
[
[
"#export\nfrom nbdev.imports import *\nfrom chisel_nbdev.export_scala import *\nfrom chisel_nbdev.sync_scala import *\nfrom nbdev.merge import *\nfrom chisel_nbdev.export_scala2html import *\nfrom chisel_nbdev.clean_scala import *\nfrom chisel_nbdev.test_scala import *\nfrom fastcore.script import *",
"_____no_output_____"
]
],
[
[
"`nbdev` comes with the following commands. To use any of them, you must be in one of the subfolders of your project: they will search for the `settings.ini` recursively in the parent directory but need to access it to be able to work. Their names all begin with nbdev so you can easily get a list with tab completion.\n- `chisel_nbdev_build_docs` builds the documentation from the notebooks\n- `chisel_nbdev_build_lib` builds the library from the notebooks\n- `chisel_nbdev_bump_version` increments version in `settings.py` by one\n- `chisel_nbdev_clean_nbs` removes all superfluous metadata form the notebooks, to avoid merge conflicts\n- `chisel_nbdev_detach` exports cell attachments to `dest` and updates references\n- `chisel_nbdev_diff_nbs` gives you the diff between the notebooks and the exported library\n- `chisel_nbdev_fix_merge` will fix merge conflicts in a notebook file\n- `chisel_nbdev_install_git_hooks` installs the git hooks that use the last two command automatically on each commit/merge\n- `chisel_nbdev_nb2md` converts a notebook to a markdown file\n- `chisel_nbdev_new` creates a new nbdev project\n- `chisel_nbdev_read_nbs` reads all notebooks to make sure none are broken\n- `chisel_nbdev_test_nbs` runs tests in notebooks\n- `chisel_nbdev_trust_nbs` trusts all notebooks (so that the HTML content is shown)\n- `chisel_nbdev_update_lib` propagates any change in the library back to the notebooks",
"_____no_output_____"
],
[
"## Navigating from notebooks to script and back",
"_____no_output_____"
]
],
[
[
"show_doc(nbdev_build_lib)",
"_____no_output_____"
]
],
[
[
"By default (`fname` left to `None`), the whole library is built from the notebooks in the `lib_folder` set in your `settings.ini`.",
"_____no_output_____"
]
],
[
[
"show_doc(nbdev_update_lib)",
"_____no_output_____"
]
],
[
[
"By default (`fname` left to `None`), the whole library is treated. Note that this tool is only designed for small changes such as typo or small bug fixes. You can't add new cells in notebook from the library.",
"_____no_output_____"
]
],
[
[
"show_doc(nbdev_diff_nbs)",
"_____no_output_____"
]
],
[
[
"## Running tests",
"_____no_output_____"
]
],
[
[
"show_doc(nbdev_test_nbs)",
"_____no_output_____"
]
],
[
[
"By default (`fname` left to `None`), the whole library is tested from the notebooks in the `lib_folder` set in your `settings.ini`.",
"_____no_output_____"
],
[
"## Building documentation",
"_____no_output_____"
]
],
[
[
"show_doc(nbdev_build_docs)",
"_____no_output_____"
]
],
[
[
"By default (`fname` left to `None`), the whole documentation is build from the notebooks in the `lib_folder` set in your `settings.ini`, only converting the ones that have been modified since the their corresponding html was last touched unless you pass `force_all=True`. The index is also converted to make the README file, unless you pass along `mk_readme=False`.",
"_____no_output_____"
]
],
[
[
"show_doc(nbdev_nb2md)",
"_____no_output_____"
],
[
"show_doc(nbdev_detach)",
"_____no_output_____"
]
],
[
[
"## Other utils",
"_____no_output_____"
]
],
[
[
"show_doc(nbdev_read_nbs)",
"_____no_output_____"
]
],
[
[
"By default (`fname` left to `None`), the all the notebooks in `lib_folder` are checked.",
"_____no_output_____"
]
],
[
[
"show_doc(nbdev_trust_nbs)",
"_____no_output_____"
]
],
[
[
"By default (`fname` left to `None`), the all the notebooks in `lib_folder` are trusted. To speed things up, only the ones touched since the last time this command was run are trusted unless you pass along `force_all=True`.",
"_____no_output_____"
]
],
[
[
"show_doc(nbdev_fix_merge)",
"_____no_output_____"
]
],
[
[
"When you have merge conflicts after a `git pull`, the notebook file will be broken and won't open in jupyter notebook anymore. This command fixes this by changing the notebook to a proper json file again and add markdown cells to signal the conflict, you just have to open that notebook again and look for `>>>>>>>` to see those conflicts and manually fix them. The old broken file is copied with a `.ipynb.bak` extension, so is still accessible in case the merge wasn't successful.\n\nMoreover, if `fast=True`, conflicts in outputs and metadata will automatically be fixed by using the local version if `trust_us=True`, the remote one if `trust_us=False`. With this option, it's very likely you won't have anything to do, unless there is a real conflict.",
"_____no_output_____"
]
],
[
[
"#export\ndef bump_version(version, part=2):\n version = version.split('.')\n version[part] = str(int(version[part]) + 1)\n for i in range(part+1, 3): version[i] = '0'\n return '.'.join(version)",
"_____no_output_____"
],
[
"test_eq(bump_version('0.1.1' ), '0.1.2')\ntest_eq(bump_version('0.1.1', 1), '0.2.0')",
"_____no_output_____"
],
[
"#export\n@call_parse\ndef nbdev_bump_version(part:Param(\"Part of version to bump\", int)=2):\n \"Increment version in `settings.py` by one\"\n cfg = Config()\n print(f'Old version: {cfg.version}')\n cfg.d['version'] = bump_version(Config().version, part)\n cfg.save()\n update_version()\n print(f'New version: {cfg.version}')",
"_____no_output_____"
]
],
[
[
"## Git hooks",
"_____no_output_____"
]
],
[
[
"#export\n@call_parse\ndef nbdev_install_git_hooks():\n \"Install git hooks to clean/trust notebooks automatically\"\n try: path = Config().config_file.parent\n except: path = Path.cwd()\n hook_path = path/'.git'/'hooks'\n fn = hook_path/'post-merge'\n hook_path.mkdir(parents=True, exist_ok=True)\n #Trust notebooks after merge\n fn.write_text(\"#!/bin/bash\\necho 'Trusting notebooks'\\nchisel_nbdev_trust_nbs\")\n os.chmod(fn, os.stat(fn).st_mode | stat.S_IEXEC)\n #Clean notebooks on commit/diff\n (path/'.gitconfig').write_text(\"\"\"# Generated by chisel_nbdev_install_git_hooks\n#\n# If you need to disable this instrumentation do:\n# git config --local --unset include.path\n#\n# To restore the filter\n# git config --local include.path .gitconfig\n#\n# If you see notebooks not stripped, checked the filters are applied in .gitattributes\n#\n[filter \"clean-nbs\"]\n clean = chisel_nbdev_clean_nbs --read_input_stream True\n smudge = cat\n required = true\n[diff \"ipynb\"]\n textconv = chisel_nbdev_clean_nbs --disp True --fname\n\"\"\")\n cmd = \"git config --local include.path ../.gitconfig\"\n print(f\"Executing: {cmd}\")\n run(cmd)\n print(\"Success: hooks are installed and repo's .gitconfig is now trusted\")\n try: nb_path = Config().path(\"nbs_path\")\n except: nb_path = Path.cwd()\n (nb_path/'.gitattributes').write_text(\"**/*.ipynb filter=clean-nbs\\n**/*.ipynb diff=ipynb\\n\")",
"_____no_output_____"
]
],
[
[
"This command installs git hooks to make sure notebooks are cleaned before you commit them to GitHub and automatically trusted at each merge. To be more specific, this creates:\n- an executable '.git/hooks/post-merge' file that contains the command `nbdev_trust_nbs`\n- a `.gitconfig` file that uses `nbev_clean_nbs` has a filter/diff on all notebook files inside `nbs_folder` and a `.gitattributes` file generated in this folder (copy this file in other folders where you might have notebooks you want cleaned as well)",
"_____no_output_____"
],
[
"## Starting a new project",
"_____no_output_____"
]
],
[
[
"#export\n_template_git_repo = \"https://github.com/fastai/nbdev_template.git\"",
"_____no_output_____"
],
[
"#export\nimport tarfile",
"_____no_output_____"
],
[
"#export\ndef extract_tgz(url, dest='.'):\n with urlopen(url) as u: tarfile.open(mode='r:gz', fileobj=u).extractall(dest)",
"_____no_output_____"
],
[
"#export\n@call_parse\ndef nbdev_new():\n \"Create a new nbdev project from the current git repo\"\n url = run('git config --get remote.origin.url')\n if not url: raise Exception('This does not appear to be a cloned git directory with a remote')\n author = run('git config --get user.name').strip()\n email = run('git config --get user.email').strip()\n if not (author and email): raise Exception('User name and email not configured in git')\n\n # download and untar template, and optionally notebooks\n FILES_URL = 'https://files.fast.ai/files/'\n extract_tgz(f'{FILES_URL}nbdev_files.tgz')\n path = Path()\n for o in (path/'nbdev_files').ls(): \n if not Path(f'./{o.name}').exists(): shutil.move(str(o), './') \n shutil.rmtree('nbdev_files')\n if first(path.glob('*.ipynb')): print(\"00_core.ipynb not downloaded since a notebook already exists.\")\n else: urlsave(f'{FILES_URL}00_core.ipynb')\n if not (path/'index.ipynb').exists(): urlsave(f'{FILES_URL}index.ipynb')\n\n # auto-config settings.ini from git\n settings_path = Path('settings.ini')\n settings = settings_path.read_text()\n owner,repo = repo_details(url)\n branch = run('git symbolic-ref refs/remotes/origin/HEAD').strip().split('/')[-1]\n settings = settings.format(lib_name=repo, user=owner, author=author, author_email=email, branch=branch)\n settings_path.write_text(settings)\n \n nbdev_install_git_hooks()\n if not (path/'LICENSE').exists() and not (path/'LICENSE.md').exists():\n warnings.warn('No LICENSE file found - you will need one if you will create pypi or conda packages.')",
"_____no_output_____"
]
],
[
[
"`nbdev_new` is a command line tool that creates a new nbdev project from the current directory, which must be a cloned git repo.\n\nAfter you run `nbdev_new`, please check the contents of `settings.ini` look good, and then run `nbdev_build_lib`.",
"_____no_output_____"
],
[
"## Export -",
"_____no_output_____"
]
],
[
[
"#hide\nfrom nbdev.export import notebook2script\nnotebook2script()",
"Converted 00_export_scala.ipynb.\nConverted 01_sync_scala.ipynb.\nConverted 02_show_scaladoc.ipynb.\nConverted 03_export_scala2html.ipynb.\nConverted 04_test_scala.ipynb.\nConverted 06_cli.ipynb.\nConverted 07_clean_scala.ipynb.\nConverted ToImport.ipynb.\nConverted import_chisel_mod.ipynb.\nConverted import_composed_mod.ipynb.\nConverted index.ipynb.\nConverted test.ipynb.\n"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
4a220104043ccf5c8e3a42709a4a16319b1902db
| 8,574 |
ipynb
|
Jupyter Notebook
|
Practice.ipynb
|
ppant/CodingBat
|
96ab0f9a07106dd19912ae931bead2f9c195a741
|
[
"MIT"
] | null | null | null |
Practice.ipynb
|
ppant/CodingBat
|
96ab0f9a07106dd19912ae931bead2f9c195a741
|
[
"MIT"
] | null | null | null |
Practice.ipynb
|
ppant/CodingBat
|
96ab0f9a07106dd19912ae931bead2f9c195a741
|
[
"MIT"
] | null | null | null | 15.936803 | 94 | 0.415092 |
[
[
[
"import numpy as np\n",
"_____no_output_____"
],
[
"np_heights = np.array([[1.60,1.75],[1.56,1.70],[1.49,1.68]])",
"_____no_output_____"
],
[
"print (np_heights)",
"[[1.6 1.75]\n [1.56 1.7 ]\n [1.49 1.68]]\n"
],
[
"print (np_heights[0][1])",
"1.75\n"
],
[
"print (np_heights[:,0])\n",
"[1.6 1.56 1.49]\n"
],
[
"print (np.mean(np_heights[:,0]))",
"1.55\n"
],
[
"ls1 = [1,2,3]\n",
"_____no_output_____"
],
[
"ls2 = [4,5,6]",
"_____no_output_____"
],
[
"print (ls1 + ls2)",
"[1, 2, 3, 4, 5, 6]\n"
],
[
"x = np.array([10, 23, 14, 27])\ny = np.array([3, 5, 7, 26])",
"_____no_output_____"
],
[
"print(x)",
"[10 23 14 27]\n"
],
[
"print(y)",
"[ 3 5 7 26]\n"
],
[
"z = np.array([x,y])",
"_____no_output_____"
],
[
"print(z.shape)",
"(2, 4)\n"
],
[
"p = 1\nq = \"PradeepPant\"",
"_____no_output_____"
],
[
"print(q * p)",
"PradeepPant\n"
],
[
"x = [1,2,3,4,5]",
"_____no_output_____"
],
[
"print(x[1:4])",
"[2, 3, 4]\n"
],
[
"p = 2\nprint (type(p))",
"<class 'int'>\n"
],
[
"z = np.array([[1,1,1],[3,4,4]])",
"_____no_output_____"
],
[
"z\n",
"_____no_output_____"
],
[
"z[1:,:1]",
"_____no_output_____"
],
[
"z[:,:]",
"_____no_output_____"
],
[
"q = [34, 29, 13, 32, 17, 6]",
"_____no_output_____"
],
[
"q\n",
"_____no_output_____"
],
[
"q",
"_____no_output_____"
],
[
"q[-1:]",
"_____no_output_____"
],
[
"len(q)",
"_____no_output_____"
],
[
"np_heights = np.array([[1.75,1.65,1.8,1.5],[1.56,1.70,1.4,1.29],[1.49,1.68,1.3,1.8]])\nnp.sort(np_heights[0])",
"_____no_output_____"
],
[
"np_heights[:,0]",
"_____no_output_____"
],
[
"np.median(np_heights[:,0])",
"_____no_output_____"
],
[
"np.array([0, True, \"python\"])",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a222726f2ec75bd6743dcf0573dc03ad29c1466
| 380,534 |
ipynb
|
Jupyter Notebook
|
book1/intro/pandas_intro.ipynb
|
AFIT-CSCE623/pyprobml
|
05c4eb70ca2eaa9ee42cb9e4aa054df503e0fbbb
|
[
"MIT"
] | null | null | null |
book1/intro/pandas_intro.ipynb
|
AFIT-CSCE623/pyprobml
|
05c4eb70ca2eaa9ee42cb9e4aa054df503e0fbbb
|
[
"MIT"
] | null | null | null |
book1/intro/pandas_intro.ipynb
|
AFIT-CSCE623/pyprobml
|
05c4eb70ca2eaa9ee42cb9e4aa054df503e0fbbb
|
[
"MIT"
] | null | null | null | 276.752 | 141,158 | 0.886213 |
[
[
[
"<a href=\"https://colab.research.google.com/github/probml/pyprobml/blob/master/book1/intro/pandas_intro.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# Manipulating and visualizing tabular data using pandas \n\n[Pandas](https://pandas.pydata.org/) is a widely used Python library for storing and manipulating tabular data, where feature columns may be of different types (e.g., scalar, ordinal, categorical, text). We give some examples of how to use it below. We also illustrate some ways to plot data using matplotlib.\n\nFor very large datasets, you might want to use [modin](https://github.com/modin-project/modin), which provides the same pandas API but scales to multiple cores, by using [dask](https://github.com/dask/dask) or [ray](https://github.com/ray-project/ray).",
"_____no_output_____"
],
[
"### Install necessary libraries\n\n\n",
"_____no_output_____"
]
],
[
[
"# Standard Python libraries\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\nimport time\nimport numpy as np\nimport glob\nimport matplotlib.pyplot as plt\nimport PIL\nimport imageio\n\nfrom IPython import display\n\nimport sklearn\n\nimport seaborn as sns;\nsns.set(style=\"ticks\", color_codes=True)\n\nimport pandas as pd\npd.set_option('precision', 2) # 2 decimal places\npd.set_option('display.max_rows', 20)\npd.set_option('display.max_columns', 30)\npd.set_option('display.width', 100) # wide windows\n\n",
"_____no_output_____"
]
],
[
[
"### Auto-mpg dataset <a class=\"anchor\" id=\"EDA-autompg\"></a>",
"_____no_output_____"
]
],
[
[
"url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data'\ncolumn_names = ['MPG','Cylinders','Displacement','Horsepower','Weight',\n 'Acceleration', 'Year', 'Origin', 'Name']\ndf = pd.read_csv(url, names=column_names, sep='\\s+', na_values=\"?\")\n\n# The last column (name) is a unique id for the car, so we drop it\ndf = df.drop(columns=['Name'])\n\ndf.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 398 entries, 0 to 397\nData columns (total 8 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 MPG 398 non-null float64\n 1 Cylinders 398 non-null int64 \n 2 Displacement 398 non-null float64\n 3 Horsepower 392 non-null float64\n 4 Weight 398 non-null float64\n 5 Acceleration 398 non-null float64\n 6 Year 398 non-null int64 \n 7 Origin 398 non-null int64 \ndtypes: float64(5), int64(3)\nmemory usage: 25.0 KB\n"
]
],
[
[
"We notice that there are only 392 horsepower rows, but 398 of the others.\nThis is because the HP column has 6 **missing values** (also called NA, or\nnot available).\nThere are 3 main ways to deal with this:\n- Drop the rows with any missing values using dropna()\n- Drop any columns with any missing values using drop()\n- Replace the missing vales with some other valye (eg the median) using fillna. (This is called missing value imputation.)\nFor simplicity, we adopt the first approach.\n",
"_____no_output_____"
]
],
[
[
"# Ensure same number of rows for all features.\ndf = df.dropna()\ndf.info()",
"<class 'pandas.core.frame.DataFrame'>\nInt64Index: 392 entries, 0 to 397\nData columns (total 8 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 MPG 392 non-null float64\n 1 Cylinders 392 non-null int64 \n 2 Displacement 392 non-null float64\n 3 Horsepower 392 non-null float64\n 4 Weight 392 non-null float64\n 5 Acceleration 392 non-null float64\n 6 Year 392 non-null int64 \n 7 Origin 392 non-null int64 \ndtypes: float64(5), int64(3)\nmemory usage: 27.6 KB\n"
],
[
"# Summary statistics\ndf.describe(include='all')",
"_____no_output_____"
],
[
"# Convert Origin feature from int to categorical factor\ndf['Origin'] = df.Origin.replace([1,2,3],['USA','Europe','Japan'])\ndf['Origin'] = df['Origin'].astype('category')\n\n# Let us check the categories (levels)\nprint(df['Origin'].cat.categories)\n\n# Let us check the datatypes of all the features\nprint(df.dtypes)",
"Index(['Europe', 'Japan', 'USA'], dtype='object')\nMPG float64\nCylinders int64\nDisplacement float64\nHorsepower float64\nWeight float64\nAcceleration float64\nYear int64\nOrigin category\ndtype: object\n"
],
[
"# Let us inspect the data. We see meaningful names for Origin.\ndf.tail()",
"_____no_output_____"
],
[
"# Create latex table from first 5 rows \ntbl = df[-5:].to_latex(index=False, escape=False)\nprint(tbl)",
"\\begin{tabular}{rrrrrrrl}\n\\toprule\n MPG & Cylinders & Displacement & Horsepower & Weight & Acceleration & Year & Origin \\\\\n\\midrule\n 27.0 & 4 & 140.0 & 86.0 & 2790.0 & 15.6 & 82 & USA \\\\\n 44.0 & 4 & 97.0 & 52.0 & 2130.0 & 24.6 & 82 & Europe \\\\\n 32.0 & 4 & 135.0 & 84.0 & 2295.0 & 11.6 & 82 & USA \\\\\n 28.0 & 4 & 120.0 & 79.0 & 2625.0 & 18.6 & 82 & USA \\\\\n 31.0 & 4 & 119.0 & 82.0 & 2720.0 & 19.4 & 82 & USA \\\\\n\\bottomrule\n\\end{tabular}\n\n"
],
[
"# Plot mpg distribution for cars from different countries of origin\ndata = pd.concat( [df['MPG'], df['Origin']], axis=1)\nfig, ax = plt.subplots()\nax = sns.boxplot(x='Origin', y='MPG', data=data)\nax.axhline(data.MPG.mean(), color='r', linestyle='dashed', linewidth=2)\n#plt.savefig(os.path.join(figdir, 'auto-mpg-origin-boxplot.pdf'))\nplt.show()",
"_____no_output_____"
],
[
"# Plot mpg distribution for cars from different years\ndata = pd.concat( [df['MPG'], df['Year']], axis=1)\nfig, ax = plt.subplots()\nax = sns.boxplot(x='Year', y='MPG', data=data)\nax.axhline(data.MPG.mean(), color='r', linestyle='dashed', linewidth=2)\n#plt.savefig(os.path.join(figdir, 'auto-mpg-year-boxplot.pdf'))\nplt.show()",
"_____no_output_____"
]
],
[
[
"### Iris dataset <a class=\"anchor\" id=\"EDA-iris\"></a>",
"_____no_output_____"
]
],
[
[
"# Get the iris dataset and look at it\nfrom sklearn.datasets import load_iris\niris = load_iris()\n# show attributes of this object\nprint(dir(iris))\n\n# Extract numpy arrays\nX = iris.data \ny = iris.target\nprint(np.shape(X)) # (150, 4)\nprint(np.c_[X[0:3,:], y[0:3]]) # concatenate columns",
"['DESCR', 'data', 'feature_names', 'filename', 'target', 'target_names']\n(150, 4)\n[[5.1 3.5 1.4 0.2 0. ]\n [4.9 3. 1.4 0.2 0. ]\n [4.7 3.2 1.3 0.2 0. ]]\n"
],
[
"# The data is sorted by class. Let's shuffle the rows.\nN = np.shape(X)[0]\nrng = np.random.RandomState(42)\nperm = rng.permutation(N)\nX = X[perm]\ny = y[perm]\nprint(np.c_[X[0:3,:], y[0:3]])",
"[[6.1 2.8 4.7 1.2 1. ]\n [5.7 3.8 1.7 0.3 0. ]\n [7.7 2.6 6.9 2.3 2. ]]\n"
],
[
"# Convert to pandas dataframe \ndf = pd.DataFrame(data=X, columns=['sl', 'sw', 'pl', 'pw'])\n# create column for labels\ndf['label'] = pd.Series(iris.target_names[y], dtype='category')\n\n# Summary statistics\ndf.describe(include='all')",
"_____no_output_____"
],
[
"# Peak at the data\ndf.head()",
"_____no_output_____"
],
[
"# Create latex table from first 5 rows \ntbl = df[:6].to_latex(index=False, escape=False)\nprint(tbl)",
"\\begin{tabular}{rrrrl}\n\\toprule\n sl & sw & pl & pw & label \\\\\n\\midrule\n 6.1 & 2.8 & 4.7 & 1.2 & versicolor \\\\\n 5.7 & 3.8 & 1.7 & 0.3 & setosa \\\\\n 7.7 & 2.6 & 6.9 & 2.3 & virginica \\\\\n 6.0 & 2.9 & 4.5 & 1.5 & versicolor \\\\\n 6.8 & 2.8 & 4.8 & 1.4 & versicolor \\\\\n 5.4 & 3.4 & 1.5 & 0.4 & setosa \\\\\n\\bottomrule\n\\end{tabular}\n\n"
],
[
"# 2d scatterplot\n#https://seaborn.pydata.org/generated/seaborn.pairplot.html\nimport seaborn as sns;\nsns.set(style=\"ticks\", color_codes=True)\n# Make a dataframe with nicer labels for printing\n#iris_df = sns.load_dataset(\"iris\")\niris_df = df.copy()\niris_df.columns = iris['feature_names'] + ['label'] \ng = sns.pairplot(iris_df, vars = iris_df.columns[0:3] , hue=\"label\")\n#save_fig(\"iris-scatterplot.pdf\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"### Boston housing dataset <a class=\"anchor\" id=\"EDA-boston\"></a>",
"_____no_output_____"
]
],
[
[
"# Load data (creates numpy arrays)\nboston = sklearn.datasets.load_boston()\nX = boston.data\ny = boston.target\n\n# Convert to Pandas format\ndf = pd.DataFrame(X)\ndf.columns = boston.feature_names\ndf['MEDV'] = y.tolist()\n\ndf.describe()",
"_____no_output_____"
],
[
"# plot marginal histograms of each column (13 features, 1 response)\nplt.figure()\ndf.hist()\nplt.show()",
"_____no_output_____"
],
[
"# scatter plot of response vs each feature \nnrows = 3; ncols = 4;\nfig, ax = plt.subplots(nrows=nrows, ncols=ncols, sharey=True, figsize=[15, 10])\nplt.tight_layout()\nplt.clf()\nfor i in range(0,12):\n plt.subplot(nrows, ncols, i+1)\n plt.scatter(X[:,i], y)\n plt.xlabel(boston.feature_names[i])\n plt.ylabel(\"house price\")\n plt.grid()\n#save_fig(\"boston-housing-scatter.pdf\")\nplt.show()\n",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
4a2227c85591db42e77216b9ede1acf25f35816d
| 508,330 |
ipynb
|
Jupyter Notebook
|
3. Convolutional Neural Networks/L1 Convolutional Neural Networks/maxpooling_visualization.ipynb
|
xia0nan/Udacity-Deep-Learning
|
350c9b0341a0c157745b9a787997f3a85cc71b15
|
[
"MIT"
] | null | null | null |
3. Convolutional Neural Networks/L1 Convolutional Neural Networks/maxpooling_visualization.ipynb
|
xia0nan/Udacity-Deep-Learning
|
350c9b0341a0c157745b9a787997f3a85cc71b15
|
[
"MIT"
] | null | null | null |
3. Convolutional Neural Networks/L1 Convolutional Neural Networks/maxpooling_visualization.ipynb
|
xia0nan/Udacity-Deep-Learning
|
350c9b0341a0c157745b9a787997f3a85cc71b15
|
[
"MIT"
] | null | null | null | 1,377.588076 | 165,536 | 0.956143 |
[
[
[
"# Maxpooling Layer\n\nIn this notebook, we add and visualize the output of a maxpooling layer in a CNN. \n\nA convolutional layer + activation function, followed by a pooling layer, and a linear layer (to create a desired output size) make up the basic layers of a CNN.\n\n<img src='notebook_ims/CNN_all_layers.png' height=50% width=50% />",
"_____no_output_____"
],
[
"### Import the image",
"_____no_output_____"
]
],
[
[
"import cv2\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\n# TODO: Feel free to try out your own images here by changing img_path\n# to a file path to another image on your computer!\nimg_path = 'data/udacity_sdc.png'\n\n# load color image \nbgr_img = cv2.imread(img_path)\n# convert to grayscale\ngray_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2GRAY)\n\n# normalize, rescale entries to lie in [0,1]\ngray_img = gray_img.astype(\"float32\")/255\n\n# plot image\nplt.imshow(gray_img, cmap='gray')\nplt.show()",
"_____no_output_____"
]
],
[
[
"### Define and visualize the filters",
"_____no_output_____"
]
],
[
[
"import numpy as np\n\n## TODO: Feel free to modify the numbers here, to try out another filter!\nfilter_vals = np.array([[-1, -1, 1, 1], [-1, -1, 1, 1], [-1, -1, 1, 1], [-1, -1, 1, 1]])\n\nprint('Filter shape: ', filter_vals.shape)\n",
"Filter shape: (4, 4)\n"
],
[
"# Defining four different filters, \n# all of which are linear combinations of the `filter_vals` defined above\n\n# define four filters\nfilter_1 = filter_vals\nfilter_2 = -filter_1\nfilter_3 = filter_1.T\nfilter_4 = -filter_3\nfilters = np.array([filter_1, filter_2, filter_3, filter_4])\n\n# For an example, print out the values of filter 1\nprint('Filter 1: \\n', filter_1)",
"Filter 1: \n [[-1 -1 1 1]\n [-1 -1 1 1]\n [-1 -1 1 1]\n [-1 -1 1 1]]\n"
]
],
[
[
"### Define convolutional and pooling layers\n\nYou've seen how to define a convolutional layer, next is a:\n* Pooling layer\n\nIn the next cell, we initialize a convolutional layer so that it contains all the created filters. Then add a maxpooling layer, [documented here](http://pytorch.org/docs/stable/_modules/torch/nn/modules/pooling.html), with a kernel size of (2x2) so you can see that the image resolution has been reduced after this step!\n\nA maxpooling layer reduces the x-y size of an input and only keeps the most *active* pixel values. Below is an example of a 2x2 pooling kernel, with a stride of 2, applied to a small patch of grayscale pixel values; reducing the size of the patch by a factor of 4. Only the maximum pixel values in 2x2 remain in the new, pooled output.\n\n<img src='notebook_ims/maxpooling_ex.png' height=50% width=50% />",
"_____no_output_____"
]
],
[
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n \n# define a neural network with a convolutional layer with four filters\n# AND a pooling layer of size (2, 2)\nclass Net(nn.Module):\n \n def __init__(self, weight):\n super(Net, self).__init__()\n # initializes the weights of the convolutional layer to be the weights of the 4 defined filters\n k_height, k_width = weight.shape[2:]\n # defines the convolutional layer, assumes there are 4 grayscale filters\n # torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True)\n self.conv = nn.Conv2d(1, 4, kernel_size=(k_height, k_width), bias=False)\n self.conv.weight = torch.nn.Parameter(weight)\n # define a pooling layer\n self.pool = nn.MaxPool2d(2, 2)\n\n def forward(self, x):\n # calculates the output of a convolutional layer\n # pre- and post-activation\n conv_x = self.conv(x)\n activated_x = F.relu(conv_x)\n \n # applies pooling layer\n pooled_x = self.pool(activated_x)\n \n # returns all layers\n return conv_x, activated_x, pooled_x\n \n# instantiate the model and set the weights\nweight = torch.from_numpy(filters).unsqueeze(1).type(torch.FloatTensor)\nmodel = Net(weight)\n\n# print out the layer in the network\nprint(model)",
"Net(\n (conv): Conv2d(1, 4, kernel_size=(4, 4), stride=(1, 1), bias=False)\n (pool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n)\n"
]
],
[
[
"### Visualize the output of each filter\n\nFirst, we'll define a helper function, `viz_layer` that takes in a specific layer and number of filters (optional argument), and displays the output of that layer once an image has been passed through.",
"_____no_output_____"
]
],
[
[
"# helper function for visualizing the output of a given layer\n# default number of filters is 4\ndef viz_layer(layer, n_filters= 4):\n fig = plt.figure(figsize=(20, 20))\n \n for i in range(n_filters):\n ax = fig.add_subplot(1, n_filters, i+1)\n # grab layer outputs\n ax.imshow(np.squeeze(layer[0,i].data.numpy()), cmap='gray')\n ax.set_title('Output %s' % str(i+1))",
"_____no_output_____"
]
],
[
[
"Let's look at the output of a convolutional layer after a ReLu activation function is applied.\n\n#### ReLU activation\n\nA ReLU function turns all negative pixel values in 0's (black). See the equation pictured below for input pixel values, `x`. \n\n<img src='notebook_ims/relu_ex.png' height=50% width=50% />",
"_____no_output_____"
]
],
[
[
"# plot original image\nplt.imshow(gray_img, cmap='gray')\n\n# visualize all filters\nfig = plt.figure(figsize=(12, 6))\nfig.subplots_adjust(left=0, right=1.5, bottom=0.8, top=1, hspace=0.05, wspace=0.05)\nfor i in range(4):\n ax = fig.add_subplot(1, 4, i+1, xticks=[], yticks=[])\n ax.imshow(filters[i], cmap='gray')\n ax.set_title('Filter %s' % str(i+1))\n\n \n# convert the image into an input Tensor\ngray_img_tensor = torch.from_numpy(gray_img).unsqueeze(0).unsqueeze(1)\n\n# get all the layers \nconv_layer, activated_layer, pooled_layer = model(gray_img_tensor)\n\n# visualize the output of the activated conv layer\nviz_layer(activated_layer)",
"_____no_output_____"
]
],
[
[
"### Visualize the output of the pooling layer\n\nThen, take a look at the output of a pooling layer. The pooling layer takes as input the feature maps pictured above and reduces the dimensionality of those maps, by some pooling factor, by constructing a new, smaller image of only the maximum (brightest) values in a given kernel area.\n\nTake a look at the values on the x, y axes to see how the image has changed size.\n\n",
"_____no_output_____"
]
],
[
[
"# visualize the output of the pooling layer\nviz_layer(pooled_layer)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a222f3b5a996ed332f623928c9bcec087ecdca1
| 28,826 |
ipynb
|
Jupyter Notebook
|
01_06_Pivot_BS_Data.ipynb
|
HansjoergW/bfh_cas_bgd_fs2020_sa
|
7c38c1e8aa357d77551bfe1918f70430063f84cb
|
[
"Apache-2.0"
] | null | null | null |
01_06_Pivot_BS_Data.ipynb
|
HansjoergW/bfh_cas_bgd_fs2020_sa
|
7c38c1e8aa357d77551bfe1918f70430063f84cb
|
[
"Apache-2.0"
] | 2 |
2021-09-28T05:24:45.000Z
|
2022-02-26T09:44:51.000Z
|
01_06_Pivot_BS_Data.ipynb
|
HansjoergW/bfh_cas_bgd_fs2020_sa
|
7c38c1e8aa357d77551bfe1918f70430063f84cb
|
[
"Apache-2.0"
] | null | null | null | 33.209677 | 419 | 0.514189 |
[
[
[
"# default_exp filter",
"_____no_output_____"
],
[
"#hide\nfrom nbdev.showdoc import *",
"_____no_output_____"
],
[
"#hide\n# stellt sicher, dass beim verändern der core library diese wieder neu geladen wird\n%load_ext autoreload\n%autoreload 2",
"_____no_output_____"
]
],
[
[
"# 01_06_Pivot_BS_Data",
"_____no_output_____"
],
[
"In this notebook, we will transform the verticalized data rows of the BalanceSheet into a horizontalized dataframe.\n<br>\nCurrently, our data looks similar to the table below. Every Value is placed on its own row.\n\n\n| bs_id | company | date | attribute | value |\n|-------|------------|------------|-----------|-------|\n| 1 | VitaSport | 31.10.2018 | Assets | 100 |\n| 1 | VitaSport | 31.10.2018 | Cash | 80 |\n| 1 | VitaSport | 31.10.2018 | Other | 20 |\n| 2 | VitaSport | 31.10.2019 | Assets | 120 |\n| 2 | VitaSport | 31.10.2019 | Cash | 80 |\n| 2 | VitaSport | 31.10.2019 | Other | 40 |\n| 3 | GloryFood | 31.10.2019 | Assets | 50 |\n| 3 | GloryFood | 31.10.2019 | Cash | 5 |\n| 3 | GloryFood | 31.10.2019 | Other | 45 |\n\n<br>\nBut what we would like to have one entry per BalanceSheet:\n\n| bs_id | company | date | Assets | Cash | Other |\n|-------|-----------|------------|--------|------|-------|\n| 1 | VitaSport | 31.10.2018 | 100 | 80 | 20 |\n| 2 | VitaSport | 31.10.2019 | 120 | 80 | 40 |\n| 3 | GloryFood | 31.10.2019 | 50 | 5 | 45 |",
"_____no_output_____"
]
],
[
[
"# imports\nfrom bfh_cas_bgd_fs2020_sa.core import * # initialze spark\n\nfrom pathlib import Path\nfrom typing import List, Tuple, Union, Set\nfrom pyspark.sql.dataframe import DataFrame\nfrom pyspark.sql.functions import col, pandas_udf, PandasUDFType\nfrom pyspark.sql.types import *\n\nimport pandas as pd\n\nimport shutil # provides high level file operations\nimport time # used to measure execution time\nimport os\nimport sys",
"_____no_output_____"
],
[
"# folder with our test-dataset which contains only data from two zip files\ntst_filtered_folder = \"./tmp/filtered/\"\ntst_bs_folder = \"./tmp/bs/\"\n\n# folder with the whole dataset as a single parquet\nall_filtered_folder = \"D:/data/parq_filtered\"\nall_bs_folder = \"D:/data/parq_bs\"",
"_____no_output_____"
]
],
[
[
"## Init Spark",
"_____no_output_____"
]
],
[
[
"spark = get_spark_session() # Session anlegen\nspark # display the most important information of the session",
"_____no_output_____"
]
],
[
[
"## Load the dataset",
"_____no_output_____"
],
[
"Loading the data doesn't really do anything. It just prepares the df. But we well use the cache() method to keep the data in memory, once it is loaded for the first time.",
"_____no_output_____"
],
[
"### Load the test data",
"_____no_output_____"
]
],
[
[
"df_tst = spark.read.parquet(tst_filtered_folder).cache()",
"_____no_output_____"
]
],
[
[
"### Load the whole dataset",
"_____no_output_____"
]
],
[
[
"df_all = spark.read.parquet(all_filtered_folder).cache()",
"_____no_output_____"
]
],
[
[
"### Print all the contained column names",
"_____no_output_____"
]
],
[
[
"_ = [print(x, end=\", \") for x in df_all.columns] # print the name of the columns for convenience",
"cik, adsh, tag, version, coreg, ddate, qtrs, uom, value, footnote, name, sic, countryba, stprba, cityba, zipba, bas1, bas2, baph, countryma, stprma, cityma, zipma, mas1, mas2, countryinc, stprinc, ein, former, changed, afs, wksi, fye, form, period, fy, fp, filed, accepted, prevrpt, detail, instance, nciks, aciks, report, line, stmt, inpth, rfile, plabel, negating, ticker, name_cik_tic, exchange, cik_select, "
]
],
[
[
"## Loading data into memory",
"_____no_output_____"
],
[
"We just make a count on the test and the all dataset. This ensure that the data will be loaded into the memory and is cached afterwards.",
"_____no_output_____"
]
],
[
[
"start = time.time()\nprint(\"Entries in Test: \", \"{:_}\".format(df_tst.count())) # loading test dataset into memory\nduration = time.time() - start\nprint(\"duration: \", duration)",
"Entries in Test: 1_680_108\nduration: 13.733977556228638\n"
],
[
"start = time.time()\nprint(\"Entries in Test: \", \"{:_}\".format(df_all.count())) # loading all dataset into memory\nduration = time.time() - start\nprint(\"duration: \", duration)",
"Entries in Test: 35_454_045\nduration: 203.78116416931152\n"
]
],
[
[
"Since we filtered out about two thirds of the entries, loading the reduced data set takes only about 3 minutes to load it completely into memory",
"_____no_output_____"
],
[
"## Basics",
"_____no_output_____"
],
[
"In order to test how to pivot the data, we implement a simple example to test the principle. Actually, der is a pivot function, which provides the desired functionality.",
"_____no_output_____"
]
],
[
[
"df_bs_data = spark.createDataFrame( \\\n[ \\\n (1,\"VitaSport\",\"31.10.2018\",\"Assets\",100), \\\n (1,\"VitaSport\",\"31.10.2018\",\"Cash \",80 ), \\\n (1,\"VitaSport\",\"31.10.2018\",\"Other \",20 ), \\\n (2,\"VitaSport\",\"31.10.2019\",\"Assets\",120), \\\n (2,\"VitaSport\",\"31.10.2019\",\"Cash \",80 ), \\\n (2,\"VitaSport\",\"31.10.2019\",\"Other \",40 ), \\\n (3,\"GloryFood\",\"31.10.2019\",\"Assets\",50 ), \\\n (3,\"GloryFood\",\"31.10.2019\",\"Cash \",5 ), \\\n (3,\"GloryFood\",\"31.10.2019\",\"Other \",45 ) \\\n], \\\n (\"bs_id\", \"company\", \"date\", \"attribute\", \"value\") \\\n)\n\ndf_bs_data.groupby([\"company\",\"bs_id\",\"date\"]).pivot(\"attribute\").max(\"value\").show()",
"+---------+-----+----------+------+------+------+\n| company|bs_id| date|Assets|Cash |Other |\n+---------+-----+----------+------+------+------+\n|VitaSport| 2|31.10.2019| 120| 80| 40|\n|VitaSport| 1|31.10.2018| 100| 80| 20|\n|GloryFood| 3|31.10.2019| 50| 5| 45|\n+---------+-----+----------+------+------+------+\n\n"
]
],
[
[
"This looks simple. But it could be, that we will get more than one result. In the above sample, we just used the max aggregate function. However, that might be a too simple solution for real data.",
"_____no_output_____"
],
[
"## Pivoting Apple in the Testdata",
"_____no_output_____"
],
[
"In a first step, we select only the BalanceSheet data of Apple in the testset and we expect to have 2 BalanceSheets in there (one for every quarter - since the testset contains two quarter of data.",
"_____no_output_____"
]
],
[
[
"apple_df = df_tst.where(\"cik == 320193 and stmt = 'BS'\").cache()",
"_____no_output_____"
]
],
[
[
"Check how many datarows there are for Apple in the two test quarters.",
"_____no_output_____"
]
],
[
[
"apple_df.count()",
"_____no_output_____"
],
[
"apple_vip_cols = apple_df.select(['cik','adsh','period','tag', 'version', 'ddate','uom','value', 'qtrs','fp', 'report','line'])",
"_____no_output_____"
],
[
"apple_vip_cols.show()",
"+------+--------------------+----------+--------------------+------------+----------+---+----------+----+---+------+----+\n| cik| adsh| period| tag| version| ddate|uom| value|qtrs| fp|report|line|\n+------+--------------------+----------+--------------------+------------+----------+---+----------+----+---+------+----+\n|320193|0000320193-19-000119|2019-09-30| LiabilitiesCurrent|us-gaap/2019|2018-09-30|USD|1.15929E11| 0| FY| 4| 23|\n|320193|0000320193-19-000119|2019-09-30| LiabilitiesCurrent|us-gaap/2019|2019-09-30|USD|1.05718E11| 0| FY| 4| 23|\n|320193|0000320193-19-000119|2019-09-30|ContractWithCusto...|us-gaap/2019|2018-09-30|USD| 5.966E9| 0| FY| 4| 20|\n|320193|0000320193-19-000119|2019-09-30|ContractWithCusto...|us-gaap/2019|2019-09-30|USD| 5.522E9| 0| FY| 4| 20|\n|320193|0000320193-19-000119|2019-09-30| Assets|us-gaap/2019|2019-09-30|USD|3.38516E11| 0| FY| 4| 15|\n|320193|0000320193-19-000119|2019-09-30| Assets|us-gaap/2019|2018-09-30|USD|3.65725E11| 0| FY| 4| 15|\n|320193|0000320193-19-000119|2019-09-30|CommonStocksInclu...|us-gaap/2019|2018-09-30|USD| 4.0201E10| 0| FY| 4| 31|\n|320193|0000320193-19-000119|2019-09-30|CommonStocksInclu...|us-gaap/2019|2019-09-30|USD| 4.5174E10| 0| FY| 4| 31|\n|320193|0000320193-19-000119|2019-09-30|OtherAssetsNoncur...|us-gaap/2019|2018-09-30|USD| 2.2283E10| 0| FY| 4| 13|\n|320193|0000320193-19-000119|2019-09-30|OtherAssetsNoncur...|us-gaap/2019|2019-09-30|USD| 3.2978E10| 0| FY| 4| 13|\n|320193|0000320193-19-000119|2019-09-30| CommercialPaper|us-gaap/2019|2018-09-30|USD| 1.1964E10| 0| FY| 4| 21|\n|320193|0000320193-19-000119|2019-09-30| CommercialPaper|us-gaap/2019|2019-09-30|USD| 5.98E9| 0| FY| 4| 21|\n|320193|0000320193-19-000119|2019-09-30|MarketableSecurit...|us-gaap/2019|2018-09-30|USD| 4.0388E10| 0| FY| 4| 4|\n|320193|0000320193-19-000119|2019-09-30|MarketableSecurit...|us-gaap/2019|2019-09-30|USD| 5.1713E10| 0| FY| 4| 4|\n|320193|0000320193-19-000119|2019-09-30|LongTermDebtNoncu...|us-gaap/2019|2018-09-30|USD| 
9.3735E10| 0| FY| 4| 25|\n|320193|0000320193-19-000119|2019-09-30|LongTermDebtNoncu...|us-gaap/2019|2019-09-30|USD| 9.1807E10| 0| FY| 4| 25|\n|320193|0000320193-19-000119|2019-09-30| StockholdersEquity|us-gaap/2019|2016-09-30|USD|1.28249E11| 0| FY| 4| 34|\n|320193|0000320193-19-000119|2019-09-30| StockholdersEquity|us-gaap/2019|2017-09-30|USD|1.34047E11| 0| FY| 4| 34|\n|320193|0000320193-19-000119|2019-09-30| StockholdersEquity|us-gaap/2019|2018-09-30|USD|1.07147E11| 0| FY| 4| 34|\n|320193|0000320193-19-000119|2019-09-30| StockholdersEquity|us-gaap/2019|2019-09-30|USD| 9.0488E10| 0| FY| 4| 34|\n+------+--------------------+----------+--------------------+------------+----------+---+----------+----+---+------+----+\nonly showing top 20 rows\n\n"
]
],
[
[
"Checking the \"ddate\" column, we see entries that are in the past (compared to the \"period\" field - which is the Balance Sheet Date, rounded to nearest month-end). This is normal, since the balancesheet also contains the data of the balance sheet from a year ago. However, in our case we are only interested in the data for the actual period. These are the entries where period and ddate have the same value.",
"_____no_output_____"
]
],
[
[
"apple_bs_per_period = apple_vip_cols.where(\"period == ddate\").orderBy([\"cik\",\"adsh\",\"period\",\"report\",\"line\"])",
"_____no_output_____"
],
[
"apple_bs_per_period.show(32)",
"+------+--------------------+----------+--------------------+------------+----------+------+----------+----+---+------+----+\n| cik| adsh| period| tag| version| ddate| uom| value|qtrs| fp|report|line|\n+------+--------------------+----------+--------------------+------------+----------+------+----------+----+---+------+----+\n|320193|0000320193-19-000076|2019-06-30|CashAndCashEquiva...|us-gaap/2018|2019-06-30| USD| 5.053E10| 0| Q3| 4| 3|\n|320193|0000320193-19-000076|2019-06-30|MarketableSecurit...|us-gaap/2018|2019-06-30| USD| 4.4084E10| 0| Q3| 4| 4|\n|320193|0000320193-19-000076|2019-06-30|AccountsReceivabl...|us-gaap/2018|2019-06-30| USD| 1.4148E10| 0| Q3| 4| 5|\n|320193|0000320193-19-000076|2019-06-30| InventoryNet|us-gaap/2018|2019-06-30| USD| 3.355E9| 0| Q3| 4| 6|\n|320193|0000320193-19-000076|2019-06-30|NontradeReceivabl...|us-gaap/2018|2019-06-30| USD| 1.2326E10| 0| Q3| 4| 7|\n|320193|0000320193-19-000076|2019-06-30| OtherAssetsCurrent|us-gaap/2018|2019-06-30| USD| 1.053E10| 0| Q3| 4| 8|\n|320193|0000320193-19-000076|2019-06-30| AssetsCurrent|us-gaap/2018|2019-06-30| USD|1.34973E11| 0| Q3| 4| 9|\n|320193|0000320193-19-000076|2019-06-30|MarketableSecurit...|us-gaap/2018|2019-06-30| USD|1.15996E11| 0| Q3| 4| 11|\n|320193|0000320193-19-000076|2019-06-30|PropertyPlantAndE...|us-gaap/2018|2019-06-30| USD| 3.7636E10| 0| Q3| 4| 12|\n|320193|0000320193-19-000076|2019-06-30|OtherAssetsNoncur...|us-gaap/2018|2019-06-30| USD| 3.3634E10| 0| Q3| 4| 13|\n|320193|0000320193-19-000076|2019-06-30| AssetsNoncurrent|us-gaap/2018|2019-06-30| USD|1.87266E11| 0| Q3| 4| 14|\n|320193|0000320193-19-000076|2019-06-30| Assets|us-gaap/2018|2019-06-30| USD|3.22239E11| 0| Q3| 4| 15|\n|320193|0000320193-19-000076|2019-06-30|AccountsPayableCu...|us-gaap/2018|2019-06-30| USD| 2.9115E10| 0| Q3| 4| 18|\n|320193|0000320193-19-000076|2019-06-30|OtherLiabilitiesC...|us-gaap/2018|2019-06-30| USD| 3.1673E10| 0| Q3| 4| 
19|\n|320193|0000320193-19-000076|2019-06-30|ContractWithCusto...|us-gaap/2018|2019-06-30| USD| 5.434E9| 0| Q3| 4| 20|\n|320193|0000320193-19-000076|2019-06-30| CommercialPaper|us-gaap/2018|2019-06-30| USD| 9.953E9| 0| Q3| 4| 21|\n|320193|0000320193-19-000076|2019-06-30| LongTermDebtCurrent|us-gaap/2018|2019-06-30| USD| 1.3529E10| 0| Q3| 4| 22|\n|320193|0000320193-19-000076|2019-06-30| LiabilitiesCurrent|us-gaap/2018|2019-06-30| USD| 8.9704E10| 0| Q3| 4| 23|\n|320193|0000320193-19-000076|2019-06-30|LongTermDebtNoncu...|us-gaap/2018|2019-06-30| USD| 8.4936E10| 0| Q3| 4| 25|\n|320193|0000320193-19-000076|2019-06-30|OtherLiabilitiesN...|us-gaap/2018|2019-06-30| USD| 5.1143E10| 0| Q3| 4| 26|\n|320193|0000320193-19-000076|2019-06-30|LiabilitiesNoncur...|us-gaap/2018|2019-06-30| USD|1.36079E11| 0| Q3| 4| 27|\n|320193|0000320193-19-000076|2019-06-30| Liabilities|us-gaap/2018|2019-06-30| USD|2.25783E11| 0| Q3| 4| 28|\n|320193|0000320193-19-000076|2019-06-30|CommitmentsAndCon...|us-gaap/2018|2019-06-30| USD| null| 0| Q3| 4| 29|\n|320193|0000320193-19-000076|2019-06-30|CommonStocksInclu...|us-gaap/2018|2019-06-30| USD| 4.3371E10| 0| Q3| 4| 31|\n|320193|0000320193-19-000076|2019-06-30|RetainedEarningsA...|us-gaap/2018|2019-06-30| USD| 5.3724E10| 0| Q3| 4| 32|\n|320193|0000320193-19-000076|2019-06-30|AccumulatedOtherC...|us-gaap/2018|2019-06-30| USD| -6.39E8| 0| Q3| 4| 33|\n|320193|0000320193-19-000076|2019-06-30| StockholdersEquity|us-gaap/2018|2019-06-30| USD| 9.6456E10| 0| Q3| 4| 34|\n|320193|0000320193-19-000076|2019-06-30|LiabilitiesAndSto...|us-gaap/2018|2019-06-30| USD|3.22239E11| 0| Q3| 4| 35|\n|320193|0000320193-19-000076|2019-06-30|CommonStockParOrS...|us-gaap/2018|2019-06-30| USD| 0.0| 0| Q3| 5| 1|\n|320193|0000320193-19-000076|2019-06-30|CommonStockShares...|us-gaap/2018|2019-06-30|shares| 1.26E10| 0| Q3| 5| 2|\n|320193|0000320193-19-000076|2019-06-30|CommonStockShares...|us-gaap/2018|2019-06-30|shares|4.531395E9| 0| Q3| 5| 
3|\n|320193|0000320193-19-000076|2019-06-30|CommonStockShares...|us-gaap/2018|2019-06-30|shares|4.531395E9| 0| Q3| 5| 4|\n+------+--------------------+----------+--------------------+------------+----------+------+----------+----+---+------+----+\nonly showing top 32 rows\n\n"
]
],
[
[
"Comparing the data above with the BalanceSheet in the appropriate report (https://www.sec.gov/ix?doc=/Archives/edgar/data/320193/000032019319000076/a10-qq320196292019.htm) we see that the data and entries match.",
"_____no_output_____"
],
[
"Finally, we pivot the data and we expect two rows in the data.",
"_____no_output_____"
]
],
[
[
"apple_pivoted_df = apple_bs_per_period.select([\"cik\",\"adsh\",\"period\",\"ddate\",'tag','value']) \\\n .groupby([\"cik\",\"adsh\",\"period\",\"ddate\"]) \\\n .pivot(\"tag\",['Assets','AssetsCurrent','OtherAssetsCurrent']).max('value')",
"_____no_output_____"
],
[
"apple_pivoted_df.select([\"cik\",\"adsh\",\"period\",'ddate', 'Assets','AssetsCurrent','OtherAssetsCurrent']).show()",
"+------+--------------------+----------+----------+----------+-------------+------------------+\n| cik| adsh| period| ddate| Assets|AssetsCurrent|OtherAssetsCurrent|\n+------+--------------------+----------+----------+----------+-------------+------------------+\n|320193|0000320193-19-000119|2019-09-30|2019-09-30|3.38516E11| 1.62819E11| 1.2352E10|\n|320193|0000320193-19-000076|2019-06-30|2019-06-30|3.22239E11| 1.34973E11| 1.053E10|\n+------+--------------------+----------+----------+----------+-------------+------------------+\n\n"
]
],
[
[
"The result looks promising.",
"_____no_output_____"
],
[
"## Deciding which tags to pivot",
"_____no_output_____"
],
[
"In the analysis step we created a sorted list of the tags that are present in BalanceSheets. As was shown there, it doesn't make sense to pivot all 3400 tags. Instead, only a small subset appears often enough in reports to be useful. <br>\nWe stored the sorted list in the file \"bs_tags.csv\". No, we will load it and use the first 100 tags to define which values should be pivoted.",
"_____no_output_____"
]
],
[
[
"bs_tags = pd.read_csv(\"./bs_tags.csv\")['tag']",
"_____no_output_____"
],
[
"relevant_tags = bs_tags[:100].tolist()",
"_____no_output_____"
]
],
[
[
"## Pivoting",
"_____no_output_____"
],
[
"### Pivot the testset",
"_____no_output_____"
]
],
[
[
"df_test_bs_ready = df_tst.where(\"stmt = 'BS' and period == ddate\").select(['cik','ticker','adsh','period','tag', 'ddate','value']).cache()",
"_____no_output_____"
],
[
"df_test_bs_ready.count()",
"_____no_output_____"
],
[
"df_test_bs_ready.select('tag').distinct().count()",
"_____no_output_____"
],
[
"df_test_bs_pivot = df_test_bs_ready.groupby([\"cik\",\"adsh\",\"period\",\"ddate\"]).pivot(\"tag\",relevant_tags) \\\n .max('value').cache()",
"_____no_output_____"
],
[
"df_test_bs_pivot.count()",
"_____no_output_____"
],
[
"df_test_bs_pivot.write.parquet(tst_bs_folder)",
"_____no_output_____"
]
],
[
[
"### Pivot the whole dataset",
"_____no_output_____"
]
],
[
[
"df_all_bs_ready = df_all.where(\"stmt = 'BS' and period == ddate\").select(['cik','ticker','adsh','form','period','tag','value']).cache()",
"_____no_output_____"
],
[
"df_all_bs_ready.count()",
"_____no_output_____"
],
[
"df_all_bs_ready.select('tag').distinct().count()",
"_____no_output_____"
],
[
"df_all_bs_pivot = df_all_bs_ready.groupby([\"cik\",\"ticker\",\"adsh\",\"form\",\"period\"]).pivot(\"tag\",relevant_tags) \\\n .max('value').cache()",
"_____no_output_____"
],
[
"df_all_bs_pivot.count()",
"_____no_output_____"
]
],
[
[
"In order to have an easy way to look at the data with a texteditor, we convert it to a pandas Dataframe and store it as CSV. The resulting file size is now 54 MB. ",
"_____no_output_____"
]
],
[
[
"df_all_bs_pivot.toPandas().to_csv(\"bs_data.csv\",index=False,header=True)",
"_____no_output_____"
]
],
[
[
"But for further processing, we also store it as parquet, since this will keep that datatype information of the columns.",
"_____no_output_____"
]
],
[
[
"shutil.rmtree(all_bs_folder, ignore_errors=True)\ndf_all_bs_pivot.repartition(8,col(\"cik\")).write.parquet(all_bs_folder)",
"_____no_output_____"
]
],
[
[
"## Stop the SparkContext",
"_____no_output_____"
]
],
[
[
"spark.stop()",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a223a1158441b01f3544d2ddf8971c21a7491d4
| 812,624 |
ipynb
|
Jupyter Notebook
|
session 7/session 6.ipynb
|
Aasrith1906/Mentorship-Program
|
48413028a63eca95cdbd322f9be99a7827b736db
|
[
"MIT"
] | null | null | null |
session 7/session 6.ipynb
|
Aasrith1906/Mentorship-Program
|
48413028a63eca95cdbd322f9be99a7827b736db
|
[
"MIT"
] | null | null | null |
session 7/session 6.ipynb
|
Aasrith1906/Mentorship-Program
|
48413028a63eca95cdbd322f9be99a7827b736db
|
[
"MIT"
] | null | null | null | 2,587.974522 | 423,508 | 0.962967 |
[
[
[
"import numpy as np \nimport matplotlib.pyplot as plt\nimport random \n\n#using the monte carlo method to approximate the value of pi \n\nN_array = np.arange(1,5000)\npi_array = []\nx_array_points = []\ny_array_points = []\n\nfor n in N_array:\n \n num_in = 0\n num_out = 0\n \n for i in range(n):\n \n x = random.uniform(0,1)\n y = random.uniform(0,1)\n \n if (x**2) + (y**2) <= 1:\n \n num_in +=1\n \n else:\n \n num_out += 1\n \n pi_array.append(4*num_in/n)\n \n x_array_points.append(x)\n y_array_points.append(y)\n\nplt.style.use('ggplot')\n",
"_____no_output_____"
],
[
"import pandas as pd\n\ndict_ = {\"N\":N_array , \"PI\":pi_array }\n\ndf = pd.DataFrame(dict_)\n\ndf.dropna()\n\ndf[\"PI\"].mean()",
"_____no_output_____"
],
[
"plt.figure(figsize=(50,10))\nplt.plot(N_array , pi_array)\nplt.title(\"pi vs n\")\n",
"_____no_output_____"
],
[
"x = np.arange(0,1,0.01)\ny = np.sqrt(1-x**2)\nplt.figure(figsize=(30,10))\nplt.plot(x,y,'b')\nplt.scatter(x_array_points,y_array_points)\nplt.title(\"random points lying inside and outside the quarter circle\")\n",
"_____no_output_____"
],
[
"import random\n\n'''\nfinding area of a curve using monte carlo method \n\nf(x) = x*(1-x)\n'''\n\n#plotting the curve\n\ny_array = []\nx_array = []\n\nfor i in np.arange(0,1,0.01):\n \n y = i*(i-1)\n \n x_array.append(i)\n y_array.append(y)\n\nplt.figure(figsize=(30,10))\n\nplt.plot(x_array , y_array,'b')\n\n\n\nN = 10000 #number of random points\n\nx_in = []\nx_out = []\ny_in = []\ny_out = []\n\nnum_points = 0\n\nArea_rect = -0.25\n\nfor i in range(N):\n \n x = random.uniform(0,1)\n y = random.uniform(0 , min(y_array))\n x_array.append(x)\n y_array.append(y)\n \n y_actual = x*(x-1)\n \n if y < y_actual:\n \n num_points+=1\n x_in.append(x)\n y_in.append(y)\n \n else:\n \n x_out.append(x)\n y_out.append(y)\n \nArea = 0.26*num_points/N\n\nprint(Area)\n\nplt.scatter(x_in , y_in , color = 'g')\nplt.scatter(x_out, y_out , color = 'r')\nplt.show()\n ",
"0.086502\n"
],
[
"'''\nfunction that finds monte carlo integral of any function\n'''\n\ndef MonteCarlo(func , limits , N): #function , upper and lower limits , number of random points\n \n a , b = limits[0] , limits[1]\n \n integral = 0\n \n for i in range(N):\n \n x_rand = random.uniform(a , b)\n #print(x_rand)\n y = func(x_rand)\n #print(y)\n integral+=y\n \n area = (b-a)/float(N)*integral\n \n print(\"Area of function between {} {} is {}\".format(a , b , area))\n\n \n \n ",
"_____no_output_____"
],
[
"#testing monte carlo method function \n\n\nMonteCarlo(np.sin, [0,np.pi],100000) #integral of sinx between 0 and pi\n\ndef func(x):\n return x**2\n\nMonteCarlo(func , [0,2] , 1000)",
"Area of function between 0 3.141592653589793 is 2.0051351813785345\nArea of function between 0 2 is 2.660976652919167\n"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a224945711e85dd6d0d2e709217aab9bbdd33df
| 739,182 |
ipynb
|
Jupyter Notebook
|
FingerprintingDataAnalysis.ipynb
|
sonjageorgievska/Crowds
|
cd01ca8af5bb4f7822b8698bb5d1bffbed243c03
|
[
"Apache-2.0"
] | null | null | null |
FingerprintingDataAnalysis.ipynb
|
sonjageorgievska/Crowds
|
cd01ca8af5bb4f7822b8698bb5d1bffbed243c03
|
[
"Apache-2.0"
] | null | null | null |
FingerprintingDataAnalysis.ipynb
|
sonjageorgievska/Crowds
|
cd01ca8af5bb4f7822b8698bb5d1bffbed243c03
|
[
"Apache-2.0"
] | null | null | null | 677.527039 | 89,830 | 0.93749 |
[
[
[
"Analysing GPS data from Jaume University",
"_____no_output_____"
],
[
"Defining functions",
"_____no_output_____"
]
],
[
[
"import json \nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef getmeasurementTimestamp(item):\n return int(item['measurementTimestamp'])\ndef getProcessingTimestamp(item):\n return int(item['processingTimestamp'])\n\ndef get_x_error(item): #the error in the data is the stdev of the sample, we compute the error of the estimation (the sample mean)\n return item['value']['averagecoordinate']['error']['coordinates'][0]/math.sqrt(item['value']['trackeeHistory']['nMeasurements'])\n\ndef get_y_error(item):\n return item['value']['averagecoordinate']['error']['coordinates'][1]/math.sqrt(item['value']['trackeeHistory']['nMeasurements'])\n\ndef get_fitted(item):\n return item['value']['trackeeHistory']['fitStatus']\n \ndef get_x_sample_error(item): \n return item['value']['averagecoordinate']['error']['coordinates'][0]\ndef get_y_sample_error(item):\n return item['value']['averagecoordinate']['error']['coordinates'][1]\n\ndef get_probChi2(item):\n return item['value']['trackeeHistory']['probChi2'] \ndef get_Chi2PerDof(item):\n return item['value']['trackeeHistory']['chi2PerDof'] \n\ndef plotHistogramOfDictionary(dictionary, xlabel, ylabel, nbins):\n dictionaryList = []\n for address in dictionary.keys():\n dictionaryList.append(dictionary[address])\n dictArray = np.array(dictionaryList)\n plt.hist(dictArray, bins = nbins)\n plt.ylabel(ylabel)\n plt.xlabel(xlabel)\n axes = plt.gca()\n plt.show()\n \ndef getX(line):\n coordinates = line[\"value\"][\"averagecoordinate\"][\"avg\"][\"coordinates\"]\n return coordinates[0]\n\ndef getY(line):\n coordinates = line[\"value\"][\"averagecoordinate\"][\"avg\"][\"coordinates\"]\n return coordinates[1]",
"_____no_output_____"
]
],
[
[
"Reading GPS data",
"_____no_output_____"
]
],
[
[
"for i in [0]:\n data = []\n with open(\"F:/ArenaData/Fingerprinting/fingerprints_GPS.json\") as f:\n data = f.readlines()\n json_lines = []\n mac_adresses = [] \n for line in data:\n jsline = json.loads(line)\n jsline[\"measurementTimestamp\"]/=1000\n json_lines.append(jsline)#now json_lines contains all lines of data\n mac_adresses.append(jsline[\"value\"][\"sourceMac\"]) # mac_addresses is a list of address per line \n#sorting by time\njson_lines.sort(key = getmeasurementTimestamp) # now json_lines is sorted by time",
"_____no_output_____"
]
],
[
[
"Computation of error etc",
"_____no_output_____"
]
],
[
[
"minTime = getmeasurementTimestamp(json_lines[0])\nmaxTime = getmeasurementTimestamp(json_lines[len(json_lines) - 1])\nprint(\"minTime=\"+ str(minTime))\nprint(\"maxTime=\"+ str(maxTime))\n\ntimeMinutes = (maxTime - minTime)/60\nprint(\"time in seconds = \", str(maxTime - minTime))\nprint(\"time in hours = \", str(timeMinutes/60))",
"_____no_output_____"
]
],
[
[
"Computing FirstTimeSeen and LastTimeSeen for every address",
"_____no_output_____"
]
],
[
[
"concertFinishedTimestamp=maxTime\nFirstTimeSeen=dict()\nLastTimeSeen=dict()\n\nfor jsline in json_lines:\n address = jsline[\"value\"][\"sourceMac\"]\n time = getmeasurementTimestamp(jsline)\n if address in FirstTimeSeen: \n if time < FirstTimeSeen[address]:\n FirstTimeSeen[address] = time\n else:\n FirstTimeSeen[address] = time\n \n if address in LastTimeSeen: \n if time > LastTimeSeen[address]:\n LastTimeSeen[address] = time\n else:\n LastTimeSeen[address] = time",
"_____no_output_____"
]
],
[
[
"Computing dwell time, number of persistent addresses, and number of addresses visible in every second",
"_____no_output_____"
]
],
[
[
"DwellTime = dict()\nDwellTimeDuringConcert = dict()\nnumberOfAdressesAtConcert=0\nfor address in LastTimeSeen.keys(): \n DwellTime[address] = int((LastTimeSeen[address] - FirstTimeSeen[address]) /60) # in minutes\n if LastTimeSeen[address] <= concertFinishedTimestamp:\n numberOfAdressesAtConcert += 1\n DwellTimeDuringConcert[address] = DwellTime[address] \nprint('number of addresses detected during all hours:')\nprint(numberOfAdressesAtConcert)\nlongTermAddresses=[]\nnumberOfAddresses = []\nAddressesInSec = []\nfor jsline in json_lines:\n sec = int(math.floor((getmeasurementTimestamp(jsline)- minTime)))\n #print(str(sec))\n address = jsline[\"value\"][\"sourceMac\"]\n if DwellTime[address] > 0:# i.e. >1 minutes \n longTermAddresses.append(address)\n if len(AddressesInSec) <= sec:\n while len(AddressesInSec) <= sec:\n AddressesInSec.append([]) \n AddressesInSec[sec].append(address) \n else:\n if address not in AddressesInSec[sec]: \n AddressesInSec[sec].append(address)\nfor setje in AddressesInSec:\n numberOfAddresses.append(len(setje)) \nlongTermCount = len(set(longTermAddresses)) \nprint(\"Long term persistent addresses (>=1 min)= \" + str(longTermCount))\naverage = 0 \nmaxN = 0\nfor addresses in AddressesInSec: \n average += len(addresses) \n maxN = max(len(addresses), maxN)\naverage /= len(AddressesInSec) \nprint(\"average Number Of Addresses Per Second \" + str(average))\nprint(maxN)",
"_____no_output_____"
],
[
"plotHistogramOfDictionary(DwellTimeDuringConcert, \"dwell time(minutes)\", \"number of addresses\", 30)",
"_____no_output_____"
]
],
[
[
"Drawing how many addresses per second are visible ",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\nplt.plot(numberOfAddresses)\nplt.ylabel('addresses present')\nplt.xlabel('sec')\naxes = plt.gca()\naxes.set_xlim([1804000,1817000])\naxes.set_ylim([0,max(numberOfAddresses)])\nplt.show()",
"_____no_output_____"
]
],
[
[
"Just a dictionary that indicates for every address whether it is randomized or not",
"_____no_output_____"
]
],
[
[
"Randomized = dict()\nPersistentRandomized = dict()\ncount0 = 0\ncount1 = 0\nfor line in json_lines:\n address = line[\"value\"][\"sourceMac\"]\n if line[\"value\"][\"trackeeHistory\"][\"localMac\"] == 1 :\n count1 +=1\n Randomized[address] = 1\n if DwellTime[address] > 10: \n if LastTimeSeen[address] < concertFinishedTimestamp:\n PersistentRandomized[address] = 1 \n else:\n count0 +=1\n Randomized[address] = 0\n if DwellTime[address] > 10: \n if LastTimeSeen[address] < concertFinishedTimestamp:\n PersistentRandomized[address] = 0 \n\nzeros=0\nones=0\nzerosPersistent = 0\nonesPersistent = 0\nfor key in Randomized.keys():\n if Randomized[key]==0:\n zeros +=1\n else:\n ones +=1\nfor key in PersistentRandomized.keys():\n if PersistentRandomized[key]==0:\n zerosPersistent +=1\n else:\n onesPersistent +=1 \nprint(\"total number of lines with localMac == 1: \" + str(count1))\nprint(\"total number of lines with localMac == 0: \" + str(count0))\nprint(\"total number of addresses with localMac == 1: \" + str(ones))\nprint(\"total number of addresses with localMac == 0: \" + str(zeros))\nprint(\"total number of persistent addresses with localMac == 1: \" + str(onesPersistent))\nprint(\"total number of persistent addresses with localMac == 0: \" + str(zerosPersistent))",
"total number of lines with localMac == 1: 0\ntotal number of lines with localMac == 0: 19937\ntotal number of addresses with localMac == 1: 0\ntotal number of addresses with localMac == 0: 16\ntotal number of persistent addresses with localMac == 1: 0\ntotal number of persistent addresses with localMac == 0: 14\n"
],
[
"DwellTimeConcertRandomized = dict()\nDwellTimeConcertNonRandomized = dict()\nfor key in DwellTimeDuringConcert.keys():\n if Randomized[key] == 0:\n DwellTimeConcertNonRandomized[key] = DwellTimeDuringConcert[key]\n else:\n DwellTimeConcertRandomized[key] = DwellTimeDuringConcert[key]",
"_____no_output_____"
],
[
"plotHistogramOfDictionary(DwellTimeConcertNonRandomized, \"dwell time(minutes) of non-randomized addresses\", \"number of addresses\", 50)",
"_____no_output_____"
]
],
[
[
"Computes visible addresses per time_interval with a specified localMac tag, time_interval in seconds",
"_____no_output_____"
]
],
[
[
"# checking the trajectory of a certain address, error used is sample error (not error of the estimation of the mean) \nlines = []#will contain only the lines for that address\nfor line in json_lines:\n address = line[\"value\"][\"sourceMac\"]\n if (math.floor(address) == 11):\n lines.append(line) \nx_coord = []\ny_coord = []\ntimes = []\nx_errors = []\ny_errors = []\nfor line in lines: \n d=9\n if True : \n coordinates = line[\"value\"][\"averagecoordinate\"][\"avg\"][\"coordinates\"]\n time = math.floor(line[\"measurementTimestamp\"])-minTime \n if time not in times: \n x_coord.append( coordinates[0])\n y_coord.append(coordinates[1])\n x_errors.append(0.1)# for a 95% confidence\n y_errors.append(0.1)# for a 95% confidence\n times.append(time)\n \nprint(len(x_errors)) \nprint(len(x_coord))",
"497\n497\n"
],
[
"#drawing hte x and y coordinates\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfig, (ax0, ax1) = plt.subplots(nrows=2, sharex=True)\n#ax0.errorbar(times, x_coord, yerr=x_errors)\nax0.errorbar(times, x_coord, yerr=x_errors, fmt='o')\nax0.set_title('x-coordinate ')\nplt.xlabel('time(sec)')\n\n#ax1.errorbar(times, y_coord, yerr=y_errors)\nax1.errorbar(times, y_coord, yerr=y_errors, fmt='o')\nax1.set_title('y-coordinate ')\n\nplt.show()",
"_____no_output_____"
],
[
"coor = dict()\ncount =0\nfor y in y_coord:\n key = times[count]\n coor[key] = y\n count +=1\nplotHistogramOfDictionary(coor, 'y_coord', 'freq', 200)",
"_____no_output_____"
],
[
"13000/60",
"_____no_output_____"
],
[
"216/60",
"_____no_output_____"
],
[
"def get_coords(phone): \n # checking the trajectory of a certain address, error used is sample error (not error of the estimation of the mean) \n lines = []#will contain only the lines for that address\n for line in json_lines:\n address = line[\"value\"][\"sourceMac\"]\n #if (address == '27e573c8-1640-4ea8-86d8-0733c800e9cd'):#this is the address we are checking for, non-randomized\n if (math.floor(address) == phone):\n #if (address == '8b8a2356-d11e-4bd5-bb35-d8370bf48b1e'):#randomized address \n lines.append(line) \n x_coord = []\n y_coord = []\n times = []\n x_errors = []\n y_errors = []\n for line in lines: \n d=9\n if True : \n coordinates = line[\"value\"][\"averagecoordinate\"][\"avg\"][\"coordinates\"]\n time = math.floor(line[\"measurementTimestamp\"])-minTime \n if time not in times and time >=1804000 and time <= 1817000: \n #if time not in times and time >=1811000 and time <= 1812000: \n x_coord.append( coordinates[0])\n y_coord.append(coordinates[1])\n x_errors.append(0.1)# for a 95% confidence\n y_errors.append(0.1)# for a 95% confidence\n times.append(time)\n \n #print(len(x_errors)) \n #print(len(x_coord))\n return (x_coord, y_coord)",
"_____no_output_____"
]
],
[
[
"#### Plotting the GPS trajectories of the detected phones",
"_____no_output_____"
]
],
[
[
"for address in range(0,26):\n (x, y) = get_coords(address)\n #print(y)\n if len(x) > 0:\n #print(address)\n plt.plot(x,y)\n #plt.xlim(200, 320) \n #plt.ylim(20,120)\n plt.gca().set_aspect('equal', adjustable='box')\nplt.show() ",
"_____no_output_____"
],
[
"for address in range(0,26):\n (x, y) = get_coords(address)\n plt.plot(x,y)\n print(address)\n plt.show() ",
"0\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
4a2254e573fbf83db63b4e5171ba7cf1714f9603
| 276,322 |
ipynb
|
Jupyter Notebook
|
project-face-generation/dlnd_face_generation_5layer_128b_20e.ipynb
|
sonalagrawal7/DeepLearning-nanodegree-exercises
|
1d4c939d95024cd1a9b3cf23d129b2e262bce097
|
[
"MIT"
] | 3 |
2021-03-12T12:59:31.000Z
|
2021-03-16T17:11:39.000Z
|
project-face-generation/dlnd_face_generation_5layer_128b_20e.ipynb
|
sonalagrawal7/DeepLearning-nanodegree-exercises
|
1d4c939d95024cd1a9b3cf23d129b2e262bce097
|
[
"MIT"
] | null | null | null |
project-face-generation/dlnd_face_generation_5layer_128b_20e.ipynb
|
sonalagrawal7/DeepLearning-nanodegree-exercises
|
1d4c939d95024cd1a9b3cf23d129b2e262bce097
|
[
"MIT"
] | null | null | null | 195.280565 | 96,300 | 0.87115 |
[
[
[
"# Face Generation\n\nIn this project, you'll define and train a DCGAN on a dataset of faces. Your goal is to get a generator network to generate *new* images of faces that look as realistic as possible!\n\nThe project will be broken down into a series of tasks from **loading in data to defining and training adversarial networks**. At the end of the notebook, you'll be able to visualize the results of your trained Generator to see how it performs; your generated samples should look like fairly realistic faces with small amounts of noise.\n\n### Get the Data\n\nYou'll be using the [CelebFaces Attributes Dataset (CelebA)](http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html) to train your adversarial networks.\n\nThis dataset is more complex than the number datasets (like MNIST or SVHN) you've been working with, and so, you should prepare to define deeper networks and train them for a longer time to get good results. It is suggested that you utilize a GPU for training.\n\n### Pre-processed Data\n\nSince the project's main focus is on building the GANs, we've done *some* of the pre-processing for you. Each of the CelebA images has been cropped to remove parts of the image that don't include a face, then resized down to 64x64x3 NumPy images. Some sample data is show below.\n\n<img src='assets/processed_face_data.png' width=60% />\n\n> If you are working locally, you can download this data [by clicking here](https://s3.amazonaws.com/video.udacity-data.com/topher/2018/November/5be7eb6f_processed-celeba-small/processed-celeba-small.zip)\n\nThis is a zip file that you'll need to extract in the home directory of this notebook for further loading and processing. After extracting the data, you should be left with a directory of data `processed_celeba_small/`",
"_____no_output_____"
]
],
[
[
"# can comment out after executing\n#!unzip processed_celeba_small.zip",
"_____no_output_____"
],
[
"data_dir = 'processed_celeba_small/'\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nimport pickle as pkl\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport problem_unittests as tests\n#import helper\n\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"## Visualize the CelebA Data\n\nThe [CelebA](http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html) dataset contains over 200,000 celebrity images with annotations. Since you're going to be generating faces, you won't need the annotations, you'll only need the images. Note that these are color images with [3 color channels (RGB)](https://en.wikipedia.org/wiki/Channel_(digital_image)#RGB_Images) each.\n\n### Pre-process and Load the Data\n\nSince the project's main focus is on building the GANs, we've done *some* of the pre-processing for you. Each of the CelebA images has been cropped to remove parts of the image that don't include a face, then resized down to 64x64x3 NumPy images. This *pre-processed* dataset is a smaller subset of the very large CelebA data.\n\n> There are a few other steps that you'll need to **transform** this data and create a **DataLoader**.\n\n#### Exercise: Complete the following `get_dataloader` function, such that it satisfies these requirements:\n\n* Your images should be square, Tensor images of size `image_size x image_size` in the x and y dimension.\n* Your function should return a DataLoader that shuffles and batches these Tensor images.\n\n#### ImageFolder\n\nTo create a dataset given a directory of images, it's recommended that you use PyTorch's [ImageFolder](https://pytorch.org/docs/stable/torchvision/datasets.html#imagefolder) wrapper, with a root directory `processed_celeba_small/` and data transformation passed in.",
"_____no_output_____"
]
],
[
[
"# necessary imports\nimport torch\nfrom torchvision import datasets\nfrom torchvision import transforms\nfrom torch.utils.data import DataLoader\n",
"_____no_output_____"
],
[
"def get_dataloader(batch_size, image_size, data_dir='processed_celeba_small/'):\n \"\"\"\n Batch the neural network data using DataLoader\n :param batch_size: The size of each batch; the number of images in a batch\n :param img_size: The square size of the image data (x, y)\n :param data_dir: Directory where image data is located\n :return: DataLoader with batched data\n \"\"\"\n \n # TODO: Implement function and return a dataloader\n transform = transforms.Compose([transforms.Resize(image_size), # resize to 128x128\n transforms.ToTensor()])\n \n dataset = datasets.ImageFolder(data_dir, transform)\n loader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True)\n\n \n return loader\n",
"_____no_output_____"
]
],
[
[
"## Create a DataLoader\n\n#### Exercise: Create a DataLoader `celeba_train_loader` with appropriate hyperparameters.\n\nCall the above function and create a dataloader to view images. \n* You can decide on any reasonable `batch_size` parameter\n* Your `image_size` **must be** `32`. Resizing the data to a smaller size will make for faster training, while still creating convincing images of faces!",
"_____no_output_____"
]
],
[
[
"# Define function hyperparameters\nbatch_size = 128\nimg_size = 32\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\n# Call your function and get a dataloader\nceleba_train_loader = get_dataloader(batch_size, img_size)\n",
"_____no_output_____"
]
],
[
[
"Next, you can view some images! You should seen square images of somewhat-centered faces.\n\nNote: You'll need to convert the Tensor images into a NumPy type and transpose the dimensions to correctly display an image, suggested `imshow` code is below, but it may not be perfect.",
"_____no_output_____"
]
],
[
[
"# helper display function\ndef imshow(img):\n npimg = img.numpy()\n plt.imshow(np.transpose(npimg, (1, 2, 0)))\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\n# obtain one batch of training images\ndataiter = iter(celeba_train_loader)\nimages, _ = dataiter.next() # _ for no labels\n\n# plot the images in the batch, along with the corresponding labels\nfig = plt.figure(figsize=(20, 4))\nplot_size=20\nfor idx in np.arange(plot_size):\n ax = fig.add_subplot(2, plot_size/2, idx+1, xticks=[], yticks=[])\n imshow(images[idx])",
"_____no_output_____"
]
],
[
[
"#### Exercise: Pre-process your image data and scale it to a pixel range of -1 to 1\n\nYou need to do a bit of pre-processing; you know that the output of a `tanh` activated generator will contain pixel values in a range from -1 to 1, and so, we need to rescale our training images to a range of -1 to 1. (Right now, they are in a range from 0-1.)",
"_____no_output_____"
]
],
[
[
"# TODO: Complete the scale function\ndef scale(x, feature_range=(-1, 1)):\n ''' Scale takes in an image x and returns that image, scaled\n with a feature_range of pixel values from -1 to 1. \n This function assumes that the input x is already scaled from 0-1.'''\n # assume x is scaled to (0, 1)\n # scale to feature_range and return scaled x\n min, max = feature_range\n x = x * (max - min) + min \n return x\n",
"_____no_output_____"
],
[
"\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\n# check scaled range\n# should be close to -1 to 1\nimg = images[0]\nscaled_img = scale(img)\n\nprint('Min: ', scaled_img.min())\nprint('Max: ', scaled_img.max())",
"Min: tensor(-0.9922)\nMax: tensor(0.9922)\n"
]
],
[
[
"---\n# Define the Model\n\nA GAN is comprised of two adversarial networks, a discriminator and a generator.\n\n## Discriminator\n\nYour first task will be to define the discriminator. This is a convolutional classifier like you've built before, only without any maxpooling layers. To deal with this complex data, it's suggested you use a deep network with **normalization**. You are also allowed to create any helper functions that may be useful.\n\n#### Exercise: Complete the Discriminator class\n* The inputs to the discriminator are 32x32x3 tensor images\n* The output should be a single value that will indicate whether a given image is real or fake\n",
"_____no_output_____"
]
],
[
[
"import torch.nn as nn\nimport torch.nn.functional as F\n\n# helper conv function\ndef conv(in_channels, out_channels, kernel_size, stride=2, padding=1, batch_norm=True):\n \"\"\"Creates a convolutional layer, with optional batch normalization.\n \"\"\"\n layers = []\n conv_layer = nn.Conv2d(in_channels, out_channels, \n kernel_size, stride, padding, bias=False)\n \n # append conv layer\n layers.append(conv_layer)\n\n if batch_norm:\n # append batchnorm layer\n layers.append(nn.BatchNorm2d(out_channels))\n \n # using Sequential container\n return nn.Sequential(*layers)",
"_____no_output_____"
],
[
"class Discriminator(nn.Module):\n\n def __init__(self, conv_dim):\n \"\"\"\n Initialize the Discriminator Module\n :param conv_dim: The depth of the first convolutional layer\n \"\"\"\n super(Discriminator, self).__init__()\n\n # complete init function\n self.conv_dim = conv_dim\n\n # [3, 32, 32] input\n self.conv1 = conv(3, conv_dim, 4, batch_norm=False) # first layer, no batch_norm \n # [10, 16, 16] input\n self.conv2 = conv(conv_dim, conv_dim*2, 4)\n # [20, 8, 8] input\n self.conv3 = conv(conv_dim*2, conv_dim*4, 4)\n # [40, 4, 4] input\n self.conv4 = conv(conv_dim*4, conv_dim*8, 1, padding=0, stride=1) \n # [80, 4, 4] output\n self.conv5 = conv(conv_dim*8, conv_dim*16, 1, padding=0, stride=1) \n # [160, 4, 4] output\n \n self.out_dim = self.conv_dim *16*4*4\n # final, fully-connected layer\n self.fc = nn.Linear(self.out_dim, 1)\n \n\n def forward(self, x):\n \"\"\"\n Forward propagation of the neural network\n :param x: The input to the neural network \n :return: Discriminator logits; the output of the neural network\n \"\"\"\n # define feedforward behavior\n #print(x.shape)\n x = F.leaky_relu(self.conv1(x))\n #print(x.shape)\n x = F.leaky_relu(self.conv2(x))\n #print(x.shape)\n x = F.leaky_relu(self.conv3(x))\n #print(x.shape)\n x = F.leaky_relu(self.conv4(x))\n #print(x.shape)\n x = F.leaky_relu(self.conv5(x))\n #print(x.shape)\n x = x.view(-1,self.out_dim)\n #print(x.shape)\n x = self.fc(x)\n return x\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_discriminator(Discriminator)",
"Tests Passed\n"
]
],
[
[
"## Generator\n\nThe generator should upsample an input and generate a *new* image of the same size as our training data `32x32x3`. This should be mostly transpose convolutional layers with normalization applied to the outputs.\n\n#### Exercise: Complete the Generator class\n* The inputs to the generator are vectors of some length `z_size`\n* The output should be a image of shape `32x32x3`",
"_____no_output_____"
]
],
[
[
"def deconv(in_channels, out_channels, kernel_size, stride=2, padding=1, batch_norm=True):\n \"\"\"Creates a transposed-convolutional layer, with optional batch normalization.\n \"\"\"\n ## TODO: Complete this function\n ## create a sequence of transpose + optional batch norm layers\n layers = []\n transpose_conv_layer = nn.ConvTranspose2d(in_channels, out_channels, \n kernel_size, stride, padding, bias=False)\n # append transpose convolutional layer\n layers.append(transpose_conv_layer)\n \n if batch_norm:\n # append batchnorm layer\n layers.append(nn.BatchNorm2d(out_channels))\n \n return nn.Sequential(*layers) ",
"_____no_output_____"
],
[
"class Generator(nn.Module):\n \n def __init__(self, z_size, conv_dim):\n \"\"\"\n Initialize the Generator Module\n :param z_size: The length of the input latent vector, z\n :param conv_dim: The depth of the inputs to the *last* transpose convolutional layer\n \"\"\"\n super(Generator, self).__init__()\n\n # complete init function\n self.conv_dim = conv_dim\n self.out_dim = self.conv_dim *16*4*4\n \n # first, fully-connected layer\n self.fc = nn.Linear(z_size, self.out_dim)\n\n # transpose conv layers\n #[160, 4, 4] input\n self.dconv1 = deconv(conv_dim*16, conv_dim*8, 1, padding=0, stride=1)\n #[80, 4, 4] input\n self.dconv2 = deconv(conv_dim*8, conv_dim*4, 1, padding=0, stride=1)\n #[40, 4, 4] input\n self.dconv3 = deconv(conv_dim*4, conv_dim*2, 4)\n #[20, 8, 8] input\n self.dconv4 = deconv(conv_dim*2, conv_dim, 4)\n #[10, 16, 16] input\n self.dconv5 = deconv(conv_dim, 3, 4, batch_norm=False)\n #[3, 32, 32] output\n \n\n def forward(self, x):\n \"\"\"\n Forward propagation of the neural network\n :param x: The input to the neural network \n :return: A 32x32x3 Tensor image as output\n \"\"\"\n # define feedforward behavior\n #print(x.shape)\n x = self.fc(x)\n #print(x.shape)\n x = x.view(-1, self.conv_dim*16, 4, 4)\n #print(x.shape)\n x = F.relu(self.dconv1(x))\n #print(x.shape)\n x = F.relu(self.dconv2(x))\n #print(x.shape)\n x = F.relu(self.dconv3(x))\n #print(x.shape)\n x = F.relu(self.dconv4(x))\n #print(x.shape)\n x = self.dconv5(x)\n #print(x.shape)\n x = F.tanh(x)\n \n return x\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_generator(Generator)",
"Tests Passed\n"
]
],
[
[
"## Initialize the weights of your networks\n\nTo help your models converge, you should initialize the weights of the convolutional and linear layers in your model. From reading the [original DCGAN paper](https://arxiv.org/pdf/1511.06434.pdf), they say:\n> All weights were initialized from a zero-centered Normal distribution with standard deviation 0.02.\n\nSo, your next task will be to define a weight initialization function that does just this!\n\nYou can refer back to the lesson on weight initialization or even consult existing model code, such as that from [the `networks.py` file in CycleGAN Github repository](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/networks.py) to help you complete this function.\n\n#### Exercise: Complete the weight initialization function\n\n* This should initialize only **convolutional** and **linear** layers\n* Initialize the weights to a normal distribution, centered around 0, with a standard deviation of 0.02.\n* The bias terms, if they exist, may be left alone or set to 0.",
"_____no_output_____"
]
],
[
[
"def weights_init_normal(m):\n \"\"\"\n Applies initial weights to certain layers in a model .\n The weights are taken from a normal distribution \n with mean = 0, std dev = 0.02.\n :param m: A module or layer in a network \n \"\"\"\n # classname will be something like:\n # `Conv`, `BatchNorm2d`, `Linear`, etc.\n classname = m.__class__.__name__\n \n # TODO: Apply initial weights to convolutional and linear layers\n if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):\n m.weight.data.normal_(mean=0,std=0.02)\n if hasattr(m, 'bias') and m.bias is not None:\n m.bias.data.fill_(0) \n ",
"_____no_output_____"
]
],
[
[
"## Build complete network\n\nDefine your models' hyperparameters and instantiate the discriminator and generator from the classes defined above. Make sure you've passed in the correct input arguments.",
"_____no_output_____"
]
],
[
[
"\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ndef build_network(d_conv_dim, g_conv_dim, z_size):\n # define discriminator and generator\n D = Discriminator(d_conv_dim)\n G = Generator(z_size=z_size, conv_dim=g_conv_dim)\n\n # initialize model weights\n D.apply(weights_init_normal)\n G.apply(weights_init_normal)\n\n print(D)\n print()\n print(G)\n \n return D, G\n",
"_____no_output_____"
]
],
[
[
"#### Exercise: Define model hyperparameters",
"_____no_output_____"
]
],
[
[
"# Define model hyperparams\nd_conv_dim = 32\ng_conv_dim = 32\nz_size = 100\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\nD, G = build_network(d_conv_dim, g_conv_dim, z_size)",
"Discriminator(\n (conv1): Sequential(\n (0): Conv2d(3, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)\n )\n (conv2): Sequential(\n (0): Conv2d(32, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)\n (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (conv3): Sequential(\n (0): Conv2d(64, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)\n (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (conv4): Sequential(\n (0): Conv2d(128, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (conv5): Sequential(\n (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (fc): Linear(in_features=8192, out_features=1, bias=True)\n)\n\nGenerator(\n (fc): Linear(in_features=100, out_features=8192, bias=True)\n (dconv1): Sequential(\n (0): ConvTranspose2d(512, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (dconv2): Sequential(\n (0): ConvTranspose2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (dconv3): Sequential(\n (0): ConvTranspose2d(128, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)\n (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (dconv4): Sequential(\n (0): ConvTranspose2d(64, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)\n (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (dconv5): Sequential(\n (0): ConvTranspose2d(32, 3, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)\n )\n)\n"
]
],
[
[
"### Training on GPU\n\nCheck if you can train on GPU. Here, we'll set this as a boolean variable `train_on_gpu`. Later, you'll be responsible for making sure that \n>* Models,\n* Model inputs, and\n* Loss function arguments\n\nAre moved to GPU, where appropriate.",
"_____no_output_____"
]
],
[
[
"\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nimport torch\n\n# Check for a GPU\ntrain_on_gpu = torch.cuda.is_available()\nif not train_on_gpu:\n print('No GPU found. Please use a GPU to train your neural network.')\nelse:\n print('Training on GPU!')",
"Training on GPU!\n"
]
],
[
[
"---\n## Discriminator and Generator Losses\n\nNow we need to calculate the losses for both types of adversarial networks.\n\n### Discriminator Losses\n\n> * For the discriminator, the total loss is the sum of the losses for real and fake images, `d_loss = d_real_loss + d_fake_loss`. \n* Remember that we want the discriminator to output 1 for real images and 0 for fake images, so we need to set up the losses to reflect that.\n\n\n### Generator Loss\n\nThe generator loss will look similar only with flipped labels. The generator's goal is to get the discriminator to *think* its generated images are *real*.\n\n#### Exercise: Complete real and fake loss functions\n\n**You may choose to use either cross entropy or a least squares error loss to complete the following `real_loss` and `fake_loss` functions.**",
"_____no_output_____"
]
],
[
[
"def real_loss(D_out):\n '''Calculates how close discriminator outputs are to being real.\n param, D_out: discriminator logits\n return: real loss'''\n batch_size = D_out.size(0)\n labels = torch.ones(batch_size)\n # move labels to GPU if available \n if train_on_gpu:\n labels = labels.cuda()\n # binary cross entropy with logits loss\n criterion = nn.BCEWithLogitsLoss()\n # calculate loss\n loss = criterion(D_out.squeeze(), labels)\n return loss\n\ndef fake_loss(D_out):\n '''Calculates how close discriminator outputs are to being fake.\n param, D_out: discriminator logits\n return: fake loss'''\n batch_size = D_out.size(0)\n labels = torch.zeros(batch_size) # fake labels = 0\n if train_on_gpu:\n labels = labels.cuda()\n criterion = nn.BCEWithLogitsLoss()\n # calculate loss\n loss = criterion(D_out.squeeze(), labels)\n return loss",
"_____no_output_____"
]
],
[
[
"## Optimizers\n\n#### Exercise: Define optimizers for your Discriminator (D) and Generator (G)\n\nDefine optimizers for your models with appropriate hyperparameters.",
"_____no_output_____"
]
],
[
[
"import torch.optim as optim\nlr = .0002\nbeta1=0.5\nbeta2=0.999\n\n# Create optimizers for the discriminator and generator\nd_optimizer = optim.Adam(D.parameters(), lr, [beta1, beta2])\ng_optimizer = optim.Adam(G.parameters(), lr, [beta1, beta2])",
"_____no_output_____"
]
],
[
[
"---\n## Training\n\nTraining will involve alternating between training the discriminator and the generator. You'll use your functions `real_loss` and `fake_loss` to help you calculate the discriminator losses.\n\n* You should train the discriminator by alternating on real and fake images\n* Then the generator, which tries to trick the discriminator and should have an opposing loss function\n\n\n#### Saving Samples\n\nYou've been given some code to print out some loss statistics and save some generated \"fake\" samples.",
"_____no_output_____"
],
[
"#### Exercise: Complete the training function\n\nKeep in mind that, if you've moved your models to GPU, you'll also have to move any model inputs to GPU.",
"_____no_output_____"
]
],
[
[
"def train(D, G, n_epochs, print_every=50):\n '''Trains adversarial networks for some number of epochs\n param, D: the discriminator network\n param, G: the generator network\n param, n_epochs: number of epochs to train for\n param, print_every: when to print and record the models' losses\n return: D and G losses'''\n \n # move models to GPU\n if train_on_gpu:\n D.cuda()\n G.cuda()\n\n # keep track of loss and generated, \"fake\" samples\n samples = []\n losses = []\n\n # Get some fixed data for sampling. These are images that are held\n # constant throughout training, and allow us to inspect the model's performance\n sample_size=16\n fixed_z = np.random.uniform(-1, 1, size=(sample_size, z_size))\n fixed_z = torch.from_numpy(fixed_z).float()\n # move z to GPU if available\n if train_on_gpu:\n fixed_z = fixed_z.cuda()\n\n # epoch training loop\n for epoch in range(n_epochs):\n\n # batch training loop\n for batch_i, (real_images, _) in enumerate(celeba_train_loader):\n\n batch_size = real_images.size(0)\n real_images = scale(real_images)\n\n # ===============================================\n # YOUR CODE HERE: TRAIN THE NETWORKS\n # ===============================================\n d_optimizer.zero_grad()\n # Compute the discriminator losses on real images \n if train_on_gpu:\n real_images = real_images.cuda()\n\n D_real = D(real_images)\n d_real_loss = real_loss(D_real)\n\n # 2. Train with fake images\n\n # Generate fake images\n z = np.random.uniform(-1, 1, size=(batch_size, z_size))\n z = torch.from_numpy(z).float()\n # move x to GPU, if available\n if train_on_gpu:\n z = z.cuda()\n fake_images = G(z)\n\n # Compute the discriminator losses on fake images \n D_fake = D(fake_images)\n d_fake_loss = fake_loss(D_fake)\n\n # add up loss and perform backprop\n d_loss = d_real_loss + d_fake_loss\n d_loss.backward()\n d_optimizer.step()\n\n # 2. Train the generator with an adversarial loss\n g_optimizer.zero_grad()\n\n # 1. 
Train with fake images and flipped labels\n\n # Generate fake images\n z = np.random.uniform(-1, 1, size=(batch_size, z_size))\n z = torch.from_numpy(z).float()\n if train_on_gpu:\n z = z.cuda()\n fake_images = G(z)\n\n # Compute the discriminator losses on fake images \n # using flipped labels!\n D_fake = D(fake_images)\n g_loss = real_loss(D_fake) # use real loss to flip labels\n\n # perform backprop\n g_loss.backward()\n g_optimizer.step()\n \n \n # ===============================================\n # END OF YOUR CODE\n # ===============================================\n\n # Print some loss stats\n if batch_i % print_every == 0:\n # append discriminator loss and generator loss\n losses.append((d_loss.item(), g_loss.item()))\n # print discriminator and generator loss\n print('Epoch [{:5d}/{:5d}] | d_loss: {:6.4f} | g_loss: {:6.4f}'.format(\n epoch+1, n_epochs, d_loss.item(), g_loss.item()))\n\n\n ## AFTER EACH EPOCH## \n # this code assumes your generator is named G, feel free to change the name\n # generate and save sample, fake images\n G.eval() # for generating samples\n samples_z = G(fixed_z)\n samples.append(samples_z)\n G.train() # back to training mode\n\n # Save training generator samples\n with open('train_samples.pkl', 'wb') as f:\n pkl.dump(samples, f)\n \n # finally return losses\n return losses",
"_____no_output_____"
]
],
[
[
"Set your number of training epochs and train your GAN!",
"_____no_output_____"
]
],
[
[
"# set number of epochs \nn_epochs = 20\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\n# call training function\nlosses = train(D, G, n_epochs=n_epochs)",
"Epoch [ 1/ 20] | d_loss: 1.6247 | g_loss: 0.8741\nEpoch [ 1/ 20] | d_loss: 0.0326 | g_loss: 6.0400\nEpoch [ 1/ 20] | d_loss: 1.7157 | g_loss: 2.6900\nEpoch [ 1/ 20] | d_loss: 0.2929 | g_loss: 3.5238\nEpoch [ 1/ 20] | d_loss: 1.2407 | g_loss: 7.8117\nEpoch [ 1/ 20] | d_loss: 0.3097 | g_loss: 2.3731\nEpoch [ 1/ 20] | d_loss: 0.4277 | g_loss: 4.2024\nEpoch [ 1/ 20] | d_loss: 0.5402 | g_loss: 4.7408\nEpoch [ 1/ 20] | d_loss: 0.9398 | g_loss: 5.5606\nEpoch [ 1/ 20] | d_loss: 0.2710 | g_loss: 2.0205\nEpoch [ 1/ 20] | d_loss: 0.3301 | g_loss: 2.9206\nEpoch [ 1/ 20] | d_loss: 0.6585 | g_loss: 4.2027\nEpoch [ 1/ 20] | d_loss: 0.7312 | g_loss: 1.7120\nEpoch [ 1/ 20] | d_loss: 0.3486 | g_loss: 3.6101\nEpoch [ 1/ 20] | d_loss: 1.0040 | g_loss: 1.2868\nEpoch [ 2/ 20] | d_loss: 0.6032 | g_loss: 2.5485\nEpoch [ 2/ 20] | d_loss: 0.6758 | g_loss: 1.8964\nEpoch [ 2/ 20] | d_loss: 0.7363 | g_loss: 3.5788\nEpoch [ 2/ 20] | d_loss: 0.6955 | g_loss: 2.6197\nEpoch [ 2/ 20] | d_loss: 0.8618 | g_loss: 3.8707\nEpoch [ 2/ 20] | d_loss: 0.7749 | g_loss: 1.4990\nEpoch [ 2/ 20] | d_loss: 0.6230 | g_loss: 1.2480\nEpoch [ 2/ 20] | d_loss: 0.6947 | g_loss: 2.4302\nEpoch [ 2/ 20] | d_loss: 0.7678 | g_loss: 2.8462\nEpoch [ 2/ 20] | d_loss: 1.3807 | g_loss: 1.1557\nEpoch [ 2/ 20] | d_loss: 0.6967 | g_loss: 2.2071\nEpoch [ 2/ 20] | d_loss: 0.8969 | g_loss: 3.3905\nEpoch [ 2/ 20] | d_loss: 0.6076 | g_loss: 2.1075\nEpoch [ 2/ 20] | d_loss: 0.7805 | g_loss: 3.3541\nEpoch [ 2/ 20] | d_loss: 0.6085 | g_loss: 2.2694\nEpoch [ 3/ 20] | d_loss: 0.8240 | g_loss: 1.4505\nEpoch [ 3/ 20] | d_loss: 0.5979 | g_loss: 2.2937\nEpoch [ 3/ 20] | d_loss: 0.9312 | g_loss: 1.1831\nEpoch [ 3/ 20] | d_loss: 1.1684 | g_loss: 2.7551\nEpoch [ 3/ 20] | d_loss: 1.1738 | g_loss: 3.4960\nEpoch [ 3/ 20] | d_loss: 0.6259 | g_loss: 2.4089\nEpoch [ 3/ 20] | d_loss: 0.9279 | g_loss: 1.8124\nEpoch [ 3/ 20] | d_loss: 0.6781 | g_loss: 1.2742\nEpoch [ 3/ 20] | d_loss: 0.6466 | g_loss: 1.6521\nEpoch [ 3/ 20] | d_loss: 0.7064 | g_loss: 
2.0703\nEpoch [ 3/ 20] | d_loss: 1.0884 | g_loss: 1.3413\nEpoch [ 3/ 20] | d_loss: 0.6713 | g_loss: 1.4297\nEpoch [ 3/ 20] | d_loss: 0.7088 | g_loss: 2.0959\nEpoch [ 3/ 20] | d_loss: 0.8577 | g_loss: 2.6215\nEpoch [ 3/ 20] | d_loss: 0.6899 | g_loss: 1.3116\nEpoch [ 4/ 20] | d_loss: 0.9149 | g_loss: 2.9597\nEpoch [ 4/ 20] | d_loss: 0.6923 | g_loss: 2.3165\nEpoch [ 4/ 20] | d_loss: 0.5348 | g_loss: 2.2608\nEpoch [ 4/ 20] | d_loss: 0.9896 | g_loss: 1.1206\nEpoch [ 4/ 20] | d_loss: 0.5923 | g_loss: 2.0251\nEpoch [ 4/ 20] | d_loss: 1.1683 | g_loss: 0.5718\nEpoch [ 4/ 20] | d_loss: 1.0335 | g_loss: 1.7602\nEpoch [ 4/ 20] | d_loss: 1.0102 | g_loss: 1.1596\nEpoch [ 4/ 20] | d_loss: 0.7206 | g_loss: 2.1989\nEpoch [ 4/ 20] | d_loss: 0.7160 | g_loss: 0.9924\nEpoch [ 4/ 20] | d_loss: 0.8947 | g_loss: 1.5250\nEpoch [ 4/ 20] | d_loss: 0.8276 | g_loss: 1.8824\nEpoch [ 4/ 20] | d_loss: 0.5578 | g_loss: 2.8167\nEpoch [ 4/ 20] | d_loss: 0.9239 | g_loss: 2.8356\nEpoch [ 4/ 20] | d_loss: 0.9007 | g_loss: 1.8761\nEpoch [ 5/ 20] | d_loss: 0.5971 | g_loss: 2.7503\nEpoch [ 5/ 20] | d_loss: 0.6723 | g_loss: 2.0506\nEpoch [ 5/ 20] | d_loss: 0.7357 | g_loss: 1.4757\nEpoch [ 5/ 20] | d_loss: 0.7040 | g_loss: 1.7579\nEpoch [ 5/ 20] | d_loss: 0.7677 | g_loss: 4.0924\nEpoch [ 5/ 20] | d_loss: 0.6825 | g_loss: 1.6158\nEpoch [ 5/ 20] | d_loss: 0.5624 | g_loss: 1.8346\nEpoch [ 5/ 20] | d_loss: 0.5476 | g_loss: 2.1049\nEpoch [ 5/ 20] | d_loss: 0.5849 | g_loss: 2.7267\nEpoch [ 5/ 20] | d_loss: 0.6904 | g_loss: 2.5691\nEpoch [ 5/ 20] | d_loss: 0.9539 | g_loss: 3.2025\nEpoch [ 5/ 20] | d_loss: 0.7895 | g_loss: 1.6403\nEpoch [ 5/ 20] | d_loss: 0.7710 | g_loss: 2.9273\nEpoch [ 5/ 20] | d_loss: 0.4544 | g_loss: 2.2992\nEpoch [ 5/ 20] | d_loss: 0.7063 | g_loss: 3.2460\nEpoch [ 6/ 20] | d_loss: 0.7038 | g_loss: 2.3942\nEpoch [ 6/ 20] | d_loss: 0.6439 | g_loss: 1.8031\nEpoch [ 6/ 20] | d_loss: 0.5863 | g_loss: 2.7548\nEpoch [ 6/ 20] | d_loss: 0.6073 | g_loss: 1.7616\nEpoch [ 6/ 20] | d_loss: 0.5048 | g_loss: 
2.1240\nEpoch [ 6/ 20] | d_loss: 0.6468 | g_loss: 1.9533\nEpoch [ 6/ 20] | d_loss: 0.6401 | g_loss: 1.5890\nEpoch [ 6/ 20] | d_loss: 0.8546 | g_loss: 1.5125\nEpoch [ 6/ 20] | d_loss: 0.7109 | g_loss: 2.4874\nEpoch [ 6/ 20] | d_loss: 0.6594 | g_loss: 2.6755\nEpoch [ 6/ 20] | d_loss: 1.0982 | g_loss: 4.1147\nEpoch [ 6/ 20] | d_loss: 0.4843 | g_loss: 3.0015\nEpoch [ 6/ 20] | d_loss: 1.0214 | g_loss: 3.3929\nEpoch [ 6/ 20] | d_loss: 0.5194 | g_loss: 3.2884\nEpoch [ 6/ 20] | d_loss: 0.6022 | g_loss: 1.0322\nEpoch [ 7/ 20] | d_loss: 0.4815 | g_loss: 1.8224\nEpoch [ 7/ 20] | d_loss: 0.7825 | g_loss: 3.1951\nEpoch [ 7/ 20] | d_loss: 0.6007 | g_loss: 2.3242\nEpoch [ 7/ 20] | d_loss: 0.7771 | g_loss: 1.2066\nEpoch [ 7/ 20] | d_loss: 0.6062 | g_loss: 1.7579\nEpoch [ 7/ 20] | d_loss: 0.6932 | g_loss: 1.6646\nEpoch [ 7/ 20] | d_loss: 0.4298 | g_loss: 2.3013\nEpoch [ 7/ 20] | d_loss: 0.5267 | g_loss: 1.8892\nEpoch [ 7/ 20] | d_loss: 0.9730 | g_loss: 1.8738\nEpoch [ 7/ 20] | d_loss: 1.2434 | g_loss: 1.2851\nEpoch [ 7/ 20] | d_loss: 0.8428 | g_loss: 1.3450\nEpoch [ 7/ 20] | d_loss: 0.7834 | g_loss: 2.9410\nEpoch [ 7/ 20] | d_loss: 0.6740 | g_loss: 3.2885\nEpoch [ 7/ 20] | d_loss: 0.6776 | g_loss: 2.0673\nEpoch [ 7/ 20] | d_loss: 1.0825 | g_loss: 2.7209\nEpoch [ 8/ 20] | d_loss: 1.3701 | g_loss: 4.5404\nEpoch [ 8/ 20] | d_loss: 0.6111 | g_loss: 1.8260\nEpoch [ 8/ 20] | d_loss: 0.7416 | g_loss: 2.2915\nEpoch [ 8/ 20] | d_loss: 0.4472 | g_loss: 3.3564\nEpoch [ 8/ 20] | d_loss: 0.5827 | g_loss: 1.8157\nEpoch [ 8/ 20] | d_loss: 0.6728 | g_loss: 1.5817\nEpoch [ 8/ 20] | d_loss: 0.6054 | g_loss: 2.0269\nEpoch [ 8/ 20] | d_loss: 0.6934 | g_loss: 1.1111\nEpoch [ 8/ 20] | d_loss: 0.4458 | g_loss: 3.4001\nEpoch [ 8/ 20] | d_loss: 0.5342 | g_loss: 1.9855\nEpoch [ 8/ 20] | d_loss: 0.5860 | g_loss: 2.9757\nEpoch [ 8/ 20] | d_loss: 0.3622 | g_loss: 2.8821\nEpoch [ 8/ 20] | d_loss: 0.5298 | g_loss: 2.9694\nEpoch [ 8/ 20] | d_loss: 0.6248 | g_loss: 1.7837\nEpoch [ 8/ 20] | d_loss: 0.4873 | g_loss: 
2.9310\nEpoch [ 9/ 20] | d_loss: 0.4278 | g_loss: 2.4461\nEpoch [ 9/ 20] | d_loss: 0.4772 | g_loss: 2.5226\nEpoch [ 9/ 20] | d_loss: 1.2736 | g_loss: 3.9989\nEpoch [ 9/ 20] | d_loss: 0.5640 | g_loss: 1.6691\nEpoch [ 9/ 20] | d_loss: 0.4518 | g_loss: 2.6881\nEpoch [ 9/ 20] | d_loss: 0.8778 | g_loss: 3.3626\nEpoch [ 9/ 20] | d_loss: 1.1877 | g_loss: 1.2551\nEpoch [ 9/ 20] | d_loss: 0.5867 | g_loss: 2.3460\nEpoch [ 9/ 20] | d_loss: 0.5405 | g_loss: 2.6626\nEpoch [ 9/ 20] | d_loss: 0.7929 | g_loss: 2.5961\nEpoch [ 9/ 20] | d_loss: 0.5217 | g_loss: 2.5239\nEpoch [ 9/ 20] | d_loss: 0.8797 | g_loss: 3.7497\nEpoch [ 9/ 20] | d_loss: 0.6721 | g_loss: 3.5718\nEpoch [ 9/ 20] | d_loss: 0.4575 | g_loss: 1.7040\nEpoch [ 9/ 20] | d_loss: 0.4723 | g_loss: 2.2643\nEpoch [ 10/ 20] | d_loss: 1.0309 | g_loss: 3.8165\nEpoch [ 10/ 20] | d_loss: 0.5689 | g_loss: 1.8754\nEpoch [ 10/ 20] | d_loss: 0.4012 | g_loss: 1.8834\nEpoch [ 10/ 20] | d_loss: 0.4618 | g_loss: 2.4162\nEpoch [ 10/ 20] | d_loss: 0.4890 | g_loss: 1.1829\nEpoch [ 10/ 20] | d_loss: 0.6348 | g_loss: 3.5954\nEpoch [ 10/ 20] | d_loss: 0.8761 | g_loss: 3.3720\nEpoch [ 10/ 20] | d_loss: 0.4507 | g_loss: 2.7894\nEpoch [ 10/ 20] | d_loss: 1.5432 | g_loss: 2.0066\nEpoch [ 10/ 20] | d_loss: 0.4324 | g_loss: 1.1868\nEpoch [ 10/ 20] | d_loss: 0.7866 | g_loss: 2.2064\nEpoch [ 10/ 20] | d_loss: 0.6819 | g_loss: 2.5732\nEpoch [ 10/ 20] | d_loss: 0.7253 | g_loss: 2.6755\nEpoch [ 10/ 20] | d_loss: 1.0588 | g_loss: 2.9119\nEpoch [ 10/ 20] | d_loss: 0.9119 | g_loss: 3.3064\nEpoch [ 11/ 20] | d_loss: 0.6179 | g_loss: 3.6138\nEpoch [ 11/ 20] | d_loss: 0.7674 | g_loss: 1.1351\n"
]
],
[
[
"## Training loss\n\nPlot the training losses for the generator and discriminator, recorded after each epoch.",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots()\nlosses = np.array(losses)\nplt.plot(losses.T[0], label='Discriminator', alpha=0.5)\nplt.plot(losses.T[1], label='Generator', alpha=0.5)\nplt.title(\"Training Losses\")\nplt.legend()",
"_____no_output_____"
]
],
[
[
"## Generator samples from training\n\nView samples of images from the generator, and answer a question about the strengths and weaknesses of your trained models.",
"_____no_output_____"
]
],
[
[
"# helper function for viewing a list of passed in sample images\ndef view_samples(epoch, samples):\n fig, axes = plt.subplots(figsize=(16,4), nrows=2, ncols=8, sharey=True, sharex=True)\n for ax, img in zip(axes.flatten(), samples[epoch]):\n img = img.detach().cpu().numpy()\n img = np.transpose(img, (1, 2, 0))\n img = ((img + 1)*255 / (2)).astype(np.uint8)\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n im = ax.imshow(img.reshape((32,32,3)))",
"_____no_output_____"
],
[
"# Load samples from generator, taken while training\nwith open('train_samples.pkl', 'rb') as f:\n samples = pkl.load(f)",
"_____no_output_____"
],
[
"_ = view_samples(-1, samples)",
"_____no_output_____"
]
],
[
[
"### Question: What do you notice about your generated samples and how might you improve this model?\nWhen you answer this question, consider the following factors:\n* The dataset is biased; it is made of \"celebrity\" faces that are mostly white\n* Model size; larger models have the opportunity to learn more features in a data feature space\n* Optimization strategy; optimizers and number of epochs affect your final result\n",
"_____no_output_____"
],
[
"**Answer:** (Write your answer in this cell)",
"_____no_output_____"
],
[
"### Submitting This Project\nWhen submitting this project, make sure to run all the cells before saving the notebook. Save the notebook file as \"dlnd_face_generation.ipynb\" and save it as a HTML file under \"File\" -> \"Download as\". Include the \"problem_unittests.py\" files in your submission.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
]
] |
4a225cfd127ce982f6352ec6e0b43693ca148777
| 2,537 |
ipynb
|
Jupyter Notebook
|
Day-15-challenge.ipynb
|
sawani02/30-Days-Of-Code-Python
|
9fcf93ea95c34543019d7127dafc1ba1121f3b50
|
[
"MIT"
] | null | null | null |
Day-15-challenge.ipynb
|
sawani02/30-Days-Of-Code-Python
|
9fcf93ea95c34543019d7127dafc1ba1121f3b50
|
[
"MIT"
] | null | null | null |
Day-15-challenge.ipynb
|
sawani02/30-Days-Of-Code-Python
|
9fcf93ea95c34543019d7127dafc1ba1121f3b50
|
[
"MIT"
] | null | null | null | 21.5 | 137 | 0.473 |
[
[
[
"# Day 15 challenge: Linked List\nPlease click on the link to view the challenge on hackerrank website: https://www.hackerrank.com/challenges/30-linked-list/problem ",
"_____no_output_____"
]
],
[
[
"class Node:\n def __init__(self,data):\n self.data = data\n self.next = None \nclass Solution: \n def display(self,head):\n current = head\n while current:\n print(current.data,end=' ')\n current = current.next\n \n def insert(self,head,data): \n #Complete this method\n # Initial condition, if head empty\n if (head == None):\n head = Node(data)\n elif (head.next == None):\n head.next = Node(data)\n else: \n self.insert(head.next, data)\n return head",
"_____no_output_____"
],
[
"#input: number of inputs\nmylist= Solution()\nT=int(input())\nhead=None\nfor i in range(T):\n data=int(input())\n head=mylist.insert(head,data) \nmylist.display(head); ",
"3\n6\n2\n1\n6 2 1 "
]
],
[
[
"# What I learned?",
"_____no_output_____"
],
[
"- Linked list insertion using self.\n- Singly linked list\n- Doubly linked list ",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
4a225e8860b0d86124c6a2dd54f7600411545fb7
| 930 |
ipynb
|
Jupyter Notebook
|
data_preparation.ipynb
|
norbertparti/traffic_sign_recognition
|
9eae4964bd0a38954914bca350a891d48318a4a5
|
[
"Apache-2.0"
] | null | null | null |
data_preparation.ipynb
|
norbertparti/traffic_sign_recognition
|
9eae4964bd0a38954914bca350a891d48318a4a5
|
[
"Apache-2.0"
] | null | null | null |
data_preparation.ipynb
|
norbertparti/traffic_sign_recognition
|
9eae4964bd0a38954914bca350a891d48318a4a5
|
[
"Apache-2.0"
] | null | null | null | 23.846154 | 250 | 0.517204 |
[
[
[
"<a href=\"https://colab.research.google.com/github/norbertparti/traffic_sign_recognition/blob/main/data_preparation.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
]
] |
4a225eeb6f9346df60cbb1a7049aae9dcec8c1da
| 2,966 |
ipynb
|
Jupyter Notebook
|
HackerRank/Intro_to_Statistics/Day_02.ipynb
|
KartikKannapur/HackerRank
|
50c630d2c3bcb537033519fc5d857749584aafa7
|
[
"MIT"
] | null | null | null |
HackerRank/Intro_to_Statistics/Day_02.ipynb
|
KartikKannapur/HackerRank
|
50c630d2c3bcb537033519fc5d857749584aafa7
|
[
"MIT"
] | null | null | null |
HackerRank/Intro_to_Statistics/Day_02.ipynb
|
KartikKannapur/HackerRank
|
50c630d2c3bcb537033519fc5d857749584aafa7
|
[
"MIT"
] | 1 |
2020-03-06T00:36:29.000Z
|
2020-03-06T00:36:29.000Z
| 21.97037 | 151 | 0.547876 |
[
[
[
"# Day 2: Basic Probability Puzzles #1",
"_____no_output_____"
],
[
"### Objective \nIn this challenge, we practice calculating probability.\n\n### Task \nIn a single toss of 2 fair (evenly-weighted) dice, find the probability of that their sum will be at most 9.",
"_____no_output_____"
],
[
"Answer : 5/6",
"_____no_output_____"
],
[
"# Day 2: Basic Probability Puzzles #2",
"_____no_output_____"
],
[
"### Objective \nIn this challenge, we practice calculating probability.\n\n### Task \nFor a single toss of 2 fair (evenly-weighted) dice, find the probability that the values rolled by each die will be different and their sum is 6.",
"_____no_output_____"
],
[
"Answer : 1/9",
"_____no_output_____"
],
[
"# Day 2: Basic Probability Puzzles #3",
"_____no_output_____"
],
[
"### Objective \nIn this challenge, we practice calculating probability.\n\n### Task \nThere are 3 urns: XX, YY and ZZ.\n\nUrn XX contains 4 red balls and 3 black balls.\nUrn YY contains 5 red balls and 4 black balls.\nUrn ZZ contains 4 red balls and 4 black balls.\nOne ball is drawn from each urn. What is the probability that the 33 balls drawn consist of 2 red balls and 1 black ball?",
"_____no_output_____"
],
[
"Answer : 17/42",
"_____no_output_____"
],
[
"# Day 2: Basic Probability Puzzles #4",
"_____no_output_____"
],
[
"### Objective \nIn this challenge, we practice calculating probability.\n\n### Task \nBag1 contains 4 red balls and 5 black balls. \nBag2 contains 3 red balls and 7 black balls. \nOne ball is drawn from the Bag1, and 2 balls are drawn from Bag2. Find the probability that 2 balls are black and 1 ball is red.",
"_____no_output_____"
],
[
"Answer : 7/15",
"_____no_output_____"
]
]
] |
[
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
4a2260bda7794ab5b0f6ed81fc181704f9b88931
| 1,982 |
ipynb
|
Jupyter Notebook
|
Rectangle.ipynb
|
saraelsharawy/Circle
|
8a9a965a4dca5833a12dfd29c1ba7dd6128e630c
|
[
"MIT"
] | null | null | null |
Rectangle.ipynb
|
saraelsharawy/Circle
|
8a9a965a4dca5833a12dfd29c1ba7dd6128e630c
|
[
"MIT"
] | null | null | null |
Rectangle.ipynb
|
saraelsharawy/Circle
|
8a9a965a4dca5833a12dfd29c1ba7dd6128e630c
|
[
"MIT"
] | null | null | null | 18.87619 | 100 | 0.504036 |
[
[
[
"print(\"Hello, this is Assignment 2. \\nThe code will produce the perimeter of a rectangle.\")",
"Hello, this is Assignment 2. \nThe code will produce the perimeter of a rectangle.\n"
],
[
"l= float(input(\"Please enter the length of the rectangle \"))",
"Please enter the length of the rectangle 5\n"
],
[
"w= float(input(\"Please enter the width of the rectangle \"))",
"Please enter the width of the rectangle 2\n"
],
[
"rec = 2*(l+w)\nprint(\"The perimeter of the rectangle is: \", rec)",
"The perimeter of the rectangle is: 14.0\n"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code"
]
] |
4a2265770f55c4ee9d845c163adfe8ae84a257c1
| 685,250 |
ipynb
|
Jupyter Notebook
|
Chapter 7/Bayesian Data Analysis Chapter 7.ipynb
|
chdamianos/Bayesian-Analysis-with-Python
|
ce13871d7910d4a55a7fca4ab5f9e7b85f8df1dd
|
[
"MIT"
] | null | null | null |
Chapter 7/Bayesian Data Analysis Chapter 7.ipynb
|
chdamianos/Bayesian-Analysis-with-Python
|
ce13871d7910d4a55a7fca4ab5f9e7b85f8df1dd
|
[
"MIT"
] | null | null | null |
Chapter 7/Bayesian Data Analysis Chapter 7.ipynb
|
chdamianos/Bayesian-Analysis-with-Python
|
ce13871d7910d4a55a7fca4ab5f9e7b85f8df1dd
|
[
"MIT"
] | null | null | null | 529.559505 | 113,176 | 0.941394 |
[
[
[
"%matplotlib inline\nimport pymc3 as pm\nimport numpy as np\nimport pandas as pd\nimport scipy.stats as stats\nimport matplotlib.pyplot as plt\nimport seaborn as sns\npalette = 'muted'\nsns.set_palette(palette); sns.set_color_codes(palette)\nnp.set_printoptions(precision=2)",
"_____no_output_____"
]
],
[
[
"# Simple example",
"_____no_output_____"
]
],
[
[
"clusters = 3\n\nn_cluster = [90, 50, 75]\nn_total = sum(n_cluster)\n\nmeans = [9, 21, 35]\nstd_devs = [2, 2, 2]\n# example of mixture data\nmix = np.random.normal(np.repeat(means, n_cluster), np.repeat(std_devs, n_cluster))",
"_____no_output_____"
],
[
"sns.kdeplot(np.array(mix))\nplt.xlabel('$x$', fontsize=14);",
"_____no_output_____"
],
[
"# Author: Thomas Boggs\n\nimport matplotlib.tri as tri\nfrom functools import reduce\nfrom matplotlib import ticker, cm\n\n_corners = np.array([[0, 0], [1, 0], [0.5, 0.75**0.5]])\n_triangle = tri.Triangulation(_corners[:, 0], _corners[:, 1])\n_midpoints = [(_corners[(i + 1) % 3] + _corners[(i + 2) % 3]) / 2.0 for i in range(3)]\n\ndef xy2bc(xy, tol=1.e-3):\n '''Converts 2D Cartesian coordinates to barycentric.\n Arguments:\n xy: A length-2 sequence containing the x and y value.\n '''\n s = [(_corners[i] - _midpoints[i]).dot(xy - _midpoints[i]) / 0.75 for i in range(3)]\n return np.clip(s, tol, 1.0 - tol)\n\nclass Dirichlet(object):\n def __init__(self, alpha):\n '''Creates Dirichlet distribution with parameter `alpha`.'''\n from math import gamma\n from operator import mul\n self._alpha = np.array(alpha)\n self._coef = gamma(np.sum(self._alpha)) /reduce(mul, [gamma(a) for a in self._alpha])\n def pdf(self, x):\n '''Returns pdf value for `x`.'''\n from operator import mul\n return self._coef * reduce(mul, [xx ** (aa - 1)\n for (xx, aa)in zip(x, self._alpha)])\n def sample(self, N):\n '''Generates a random sample of size `N`.'''\n return np.random.dirichlet(self._alpha, N)\n\ndef draw_pdf_contours(dist, nlevels=100, subdiv=8, **kwargs):\n '''Draws pdf contours over an equilateral triangle (2-simplex).\n Arguments:\n dist: A distribution instance with a `pdf` method.\n border (bool): If True, the simplex border is drawn.\n nlevels (int): Number of contours to draw.\n subdiv (int): Number of recursive mesh subdivisions to create.\n kwargs: Keyword args passed on to `plt.triplot`.\n '''\n refiner = tri.UniformTriRefiner(_triangle)\n trimesh = refiner.refine_triangulation(subdiv=subdiv)\n pvals = [dist.pdf(xy2bc(xy)) for xy in zip(trimesh.x, trimesh.y)]\n\n plt.tricontourf(trimesh, pvals, nlevels, cmap=cm.Blues, **kwargs)\n plt.axis('equal')\n plt.xlim(0, 1)\n plt.ylim(0, 0.75**0.5)\n plt.axis('off')",
"_____no_output_____"
],
[
"alphas = [[0.5] * 3, [1] * 3, [10] * 3, [2, 5, 10]]\nfor (i, alpha) in enumerate(alphas):\n plt.subplot(2, 2, i + 1)\n dist = Dirichlet(alpha)\n draw_pdf_contours(dist)\n \n plt.title(r'$\\alpha$ = ({:.1f}, {:.1f}, {:.1f})'.format(*alpha), fontsize=16)",
"_____no_output_____"
],
[
"with pm.Model() as model_kg:\n # Each observation is assigned to a cluster/component with probability p\n p = pm.Dirichlet('p', a=np.ones(clusters))\n category = pm.Categorical('category', p=p, shape=n_total) \n \n # Known Gaussians means\n means = pm.math.constant([10, 20, 35])\n\n y = pm.Normal('y', mu=means[category], sd=2, observed=mix)\n\n step1 = pm.ElemwiseCategorical(vars=[category], values=range(clusters))\n ## The CategoricalGibbsMetropolis is a recent addition to PyMC3\n ## I have not find the time yet to experiment with it.\n #step1 = pm.CategoricalGibbsMet\\ropolis(vars=[category]) \n step2 = pm.Metropolis(vars=[p])\n trace_kg = pm.sample(10000, step=[step1, step2],chains=1,njobs=1)",
"/home/damianos/miniconda3/envs/pymc3/lib/python3.7/site-packages/ipykernel_launcher.py:11: DeprecationWarning: ElemwiseCategorical is deprecated, switch to CategoricalGibbsMetropolis.\n # This is added back by InteractiveShellApp.init_path()\nSequential sampling (1 chains in 1 job)\nCompoundStep\n>ElemwiseCategorical: [category]\n>Metropolis: [p]\n100%|██████████| 10500/10500 [00:30<00:00, 349.40it/s]\nOnly one chain was sampled, this makes it impossible to run some convergence checks\n"
],
[
"chain_kg = trace_kg[1000:]\nvarnames_kg = ['p']\npm.traceplot(chain_kg, varnames_kg);",
"_____no_output_____"
],
[
"print(pm.summary(chain_kg, varnames_kg))\nn_cluster_array = np.array(n_cluster)\nprint('\\n')\nprint('Actual values of cluster fractions: {}'.format(n_cluster_array/n_cluster_array.sum()))",
" mean sd mc_error hpd_2.5 hpd_97.5\np__0 0.418133 0.032734 0.001302 0.347043 0.475764\np__1 0.234513 0.029194 0.001281 0.173813 0.285907\np__2 0.347354 0.032026 0.001378 0.281533 0.406833\n\n\nActual values of cluster fractions: [0.42 0.23 0.35]\n"
],
[
"with pm.Model() as model_ug:\n # Each observation is assigned to a cluster/component with probability p\n p = pm.Dirichlet('p', a=np.ones(clusters))\n category = pm.Categorical('category', p=p, shape=n_total) \n \n # We estimate the unknown gaussians means and standard deviation\n means = pm.Normal('means', mu=[10, 20, 35], sd=2, shape=clusters)\n sd = pm.HalfCauchy('sd', 5)\n\n y = pm.Normal('y', mu=means[category], sd=sd, observed=mix)\n\n step1 = pm.ElemwiseCategorical(vars=[category], values=range(clusters))\n step2 = pm.Metropolis(vars=[means, sd, p])\n trace_ug = pm.sample(10000, step=[step1, step2],chains=1,njobs=1)",
"/home/damianos/miniconda3/envs/pymc3/lib/python3.7/site-packages/ipykernel_launcher.py:12: DeprecationWarning: ElemwiseCategorical is deprecated, switch to CategoricalGibbsMetropolis.\n if sys.path[0] == '':\nSequential sampling (1 chains in 1 job)\nCompoundStep\n>ElemwiseCategorical: [category]\n>CompoundStep\n>>Metropolis: [p]\n>>Metropolis: [sd]\n>>Metropolis: [means]\n100%|██████████| 10500/10500 [00:35<00:00, 297.53it/s]\nOnly one chain was sampled, this makes it impossible to run some convergence checks\n"
],
[
"chain_ug = trace_ug[1000:]\nvarnames_ug = ['means', 'sd', 'p']\npm.traceplot(chain_ug, varnames_ug);",
"_____no_output_____"
],
[
"print(pm.summary(chain_ug, varnames_ug))\nmeans_array = np.array([9, 21, 35])\nstd_devs = [2, 2, 2]\nprint('\\n')\nprint('Actual values of cluster fractions: {}'.format(n_cluster_array/n_cluster_array.sum()))\nprint('Actual values of cluster means: {}'.format(means_array))\nprint('Actual values of cluster sd: {}'.format(std_devs))",
" mean sd mc_error hpd_2.5 hpd_97.5\nmeans__0 9.008717 0.217561 0.007712 8.613627 9.463408\nmeans__1 21.356761 0.275587 0.010145 20.812555 21.887742\nmeans__2 34.742823 0.225639 0.008249 34.304095 35.202331\nsd 2.008365 0.097757 0.004210 1.821559 2.183144\np__0 0.417117 0.033083 0.001022 0.351142 0.480680\np__1 0.233721 0.028664 0.000926 0.172529 0.283838\np__2 0.349161 0.030978 0.001001 0.288788 0.411532\n\n\nActual values of cluster fractions: [0.42 0.23 0.35]\nActual values of cluster means: [ 9 21 35]\nActual values of cluster sd: [2, 2, 2]\n"
],
[
"ppc = pm.sample_ppc(chain_ug, 50, model_ug)",
"/home/damianos/miniconda3/envs/pymc3/lib/python3.7/site-packages/ipykernel_launcher.py:1: DeprecationWarning: sample_ppc() is deprecated. Please use sample_posterior_predictive()\n \"\"\"Entry point for launching an IPython kernel.\n100%|██████████| 50/50 [00:00<00:00, 998.86it/s]\n"
],
[
"for i in ppc['y']:\n sns.kdeplot(i, alpha=0.1, color='b')\nsns.kdeplot(np.array(mix), lw=2, color='k');\nplt.xlabel('$x$', fontsize=14);",
"_____no_output_____"
]
],
[
[
"#### Note the higher uncertainty where the data overlap and the reduced uncertainty at the high/low limits",
"_____no_output_____"
],
[
"## Marginalized Gaussian Mixture model",
"_____no_output_____"
],
[
"### In the previous models we have explicitly defined the latent variable $z$ in the model. This is ineffective in terms of sampling. PyMC3 offers the ability to model the outcome conditionaly on $z$ as $p(y|z,\\theta)$ and marginalise it to get $p(y|\\theta)$",
"_____no_output_____"
]
],
[
[
"with pm.Model() as model_mg:\n p = pm.Dirichlet('p', a=np.ones(clusters))\n\n means = pm.Normal('means', mu=[10, 20, 35], sd=2, shape=clusters)\n sd = pm.HalfCauchy('sd', 5)\n \n y = pm.NormalMixture('y', w=p, mu=means, sd=sd, observed=mix)\n \n trace_mg = pm.sample(5000, chains=1,njobs=1)",
"Auto-assigning NUTS sampler...\nInitializing NUTS using jitter+adapt_diag...\nSequential sampling (1 chains in 1 job)\nNUTS: [sd, means, p]\n100%|██████████| 5500/5500 [00:08<00:00, 637.72it/s]\nOnly one chain was sampled, this makes it impossible to run some convergence checks\n"
],
[
"chain_mg = trace_mg[500:]\nvarnames_mg = ['means', 'sd', 'p']\npm.traceplot(chain_mg, varnames_mg);",
"_____no_output_____"
]
],
[
[
"## Zero inflated Poisson model",
"_____no_output_____"
]
],
[
[
"lam_params = [0.5, 1.5, 3, 8]\nk = np.arange(0, max(lam_params) * 3)\nfor lam in lam_params:\n y = stats.poisson(lam).pmf(k)\n plt.plot(k, y, 'o-', label=\"$\\\\lambda$ = {:3.1f}\".format(lam))\nplt.legend();\nplt.xlabel('$k$', fontsize=14);\nplt.ylabel('$pmf(k)$', fontsize=14);",
"_____no_output_____"
],
[
"np.random.seed(42)\nn = 100\nlam_true = 2.5 # Poisson rate\npi = 0.2 # probability of extra-zeros (pi = 1-psi)\n\n# Simulate some data\ncounts = np.array([(np.random.random() > pi) * np.random.poisson(lam_true) for i in range(n)])",
"_____no_output_____"
],
[
"plt.hist(counts, bins=30);",
"_____no_output_____"
],
[
"with pm.Model() as ZIP:\n psi = pm.Beta('psi', 1, 1)\n lam = pm.Gamma('lam', 2, 0.1)\n \n y_pred = pm.ZeroInflatedPoisson('y_pred', theta=lam, psi=psi, observed=counts)\n trace_ZIP = pm.sample(5000,chains=1,njobs=1)",
"Auto-assigning NUTS sampler...\nInitializing NUTS using jitter+adapt_diag...\nSequential sampling (1 chains in 1 job)\nNUTS: [lam, psi]\n100%|██████████| 5500/5500 [00:03<00:00, 1652.34it/s]\nOnly one chain was sampled, this makes it impossible to run some convergence checks\n"
],
[
"chain_ZIP = trace_ZIP[100:]\npm.traceplot(chain_ZIP);",
"_____no_output_____"
],
[
"pm.summary(chain_ZIP)",
"_____no_output_____"
]
],
[
[
"## Zero inflated Poisson regression",
"_____no_output_____"
]
],
[
[
"#Kruschke plot",
"_____no_output_____"
],
[
"fish_data = pd.read_csv('fish.csv')\nfish_data.head()",
"_____no_output_____"
],
[
"plt.hist(fish_data['count'], bins=20, normed=True);",
"/home/damianos/miniconda3/envs/pymc3/lib/python3.7/site-packages/matplotlib/axes/_axes.py:6521: MatplotlibDeprecationWarning: \nThe 'normed' kwarg was deprecated in Matplotlib 2.1 and will be removed in 3.1. Use 'density' instead.\n alternative=\"'density'\", removal=\"3.1\")\n"
],
[
"with pm.Model() as ZIP_reg:\n psi = pm.Beta('psi', 1, 1)\n \n alpha = pm.Normal('alpha', 0, 10)\n beta = pm.Normal('beta', 0, 10, shape=2)\n lam = pm.math.exp(alpha + beta[0] * fish_data['child'] + beta[1] * fish_data['camper'])\n \n y = pm.ZeroInflatedPoisson('y', theta=lam, psi=psi, observed=fish_data['count'])\n trace_ZIP_reg = pm.sample(2000,chains=1,njobs=1)",
"Auto-assigning NUTS sampler...\nInitializing NUTS using jitter+adapt_diag...\nSequential sampling (1 chains in 1 job)\nNUTS: [beta, alpha, psi]\n100%|██████████| 2500/2500 [00:05<00:00, 437.66it/s]\nOnly one chain was sampled, this makes it impossible to run some convergence checks\n"
],
[
"chain_ZIP_reg = trace_ZIP_reg[100:]\npm.traceplot(chain_ZIP_reg);",
"_____no_output_____"
],
[
"pm.summary(chain_ZIP_reg)\n",
"_____no_output_____"
],
[
"children = [0, 1, 2, 3, 4]\nfish_count_pred_0 = []\nfish_count_pred_1 = []\nthin = 5\n# calculate the expectation lambda with and withour a camper for difference number of children\n# note lambda from the model is exp(a+bX)\nfor n in children:\n without_camper = chain_ZIP_reg['alpha'][::thin] + chain_ZIP_reg['beta'][:,0][::thin] * n\n with_camper = without_camper + chain_ZIP_reg['beta'][:,1][::thin]\n fish_count_pred_0.append(np.exp(without_camper))\n fish_count_pred_1.append(np.exp(with_camper))",
"_____no_output_____"
],
[
"plt.plot(children, fish_count_pred_0, 'bo', alpha=0.01)\nplt.plot(children, fish_count_pred_1, 'ro', alpha=0.01)\n\nplt.xticks(children);\nplt.xlabel('Number of children', fontsize=14)\nplt.ylabel('Fish caught', fontsize=14)\nplt.plot([], 'bo', label='without camper')\nplt.plot([], 'ro', label='with camper')\nplt.legend(fontsize=14);",
"_____no_output_____"
]
],
[
[
"## Robust logistic Regression",
"_____no_output_____"
]
],
[
[
"iris = sns.load_dataset(\"iris\")\ndf = iris.query(\"species == ('setosa', 'versicolor')\")\ny_0 = pd.Categorical(df['species']).codes\nx_n = 'sepal_length' \nx_0 = df[x_n].values\n# contaminate our set with ones with unusually small sepal length\ny_0 = np.concatenate((y_0, np.ones(6)))\nx_0 = np.concatenate((x_0, [4.2, 4.5, 4.0, 4.3, 4.2, 4.4]))\nx_0_m = x_0 - x_0.mean()\nplt.plot(x_0, y_0, 'o', color='k');",
"_____no_output_____"
],
[
"with pm.Model() as model_rlg:\n alpha_tmp = pm.Normal('alpha_tmp', mu=0, sd=100)\n beta = pm.Normal('beta', mu=0, sd=10)\n \n mu = alpha_tmp + beta * x_0_m\n theta = pm.Deterministic('theta', 1 / (1 + pm.math.exp(-mu)))\n # add the mixture here as a combination of the logistic derived theta \n # and a random pi from a Beta distribution\n pi = pm.Beta('pi', 1, 1)\n p = pi * 0.5 + (1 - pi) * theta\n # correct alpha from centering\n alpha = pm.Deterministic('alpha', alpha_tmp - beta * x_0.mean())\n bd = pm.Deterministic('bd', -alpha/beta)\n \n yl = pm.Bernoulli('yl', p=p, observed=y_0)\n\n trace_rlg = pm.sample(2000, njobs=1,chains=1)",
"Auto-assigning NUTS sampler...\nInitializing NUTS using jitter+adapt_diag...\nSequential sampling (1 chains in 1 job)\nNUTS: [pi, beta, alpha_tmp]\n100%|██████████| 2500/2500 [00:02<00:00, 1131.39it/s]\nThere were 3 divergences after tuning. Increase `target_accept` or reparameterize.\nOnly one chain was sampled, this makes it impossible to run some convergence checks\n"
],
[
"varnames = ['alpha', 'beta', 'bd', 'pi']\npm.traceplot(trace_rlg, varnames);",
"_____no_output_____"
],
[
"pm.summary(trace_rlg, varnames)",
"_____no_output_____"
],
[
"theta = trace_rlg['theta'].mean(axis=0)\nidx = np.argsort(x_0)\nplt.plot(x_0[idx], theta[idx], color='b', lw=3);\nplt.axvline(trace_rlg['bd'].mean(), ymax=1, color='r')\nbd_hpd = pm.hpd(trace_rlg['bd'])\nplt.fill_betweenx([0, 1], bd_hpd[0], bd_hpd[1], color='r', alpha=0.5)\n\nplt.plot(x_0, y_0, 'o', color='k');\ntheta_hpd = pm.hpd(trace_rlg['theta'])[idx]\nplt.fill_between(x_0[idx], theta_hpd[:,0], theta_hpd[:,1], color='b', alpha=0.5);\n\nplt.xlabel(x_n, fontsize=16);\nplt.ylabel('$\\\\theta$', rotation=0, fontsize=16);",
"_____no_output_____"
],
[
"import sys, IPython, scipy, matplotlib, platform\nprint(\"This notebook was created on a %s computer running %s and using:\\nPython %s\\nIPython %s\\nPyMC3 %s\\nNumPy %s\\nSciPy %s\\nMatplotlib %s\\nSeaborn %s\\nPandas %s\" % (platform.machine(), ' '.join(platform.linux_distribution()[:2]), sys.version[:5], IPython.__version__, pm.__version__, np.__version__, scipy.__version__, matplotlib.__version__, sns.__version__, pd.__version__))",
"This notebook was created on a x86_64 computer running debian buster/sid and using:\nPython 3.7.2\nIPython 7.2.0\nPyMC3 3.6\nNumPy 1.16.0\nSciPy 1.2.0\nMatplotlib 3.0.2\nSeaborn 0.9.0\nPandas 0.23.4\n"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a22717504b32a01d3f058f8f3a3ef0647253495
| 23,429 |
ipynb
|
Jupyter Notebook
|
tutorials/nlp/GLUE_Benchmark.ipynb
|
qmpzzpmq/NeMo
|
acca8d0bf558aa2466954c2222e61cd8fbf2b2c1
|
[
"Apache-2.0"
] | 10 |
2021-04-01T05:55:18.000Z
|
2022-02-15T01:41:41.000Z
|
tutorials/nlp/GLUE_Benchmark.ipynb
|
qmpzzpmq/NeMo
|
acca8d0bf558aa2466954c2222e61cd8fbf2b2c1
|
[
"Apache-2.0"
] | null | null | null |
tutorials/nlp/GLUE_Benchmark.ipynb
|
qmpzzpmq/NeMo
|
acca8d0bf558aa2466954c2222e61cd8fbf2b2c1
|
[
"Apache-2.0"
] | null | null | null | 41.393993 | 489 | 0.555337 |
[
[
[
"\"\"\"\nYou can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.\n\nInstructions for setting up Colab are as follows:\n1. Open a new Python 3 notebook.\n2. Import this notebook from GitHub (File -> Upload Notebook -> \"GITHUB\" tab -> copy/paste GitHub URL)\n3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select \"GPU\" for hardware accelerator)\n4. Run this cell to set up dependencies.\n\"\"\"\n# If you're using Google Colab and not running locally, run this cell\n\n# install NeMo\nBRANCH = 'v1.0.0b3'\n!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]\n",
"_____no_output_____"
],
[
"# If you're not using Colab, you might need to upgrade jupyter notebook to avoid the following error:\n# 'ImportError: IProgress not found. Please update jupyter and ipywidgets.'\n\n! pip install ipywidgets\n! jupyter nbextension enable --py widgetsnbextension\n\n# Please restart the kernel after running this cell",
"_____no_output_____"
],
[
"from nemo.collections import nlp as nemo_nlp\nfrom nemo.utils.exp_manager import exp_manager\n\nimport os\nimport wget \nimport torch\nimport pytorch_lightning as pl\nfrom omegaconf import OmegaConf",
"_____no_output_____"
]
],
[
[
"In this tutorial, we are going to describe how to finetune a BERT-like model based on [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805) on [GLUE: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding](https://openreview.net/pdf?id=rJ4km2R5t7). \n\n# GLUE tasks\nGLUE Benchmark includes 9 natural language understanding tasks:\n\n## Single-Sentence Tasks\n\n* CoLA - [The Corpus of Linguistic Acceptability](https://arxiv.org/abs/1805.12471) is a set of English sentences from published linguistics literature. The task is to predict whether a given sentence is grammatically correct or not.\n* SST-2 - [The Stanford Sentiment Treebank](https://nlp.stanford.edu/~socherr/EMNLP2013_RNTN.pdf) consists of sentences from movie reviews and human annotations of their sentiment. The task is to predict the sentiment of a given sentence: positive or negative.\n\n## Similarity and Paraphrase tasks\n\n* MRPC - [The Microsoft Research Paraphrase Corpus](https://www.aclweb.org/anthology/I05-5002.pdf) is a corpus of sentence pairs automatically extracted from online news sources, with human annotations for whether the sentences in the pair are semantically equivalent.\n* QQP - [The Quora Question Pairs](https://www.quora.com/q/quoradata/First-Quora-Dataset-Release-Question-Pairs) dataset is a collection of question pairs from the community question-answering website Quora. The task is to determine whether a pair of questions are semantically equivalent.\n* STS-B - [The Semantic Textual Similarity Benchmark](https://arxiv.org/abs/1708.00055) is a collection of sentence pairs drawn from news headlines, video, and image captions, and natural language inference data. 
The task is to determine how similar two sentences are.\n\n## Inference Tasks\n\n* MNLI - [The Multi-Genre Natural Language Inference Corpus](https://cims.nyu.edu/~sbowman/multinli/multinli_0.9.pdf) is a crowdsourced collection of sentence pairs with textual entailment annotations. Given a premise sentence and a hypothesis sentence, the task is to predict whether the premise entails the hypothesis (entailment), contradicts the hypothesis (contradiction), or neither (neutral). The task has the matched (in-domain) and mismatched (cross-domain) sections.\n* QNLI - [The Stanford Question Answering Dataset](https://nlp.stanford.edu/pubs/rajpurkar2016squad.pdf) is a question-answering dataset consisting of question-paragraph pairs, where one of the sentences in the paragraph (drawn from Wikipedia) contains the answer to the corresponding question. The task is to determine whether the context sentence contains the answer to the question.\n* RTE The Recognizing Textual Entailment (RTE) datasets come from a series of annual [textual entailment challenges](https://aclweb.org/aclwiki/Recognizing_Textual_Entailment). The task is to determine whether the second sentence is the entailment of the first one or not.\n* WNLI - The Winograd Schema Challenge is a reading comprehension task in which a system must read a sentence with a pronoun and select the referent of that pronoun from a list of choices (Hector Levesque, Ernest Davis, and Leora Morgenstern. The winograd schema challenge. In Thirteenth International Conference on the Principles of Knowledge Representation and Reasoning. 2012).\n\nAll tasks are classification tasks, except for the STS-B task which is a regression task. All classification tasks are 2-class problems, except for the MNLI task which has 3-classes.\n\nMore details about GLUE benchmark could be found [here](https://gluebenchmark.com/).",
"_____no_output_____"
],
[
"# Datasets\n\n**To proceed further, you need to download the GLUE data.** For example, you can download [this script](https://gist.githubusercontent.com/W4ngatang/60c2bdb54d156a41194446737ce03e2e/raw/17b8dd0d724281ed7c3b2aeeda662b92809aadd5/download_glue_data.py) using `wget` and then execute it by running:\n\n`python download_glue_data.py`\n\nuse `--tasks TASK` if datasets for only selected GLUE tasks are needed\n\nAfter running the above commands, you will have a folder `glue_data` with data folders for every GLUE task. For example, data for MRPC task would be under glue_data/MRPC.\n\nThis tutorial and [examples/nlp/glue_benchmark/glue_benchmark.py](https://github.com/NVIDIA/NeMo/blob/main/examples/nlp/glue_benchmark/glue_benchmark.py) work with all GLUE tasks without any modifications. For this tutorial, we are going to use MRPC task.\n\n\n\n",
"_____no_output_____"
]
],
[
[
"# supported task names: [\"cola\", \"sst-2\", \"mrpc\", \"sts-b\", \"qqp\", \"mnli\", \"qnli\", \"rte\", \"wnli\"]\nTASK = 'mrpc'\nDATA_DIR = 'glue_data/MRPC'\nWORK_DIR = \"WORK_DIR\"\nMODEL_CONFIG = 'glue_benchmark_config.yaml'",
"_____no_output_____"
],
[
"! ls -l $DATA_DIR",
"_____no_output_____"
]
],
[
[
"For each task, there are 3 files: `train.tsv, dev.tsv, and test.tsv`. Note, MNLI has 2 dev sets: matched and mismatched, evaluation on both dev sets will be done automatically.",
"_____no_output_____"
]
],
[
[
"# let's take a look at the training data \n! head -n 5 {DATA_DIR}/train.tsv",
"_____no_output_____"
]
],
[
[
"# Model configuration\n\nNow, let's take a closer look at the model's configuration and learn to train the model.\n\nGLUE model is comprised of the pretrained [BERT](https://arxiv.org/pdf/1810.04805.pdf) model followed by a Sequence Regression module (for STS-B task) or Sequence classifier module (for the rest of the tasks).\n\nThe model is defined in a config file which declares multiple important sections. They are:\n- **model**: All arguments that are related to the Model - language model, a classifier, optimizer and schedulers, datasets and any other related information\n\n- **trainer**: Any argument to be passed to PyTorch Lightning",
"_____no_output_____"
]
],
[
[
"# download the model's configuration file \nconfig_dir = WORK_DIR + '/configs/'\nos.makedirs(config_dir, exist_ok=True)\nif not os.path.exists(config_dir + MODEL_CONFIG):\n print('Downloading config file...')\n wget.download('https://raw.githubusercontent.com/NVIDIA/NeMo/v1.0.0b2/examples/nlp/glue_benchmark/' + MODEL_CONFIG, config_dir)\nelse:\n print ('config file is already exists')",
"_____no_output_____"
],
[
"# this line will print the entire config of the model\nconfig_path = f'{WORK_DIR}/configs/{MODEL_CONFIG}'\nprint(config_path)\nconfig = OmegaConf.load(config_path)\nprint(OmegaConf.to_yaml(config))",
"_____no_output_____"
]
],
[
[
"# Model Training\n## Setting up Data within the config\n\nAmong other things, the config file contains dictionaries called **dataset**, **train_ds** and **validation_ds**. These are configurations used to setup the Dataset and DataLoaders of the corresponding config.\n\nWe assume that both training and evaluation files are located in the same directory, and use the default names mentioned during the data download step. \nSo, to start model training, we simply need to specify `model.dataset.data_dir`, like we are going to do below.\n\nAlso notice that some config lines, including `model.dataset.data_dir`, have `???` in place of paths, this means that values for these fields are required to be specified by the user.\n\nLet's now add the data directory path, task name and output directory for saving predictions to the config.",
"_____no_output_____"
]
],
[
[
"config.model.task_name = TASK\nconfig.model.output_dir = WORK_DIR\nconfig.model.dataset.data_dir = DATA_DIR",
"_____no_output_____"
]
],
[
[
"## Building the PyTorch Lightning Trainer\n\nNeMo models are primarily PyTorch Lightning modules - and therefore are entirely compatible with the PyTorch Lightning ecosystem.\n\nLet's first instantiate a Trainer object",
"_____no_output_____"
]
],
[
[
"print(\"Trainer config - \\n\")\nprint(OmegaConf.to_yaml(config.trainer))",
"_____no_output_____"
],
[
"# lets modify some trainer configs\n# checks if we have GPU available and uses it\ncuda = 1 if torch.cuda.is_available() else 0\nconfig.trainer.gpus = cuda\n\nconfig.trainer.precision = 16 if torch.cuda.is_available() else 32\n\n# for mixed precision training, uncomment the line below (precision should be set to 16 and amp_level to O1):\n# config.trainer.amp_level = O1\n\n# remove distributed training flags\nconfig.trainer.accelerator = None\n\n# setup max number of steps to reduce training time for demonstration purposes of this tutorial\nconfig.trainer.max_steps = 128\n\ntrainer = pl.Trainer(**config.trainer)",
"_____no_output_____"
]
],
[
[
"## Setting up a NeMo Experiment\n\nNeMo has an experiment manager that handles logging and checkpointing for us, so let's use it:",
"_____no_output_____"
]
],
[
[
"exp_dir = exp_manager(trainer, config.get(\"exp_manager\", None))\n\n# the exp_dir provides a path to the current experiment for easy access\nexp_dir = str(exp_dir)\nexp_dir",
"_____no_output_____"
]
],
[
[
"Before initializing the model, we might want to modify some of the model configs. For example, we might want to modify the pretrained BERT model and use [Megatron-LM BERT](https://arxiv.org/abs/1909.08053) or [AlBERT model](https://arxiv.org/abs/1909.11942):",
"_____no_output_____"
]
],
[
[
"# get the list of supported BERT-like models, for the complete list of HugginFace models, see https://huggingface.co/models\nprint(nemo_nlp.modules.get_pretrained_lm_models_list(include_external=True))\n\n# specify BERT-like model, you want to use, for example, \"megatron-bert-345m-uncased\" or 'bert-base-uncased'\nPRETRAINED_BERT_MODEL = \"albert-base-v1\"",
"_____no_output_____"
],
[
"# add the specified above model parameters to the config\nconfig.model.language_model.pretrained_model_name = PRETRAINED_BERT_MODEL",
"_____no_output_____"
]
],
[
[
"Now, we are ready to initialize our model. During the model initialization call, the dataset and data loaders we'll be prepared for training and evaluation.\nAlso, the pretrained BERT model will be downloaded, note it can take up to a few minutes depending on the size of the chosen BERT model.",
"_____no_output_____"
]
],
[
[
"model = nemo_nlp.models.GLUEModel(cfg=config.model, trainer=trainer)",
"_____no_output_____"
]
],
[
[
"## Monitoring training progress\nOptionally, you can create a Tensorboard visualization to monitor training progress.",
"_____no_output_____"
]
],
[
[
"try:\n from google import colab\n COLAB_ENV = True\nexcept (ImportError, ModuleNotFoundError):\n COLAB_ENV = False\n\n# Load the TensorBoard notebook extension\nif COLAB_ENV:\n %load_ext tensorboard\n %tensorboard --logdir {exp_dir}\nelse:\n print(\"To use tensorboard, please use this notebook in a Google Colab environment.\")",
"_____no_output_____"
]
],
[
[
"Note, it’s recommended to finetune the model on each task separately. Also, based on [GLUE Benchmark FAQ#12](https://gluebenchmark.com/faq), there are might be some differences in dev/test distributions for QQP task and in train/dev for WNLI task.",
"_____no_output_____"
]
],
[
[
"# start model training\ntrainer.fit(model)",
"_____no_output_____"
]
],
[
[
"## Training Script\n\nIf you have NeMo installed locally, you can also train the model with [examples/nlp/glue_benchmark/glue_benchmark.py](https://github.com/NVIDIA/NeMo/blob/main/examples/nlp/glue_benchmark/glue_benchmark.py).\n\nTo run training script, use:\n\n`python glue_benchmark.py \\\n model.dataset.data_dir=PATH_TO_DATA_DIR \\\n model.task_name=TASK`\n",
"_____no_output_____"
],
[
"Average results after 3 runs:\n\n| Task | Metric | ALBERT-large | ALBERT-xlarge | Megatron-345m | BERT base paper | BERT large paper |\n|-------|--------------------------|--------------|---------------|---------------|-----------------|------------------|\n| CoLA | Matthew's correlation | 54.94 | 61.72 | 64.56 | 52.1 | 60.5 |\n| SST-2 | Accuracy | 92.74 | 91.86 | 95.87 | 93.5 | 94.9 |\n| MRPC | F1/Accuracy | 92.05/88.97 | 91.87/88.61 | 92.36/89.46 | 88.9/- | 89.3/- |\n| STS-B | Person/Spearman corr. | 90.41/90.21 | 90.07/90.10 | 91.51/91.61 | -/85.8 | -/86.5 |\n| QQP | F1/Accuracy | 88.26/91.26 | 88.80/91.65 | 89.18/91.91 | 71.2/- | 72.1/- |\n| MNLI | Matched /Mismatched acc. | 86.69/86.81 | 88.66/88.73 | 89.86/89.81 | 84.6/83.4 | 86.7/85.9 |\n| QNLI | Accuracy | 92.68 | 93.66 | 94.33 | 90.5 | 92.7 |\n| RTE | Accuracy | 80.87 | 82.86 | 83.39 | 66.4 | 70.1 |\n\nWNLI task was excluded from the experiments due to the problematic WNLI set.\nThe dev sets were used for evaluation for ALBERT and Megatron models, and the test sets results for [the BERT paper](https://arxiv.org/abs/1810.04805).\n\nHyperparameters used to get the results from the above table, could be found in the table below. Some tasks could be further finetuned to improve performance numbers, the tables are for a baseline reference only.\nEach cell in the table represents the following parameters:\nNumber of GPUs used/ Batch Size/ Learning Rate/ Number of Epochs. 
For not specified parameters, please refer to the default parameters in the training script.\n\n| Task | ALBERT-large | ALBERT-xlarge | Megatron-345m |\n|-------|--------------|---------------|---------------|\n| CoLA | 1 / 32 / 1e-5 / 3 | 1 / 32 / 1e-5 / 10 | 4 / 16 / 2e-5 / 12 |\n| SST-2 | 4 / 16 / 2e-5 / 5 | 4 / 16 / 2e-5 /12 | 4 / 16 / 2e-5 / 12 |\n| MRPC | 1 / 32 / 1e-5 / 5 | 1 / 16 / 2e-5 / 5 | 1 / 16 / 2e-5 / 10 |\n| STS-B | 1 / 16 / 2e-5 / 5 | 1 / 16 / 4e-5 / 12 | 4 / 16 / 3e-5 / 12 |\n| QQP | 1 / 16 / 2e-5 / 5 | 4 / 16 / 1e-5 / 12 | 4 / 16 / 1e-5 / 12 |\n| MNLI | 4 / 64 / 1e-5 / 5 | 4 / 32 / 1e-5 / 5 | 4 / 32 / 1e-5 / 5 | \n| QNLI | 4 / 16 / 1e-5 / 5 | 4 / 16 / 1e-5 / 5 | 4 / 16 / 2e-5 / 5 | \n| RTE | 1 / 16 / 1e-5 / 5 | 1 / 16 / 1e-5 / 12 | 4 / 16 / 3e-5 / 12 |\n",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
4a2278d9e5b8f9d09980e455379c3bf0ebb31366
| 189,235 |
ipynb
|
Jupyter Notebook
|
Focus-Areas-2021/project8/Automated-big-query-tables-creation.ipynb
|
sureshkuc/Freie-Universitat-Berlin
|
a9458044702070e4d3f549bffc7f839701ab86f0
|
[
"MIT"
] | null | null | null |
Focus-Areas-2021/project8/Automated-big-query-tables-creation.ipynb
|
sureshkuc/Freie-Universitat-Berlin
|
a9458044702070e4d3f549bffc7f839701ab86f0
|
[
"MIT"
] | null | null | null |
Focus-Areas-2021/project8/Automated-big-query-tables-creation.ipynb
|
sureshkuc/Freie-Universitat-Berlin
|
a9458044702070e4d3f549bffc7f839701ab86f0
|
[
"MIT"
] | null | null | null | 20.879951 | 6,246 | 0.456026 |
[
[
[
"pip install pandas_gbq",
"Defaulting to user installation because normal site-packages is not writeable\nCollecting pandas_gbq\n Downloading pandas_gbq-0.14.1-py3-none-any.whl (24 kB)\nRequirement already satisfied: google-auth-oauthlib in /home/suresh/.local/lib/python3.8/site-packages (from pandas_gbq) (0.4.2)\nRequirement already satisfied: pandas>=0.20.1 in /home/suresh/.local/lib/python3.8/site-packages (from pandas_gbq) (1.1.4)\nRequirement already satisfied: google-auth in /home/suresh/.local/lib/python3.8/site-packages (from pandas_gbq) (1.23.0)\nRequirement already satisfied: setuptools in /usr/lib/python3/dist-packages (from pandas_gbq) (45.2.0)\nCollecting google-cloud-bigquery[bqstorage,pandas]<3.0.0dev,>=1.11.1\n Downloading google_cloud_bigquery-2.10.0-py2.py3-none-any.whl (215 kB)\n\u001b[K |████████████████████████████████| 215 kB 6.5 MB/s eta 0:00:01\n\u001b[?25hRequirement already satisfied: packaging>=14.3 in /home/suresh/.local/lib/python3.8/site-packages (from google-cloud-bigquery[bqstorage,pandas]<3.0.0dev,>=1.11.1->pandas_gbq) (20.4)\nRequirement already satisfied: grpcio<2.0dev,>=1.32.0 in /home/suresh/.local/lib/python3.8/site-packages (from google-cloud-bigquery[bqstorage,pandas]<3.0.0dev,>=1.11.1->pandas_gbq) (1.33.2)\nCollecting google-api-core[grpc]<2.0.0dev,>=1.23.0\n Downloading google_api_core-1.26.1-py2.py3-none-any.whl (92 kB)\n\u001b[K |████████████████████████████████| 92 kB 2.5 MB/s eta 0:00:01\n\u001b[?25hRequirement already satisfied: requests<3.0.0dev,>=2.18.0 in /usr/lib/python3/dist-packages (from google-api-core[grpc]<2.0.0dev,>=1.23.0->google-cloud-bigquery[bqstorage,pandas]<3.0.0dev,>=1.11.1->pandas_gbq) (2.22.0)\nRequirement already satisfied: pytz in /usr/lib/python3/dist-packages (from google-api-core[grpc]<2.0.0dev,>=1.23.0->google-cloud-bigquery[bqstorage,pandas]<3.0.0dev,>=1.11.1->pandas_gbq) (2019.3)\nRequirement already satisfied: six>=1.13.0 in /usr/lib/python3/dist-packages (from 
google-api-core[grpc]<2.0.0dev,>=1.23.0->google-cloud-bigquery[bqstorage,pandas]<3.0.0dev,>=1.11.1->pandas_gbq) (1.14.0)\nRequirement already satisfied: rsa<5,>=3.1.4 in /home/suresh/.local/lib/python3.8/site-packages (from google-auth->pandas_gbq) (4.6)\nRequirement already satisfied: cachetools<5.0,>=2.0.0 in /home/suresh/.local/lib/python3.8/site-packages (from google-auth->pandas_gbq) (4.1.1)\nRequirement already satisfied: pyasn1-modules>=0.2.1 in /home/suresh/.local/lib/python3.8/site-packages (from google-auth->pandas_gbq) (0.2.8)\nCollecting google-cloud-bigquery-storage<3.0.0dev,>=2.0.0\n Downloading google_cloud_bigquery_storage-2.3.0-py2.py3-none-any.whl (142 kB)\n\u001b[K |████████████████████████████████| 142 kB 8.3 MB/s eta 0:00:01\n\u001b[?25hCollecting google-cloud-core<2.0dev,>=1.4.1\n Downloading google_cloud_core-1.6.0-py2.py3-none-any.whl (28 kB)\nCollecting google-auth\n Downloading google_auth-1.27.1-py2.py3-none-any.whl (136 kB)\n\u001b[K |████████████████████████████████| 136 kB 8.3 MB/s eta 0:00:01\n\u001b[?25hCollecting google-resumable-media<2.0dev,>=0.6.0\n Downloading google_resumable_media-1.2.0-py2.py3-none-any.whl (75 kB)\n\u001b[K |████████████████████████████████| 75 kB 6.1 MB/s eta 0:00:01\n\u001b[?25hCollecting google-crc32c<2.0dev,>=1.0\n Downloading google_crc32c-1.1.2-cp38-cp38-manylinux2014_x86_64.whl (38 kB)\nRequirement already satisfied: cffi>=1.0.0 in /home/suresh/.local/lib/python3.8/site-packages (from google-crc32c<2.0dev,>=1.0->google-resumable-media<2.0dev,>=0.6.0->google-cloud-bigquery[bqstorage,pandas]<3.0.0dev,>=1.11.1->pandas_gbq) (1.14.3)\nRequirement already satisfied: pycparser in /home/suresh/.local/lib/python3.8/site-packages (from cffi>=1.0.0->google-crc32c<2.0dev,>=1.0->google-resumable-media<2.0dev,>=0.6.0->google-cloud-bigquery[bqstorage,pandas]<3.0.0dev,>=1.11.1->pandas_gbq) (2.20)\nCollecting googleapis-common-protos<2.0dev,>=1.6.0\n Downloading googleapis_common_protos-1.53.0-py2.py3-none-any.whl (198 
kB)\n\u001b[K |████████████████████████████████| 198 kB 10.1 MB/s eta 0:00:01\n\u001b[?25hCollecting libcst>=0.2.5\n Downloading libcst-0.3.17-py3-none-any.whl (507 kB)\n\u001b[K |████████████████████████████████| 507 kB 11.9 MB/s eta 0:00:01\n\u001b[?25hRequirement already satisfied: typing-extensions>=3.7.4.2 in /home/suresh/.local/lib/python3.8/site-packages (from libcst>=0.2.5->google-cloud-bigquery-storage<3.0.0dev,>=2.0.0->google-cloud-bigquery[bqstorage,pandas]<3.0.0dev,>=1.11.1->pandas_gbq) (3.7.4.3)\nRequirement already satisfied: pyyaml>=5.2 in /usr/lib/python3/dist-packages (from libcst>=0.2.5->google-cloud-bigquery-storage<3.0.0dev,>=2.0.0->google-cloud-bigquery[bqstorage,pandas]<3.0.0dev,>=1.11.1->pandas_gbq) (5.3.1)\nRequirement already satisfied: pyparsing>=2.0.2 in /home/suresh/.local/lib/python3.8/site-packages (from packaging>=14.3->google-cloud-bigquery[bqstorage,pandas]<3.0.0dev,>=1.11.1->pandas_gbq) (2.4.7)\nRequirement already satisfied: numpy>=1.15.4 in /home/suresh/.local/lib/python3.8/site-packages (from pandas>=0.20.1->pandas_gbq) (1.19.4)\nRequirement already satisfied: python-dateutil>=2.7.3 in /home/suresh/.local/lib/python3.8/site-packages (from pandas>=0.20.1->pandas_gbq) (2.8.1)\nCollecting proto-plus>=1.10.0\n Downloading proto_plus-1.14.2-py3-none-any.whl (42 kB)\n\u001b[K |████████████████████████████████| 42 kB 3.6 MB/s eta 0:00:01\n\u001b[?25hCollecting protobuf>=3.12.0\n Downloading protobuf-3.15.5-cp38-cp38-manylinux1_x86_64.whl (1.0 MB)\n\u001b[K |████████████████████████████████| 1.0 MB 8.4 MB/s eta 0:00:01\n\u001b[?25hCollecting pyarrow<4.0dev,>=1.0.0\n Downloading pyarrow-3.0.0-cp38-cp38-manylinux2014_x86_64.whl (20.7 MB)\n\u001b[K |████████████████████████████████| 20.7 MB 8.8 MB/s eta 0:00:011\n\u001b[?25hRequirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /home/suresh/.local/lib/python3.8/site-packages (from pyasn1-modules>=0.2.1->google-auth->pandas_gbq) (0.4.8)\nCollecting typing-inspect>=0.4.0\n Downloading 
typing_inspect-0.6.0-py3-none-any.whl (8.1 kB)\nRequirement already satisfied: mypy-extensions>=0.3.0 in /home/suresh/.local/lib/python3.8/site-packages (from typing-inspect>=0.4.0->libcst>=0.2.5->google-cloud-bigquery-storage<3.0.0dev,>=2.0.0->google-cloud-bigquery[bqstorage,pandas]<3.0.0dev,>=1.11.1->pandas_gbq) (0.4.3)\nRequirement already satisfied: requests-oauthlib>=0.7.0 in /home/suresh/.local/lib/python3.8/site-packages (from google-auth-oauthlib->pandas_gbq) (1.3.0)\nRequirement already satisfied: oauthlib>=3.0.0 in /usr/lib/python3/dist-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib->pandas_gbq) (3.1.0)\nCollecting pydata-google-auth\n Downloading pydata_google_auth-1.1.0-py2.py3-none-any.whl (13 kB)\nInstalling collected packages: protobuf, googleapis-common-protos, google-auth, typing-inspect, google-crc32c, google-api-core, proto-plus, libcst, google-resumable-media, google-cloud-core, pyarrow, google-cloud-bigquery-storage, google-cloud-bigquery, pydata-google-auth, pandas-gbq\n Attempting uninstall: google-auth\n Found existing installation: google-auth 1.23.0\n Uninstalling google-auth-1.23.0:\n Successfully uninstalled google-auth-1.23.0\n\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. 
This behaviour is the source of the following dependency conflicts.\ntensorflow-gpu 2.3.1 requires numpy<1.19.0,>=1.16.0, but you have numpy 1.19.4 which is incompatible.\u001b[0m\nSuccessfully installed google-api-core-1.26.1 google-auth-1.27.1 google-cloud-bigquery-2.10.0 google-cloud-bigquery-storage-2.3.0 google-cloud-core-1.6.0 google-crc32c-1.1.2 google-resumable-media-1.2.0 googleapis-common-protos-1.53.0 libcst-0.3.17 pandas-gbq-0.14.1 proto-plus-1.14.2 protobuf-3.15.5 pyarrow-3.0.0 pydata-google-auth-1.1.0 typing-inspect-0.6.0\n\u001b[33mWARNING: You are using pip version 20.3.3; however, version 21.0.1 is available.\nYou should consider upgrading via the '/usr/bin/python3 -m pip install --upgrade pip' command.\u001b[0m\nNote: you may need to restart the kernel to use updated packages.\n"
],
[
"import pandas as pd\nimport os\nimport pandas_gbq",
"_____no_output_____"
],
[
"PROJECT_ID = 'ipads2020assignment8'\nfor folder in os.listdir('./sensing/'):\n for file in os.listdir('./sensing/'+folder):\n print('./sensing/'+folder+'/'+file)\n data= pd.read_csv('./sensing/'+folder+'/'+file)\n columns={x:'_'.join(x.split()) for x in data.columns}\n data=data.rename(columns=columns)\n try:\n pandas_gbq.to_gbq(\n data, 'Students.'+file.split('.')[0], project_id=PROJECT_ID, if_exists='fail',\n )\n except:\n print('./sensing/'+folder+'/'+file)\n pass",
"./sensing/wifi_location/wifi_location_u54.csv\n"
],
[
"os.listdir('./sensing')",
"_____no_output_____"
],
[
"PROJECT_ID = 'ipads2020assignment8'\nd=[]\nfor folder in os.listdir('./sensing/'):\n a=[]\n for file in os.listdir('./sensing/'+folder):\n a.append(file.split('.')[0].split('_')[-1])\n d.append(a)\n ",
"_____no_output_____"
],
[
"\nimport json\n\nwith open('/home/suresh/Profile-Areas/Project7/studentLife_no-audio__no-wifi/studentLife/EMA/response/Sleep/Sleep_u00.json') as f:\n data = json.load(f)\n\n# Output: {'name': 'Bob', 'languages': ['English', 'Fench']}\nprint(data)",
"[{'null': '43.75908069,-72.32885314', 'resp_time': 1364114760}, {'null': '8', 'resp_time': 1364114765}, {'null': '1', 'resp_time': 1364114453}, {'null': '1', 'resp_time': 1364114775}, {'null': '3', 'resp_time': 1364114365}, {'null': '8', 'resp_time': 1364114761}, {'null': '8', 'resp_time': 1364114771}, {'null': '43.75885953,-72.32939114', 'resp_time': 1364148460}, {'hour': '8', 'location': '43.70692415,-72.2873929', 'rate': '1', 'resp_time': 1364237676, 'social': '1'}, {'hour': '5', 'location': '43.70708859,-72.28753397', 'rate': '3', 'resp_time': 1365010047, 'social': '2'}, {'hour': '8', 'location': '43.70759062,-72.28510298', 'rate': '1', 'resp_time': 1364334405, 'social': '1'}, {'hour': '7', 'location': '43.70393682,-72.30152284', 'rate': '2', 'resp_time': 1364321234, 'social': '1'}, {'hour': '8', 'location': '43.75932116,-72.32890406', 'rate': '3', 'resp_time': 1364357389, 'social': '1'}, {'hour': '8', 'location': '43.70688822,-72.28758629', 'rate': '1', 'resp_time': 1364408307, 'social': '1'}, {'hour': '6', 'location': '43.70528646,-72.28684485', 'rate': '2', 'resp_time': 1364437512, 'social': '1'}, {'hour': '7', 'location': '43.70567695,-72.2879286', 'rate': '2', 'resp_time': 1364490035, 'social': '1'}, {'hour': '4', 'location': '43.70466313,-72.30515871', 'rate': '3', 'resp_time': 1364572628, 'social': '1'}, {'hour': '8', 'location': '43.70671862,-72.28748696', 'rate': '1', 'resp_time': 1364522690, 'social': '1'}, {'hour': '4', 'location': '43.70647015,-72.28715547', 'rate': '3', 'resp_time': 1364577749, 'social': '1'}, {'hour': '8', 'location': '43.70777831,-72.28510534', 'rate': '1', 'resp_time': 1364583795, 'social': '1'}, {'hour': '9', 'location': '43.75914412,-72.32945574', 'rate': '2', 'resp_time': 1364750657, 'social': '1'}, {'hour': '3', 'location': '43.70677717,-72.2874797', 'rate': '4', 'resp_time': 1364836266, 'social': '1'}, {'hour': '10', 'location': '43.70708859,-72.28753397', 'rate': '2', 'resp_time': 1365095193, 'social': '2'}, {'hour': '4', 
'location': '43.70708859,-72.28753397', 'rate': '1', 'resp_time': 1365181215, 'social': '1'}, {'hour': '9', 'location': '43.70708859,-72.28753397', 'rate': '1', 'resp_time': 1365358039, 'social': '1'}, {'hour': '9', 'location': '43.70708859,-72.28753397', 'rate': '2', 'resp_time': 1365268350, 'social': '2'}, {'hour': '5', 'location': '43.70678333,-72.28754051', 'rate': '1', 'resp_time': 1365440432, 'social': '1'}, {'hour': '4', 'location': '43.70680961,-72.28757253', 'rate': '3', 'resp_time': 1365440444, 'social': '1'}, {'hour': '6', 'location': '43.64301116,-72.30784768', 'rate': '3', 'resp_time': 1365529523, 'social': '1'}, {'hour': '5', 'location': '43.705058,-72.28772623', 'rate': '1', 'resp_time': 1365614822, 'social': '1'}, {'hour': '8', 'location': '43.70414319,-72.28774882', 'rate': '1', 'resp_time': 1365701129, 'social': '1'}, {'hour': '3', 'location': '43.71253942,-72.30803315', 'rate': '1', 'resp_time': 1365786365, 'social': '1'}, {'hour': '3', 'location': '43.70661096,-72.28731884', 'rate': '1', 'resp_time': 1366045742, 'social': '1'}, {'hour': '14', 'location': 'Unknown', 'rate': '1', 'resp_time': 1365897960, 'social': '1'}, {'hour': '9', 'location': '43.75917269,-72.32894748', 'rate': '1', 'resp_time': 1365966931, 'social': '1'}, {'hour': '9', 'location': '43.75917269,-72.32894748', 'rate': '1', 'resp_time': 1365966938, 'social': '1'}, {'hour': '3', 'location': '43.70661096,-72.28731884', 'rate': '4', 'resp_time': 1366045748, 'social': '1'}, {'hour': '3', 'location': '43.70399343,-72.30226048', 'rate': '4', 'resp_time': 1366218064, 'social': '1'}, {'hour': '8', 'location': '41.64732181,-71.51018915', 'rate': '1', 'resp_time': 1366488754, 'social': '1'}, {'hour': '8', 'location': '43.75957917,-72.32886943', 'rate': '2', 'resp_time': 1366564205, 'social': '1'}, {'hour': '9', 'location': '43.75923622,-72.32892739', 'rate': '1', 'resp_time': 1366841234, 'social': '1'}, {'hour': '6', 'location': '43.71722994,-72.30901577', 'rate': '3', 'resp_time': 
1366739819, 'social': '1'}, {'hour': '3', 'location': '43.70661363,-72.28736513', 'rate': '4', 'resp_time': 1366650364, 'social': '1'}, {'hour': '5', 'location': '43.72475325,-72.31381984', 'rate': '3', 'resp_time': 1366909403, 'social': '1'}, {'hour': '3', 'location': '43.71346364,-72.30894875', 'rate': '4', 'resp_time': 1367429993, 'social': '1'}, {'hour': '3', 'location': '43.71346364,-72.30894875', 'rate': '4', 'resp_time': 1367429974, 'social': '1'}, {'hour': '3', 'location': '43.71346364,-72.30894875', 'rate': '4', 'resp_time': 1367429973, 'social': '1'}, {'hour': '11', 'location': '43.70665289,-72.28734948', 'rate': '1', 'resp_time': 1368208319, 'social': '1'}, {'hour': '15', 'location': '43.7336199,-72.31693218', 'rate': '1', 'resp_time': 1368466442, 'social': '1'}, {'hour': '10', 'location': '48.86274371,2.33147895', 'rate': '1', 'resp_time': 1367757820, 'social': '1'}, {'hour': '10', 'location': '43.75959375,-72.32893949', 'rate': '2', 'resp_time': 1368121503, 'social': '1'}, {'hour': '9', 'location': 'Unknown', 'rate': '1', 'resp_time': 1367851204, 'social': '1'}, {'hour': '6', 'location': '45.43592024,12.34452394', 'rate': '1', 'resp_time': 1368036984, 'social': '1'}, {'hour': '10', 'location': '42.62013314,-71.17654852', 'rate': '1', 'resp_time': 1368725594, 'social': '1'}, {'hour': '9', 'location': '43.7590614,-72.32948628', 'rate': '2', 'resp_time': 1368812382, 'social': '1'}, {'hour': '9', 'location': 'Unknown', 'rate': '2', 'resp_time': 1369334102, 'social': '1'}, {'hour': '9', 'location': '43.70251211,-72.2885559', 'rate': '2', 'resp_time': 1369421721, 'social': '2'}, {'hour': '7', 'location': '38.92580143,-77.05273194', 'rate': '3', 'resp_time': 1369502283, 'social': '1'}, {'hour': '12', 'location': '43.75908452,-72.32932991', 'rate': '2', 'resp_time': 1369593829, 'social': '1'}, {'hour': '9', 'location': '43.70602498,-72.28697831', 'rate': '3', 'resp_time': 1369763675, 'social': '3'}, {'hour': '1', 'location': 'Unknown', 'rate': '1', 
'resp_time': 1369847265, 'social': '1'}, {'hour': '10', 'location': '43.75946982,-72.32910245', 'rate': '2', 'resp_time': 1370019791, 'social': '1'}, {'hour': '9', 'location': '43.75938059,-72.32893392', 'rate': '1', 'resp_time': 1370195956, 'social': '1'}]\n"
],
[
"dataframe=pd.read_json('/home/suresh/Profile-Areas/Project7/studentLife_no-audio__no-wifi/studentLife/EMA/response/Sleep/Sleep_u00.json')",
"_____no_output_____"
],
[
"os.listdir('./EMA/response/Sleep')",
"_____no_output_____"
],
[
"PROJECT_ID = 'ipads2020assignment8'\n\nfor file in os.listdir('./EMA/response/Lab/'):\n print('./EMA/response/Sleep/'+file)\n data= pd.read_json('./EMA/response/Lab/'+file)\n columns={x:'_'.join(x.split()) for x in data.columns}\n data=data.rename(columns=columns)\n try:\n pandas_gbq.to_gbq(\n data, 'Students.'+file.split('.')[0], project_id=PROJECT_ID, if_exists='fail',\n)\n except:\n print('except','./EMA/response/Lab/'+file)\n pass",
"./EMA/response/Sleep/Lab_u30.json\n"
],
[
"for folder in os.listdir('./EMA/response/'):\n for file in os.listdir('./EMA/response/'+folder):\n print('./EMA/response/'+folder+'/'+file)\n data= pd.read_json('./EMA/response/'+folder+'/'+file)\n columns={x:'_'.join(x.split()) for x in data.columns}\n data=data.rename(columns=columns)\n try:\n pandas_gbq.to_gbq(\n data, 'Students.'+file.split('.')[0], project_id=PROJECT_ID, if_exists='fail',\n )\n except:\n print('except','./EMA/response/'+folder+'/'+file)\n pass",
"./EMA/response/Class/Class_u39.json\n"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a2282faca7b1d08dcdc8942458b781ce48ba882
| 212,675 |
ipynb
|
Jupyter Notebook
|
20201029_generate_TFRecord.ipynb
|
thjeong/dacon_202011_classify_landmarks
|
e8f998dfc7539dd9a9497d61aa0f32bff080f7ef
|
[
"MIT"
] | 2 |
2020-11-05T09:05:49.000Z
|
2020-11-18T08:51:47.000Z
|
20201029_generate_TFRecord.ipynb
|
thjeong/dacon_202011_classify_landmarks
|
e8f998dfc7539dd9a9497d61aa0f32bff080f7ef
|
[
"MIT"
] | null | null | null |
20201029_generate_TFRecord.ipynb
|
thjeong/dacon_202011_classify_landmarks
|
e8f998dfc7539dd9a9497d61aa0f32bff080f7ef
|
[
"MIT"
] | 1 |
2020-11-05T09:06:01.000Z
|
2020-11-05T09:06:01.000Z
| 182.710481 | 175,540 | 0.891623 |
[
[
[
"# 빠른 학습을 위한 tfrecords 데이터셋 생성\n- 컴페티션 기본 데이터는 data/public 하위 폴더에 있다고 가정합니다. (train.csv, sample_submission.csv, etc)\n- 또한 train.zip, test.zip 역시 data/public 하위에 압축을 풀어놓았다고 가정하고 시작하겠습니다.",
"_____no_output_____"
]
],
[
[
"import os\nimport os.path as pth\nimport json\nimport shutil\nimport pandas as pd\nfrom tqdm import tqdm\n\ndata_base_path = pth.join('data', 'public') \nos.makedirs(data_base_path, exist_ok=True)",
"_____no_output_____"
],
[
"category_csv_name = 'category.csv'\ncategory_json_name = 'category.json'\nsubmission_csv_name = 'sample_submisstion.csv'\ntrain_csv_name = 'train.csv'\n\ntrain_zip_name = 'train.zip'\ntest_zip_name = 'test.zip'",
"_____no_output_____"
]
],
[
[
"일단 모든 jpg 파일을 한 경로에 놓고 작업하기 편하게 하는 방식입니다. \n파일이 많다보니 파일 옮기는 작업을 쉘 한줄로 하려니 명령어가 너무 길어져 오류가 발생힙니다. \n조금 번거롭더라도 하나씩 가져와서 한 경로 이하에 놓도록 하였습니다.",
"_____no_output_____"
]
],
[
[
"train_data_path = pth.join(data_base_path, 'train')\ntest_data_path = pth.join(data_base_path, 'test')\n\nif not pth.exists(train_data_path):\n os.system('unzip {}/{} -d {}'.format(data_base_path, train_zip_name, train_data_path))\n # os.system('mv {}/*/*/* {}'.format(train_data_path, train_data_path))\n place_name_list = [name for name in os.listdir(train_data_path) if not name.endswith('.JPG')]\n for place_name in place_name_list:\n place_fullpath = pth.join(train_data_path, place_name)\n landmark_name_list = os.listdir(place_fullpath)\n for landmark_name in landmark_name_list:\n landmark_fullpath = pth.join(place_fullpath, landmark_name)\n image_name_list = os.listdir(landmark_fullpath)\n for image_name in image_name_list:\n image_fullpath = pth.join(landmark_fullpath, image_name)\n if not image_fullpath.endswith('.JPG'):\n continue\n shutil.move(image_fullpath, train_data_path)\n\nif not pth.exists(test_data_path):\n os.system('unzip {}/{} -d {}'.format(data_base_path, test_zip_name, test_data_path))\n # os.system('mv {}/*/* {}'.format(test_data_path, test_data_path))\n temp_name_list = [name for name in os.listdir(test_data_path) if not name.endswith('.JPG')]\n for temp_name in temp_name_list:\n temp_fullpath = pth.join(test_data_path, temp_name)\n image_name_list = os.listdir(temp_fullpath)\n for image_name in image_name_list:\n image_fullpath = pth.join(temp_fullpath, image_name)\n if not image_fullpath.endswith('.JPG'):\n continue\n shutil.move(image_fullpath, test_data_path)",
"_____no_output_____"
],
[
"train_csv_path = pth.join(data_base_path, train_csv_name)\ntrain_df = pd.read_csv(train_csv_path)\ntrain_dict = {k:v for k, v in train_df.values}\n\nsubmission_csv_path = pth.join(data_base_path, submission_csv_name)\nsubmission_df = pd.read_csv(submission_csv_path)\n# submission_df.head()\n\ntrain_df.head()",
"_____no_output_____"
],
[
"### Check all file is exist\n\nfor basename in tqdm(train_df['id']):\n if not pth.exists(pth.join(train_data_path, basename+'.JPG')):\n print(basename)\n\nfor basename in tqdm(submission_df['id']):\n if not pth.exists(pth.join(test_data_path, basename+'.JPG')):\n print(basename) ",
"100%|██████████| 88102/88102 [00:00<00:00, 193567.81it/s]\n100%|██████████| 37964/37964 [00:00<00:00, 332545.77it/s]\n"
],
[
"category_csv_path = pth.join(data_base_path, category_csv_name)\ncategory_df = pd.read_csv(category_csv_path)\ncategory_dict = {k:v for k, v in category_df.values}\ncategory_df.head()",
"_____no_output_____"
],
[
"# category_json_path = pth.join(data_base_path, category_json_name)\n# with open(category_json_path) as f:\n# category_dict = json.load(f)\n# category_dict",
"_____no_output_____"
]
],
[
[
"## 2. 추출한 csv와 생성한 이미지를 기반으로 tfrecord 생성",
"_____no_output_____"
],
[
"데이터를 읽는 오버헤드를 줄이기 위해 학습 데이터를 tfrecord형태로 새로 생성합니다",
"_____no_output_____"
]
],
[
[
"!pip install tensorflow",
"Collecting tensorflow\n Downloading tensorflow-2.3.1-cp38-cp38-manylinux2010_x86_64.whl (320.5 MB)\n\u001b[K |████████████████████████████████| 320.5 MB 20 kB/s s eta 0:00:01 |█████████████▍ | 134.6 MB 10.7 MB/s eta 0:00:18 |█████████████████████████████▍ | 294.1 MB 11.8 MB/s eta 0:00:03\n\u001b[?25hCollecting opt-einsum>=2.3.2\n Downloading opt_einsum-3.3.0-py3-none-any.whl (65 kB)\n\u001b[K |████████████████████████████████| 65 kB 6.0 MB/s eta 0:00:01\n\u001b[?25hCollecting protobuf>=3.9.2\n Downloading protobuf-3.13.0-cp38-cp38-manylinux1_x86_64.whl (1.3 MB)\n\u001b[K |████████████████████████████████| 1.3 MB 10.8 MB/s eta 0:00:01\n\u001b[?25hRequirement already satisfied: wrapt>=1.11.1 in /home/xikizima/anaconda3/lib/python3.8/site-packages (from tensorflow) (1.11.2)\nRequirement already satisfied: six>=1.12.0 in /home/xikizima/anaconda3/lib/python3.8/site-packages (from tensorflow) (1.15.0)\nCollecting gast==0.3.3\n Downloading gast-0.3.3-py2.py3-none-any.whl (9.7 kB)\nCollecting grpcio>=1.8.6\n Downloading grpcio-1.33.2-cp38-cp38-manylinux2014_x86_64.whl (3.8 MB)\n\u001b[K |████████████████████████████████| 3.8 MB 9.1 MB/s eta 0:00:01\n\u001b[?25hCollecting google-pasta>=0.1.8\n Downloading google_pasta-0.2.0-py3-none-any.whl (57 kB)\n\u001b[K |████████████████████████████████| 57 kB 6.6 MB/s eta 0:00:01\n\u001b[?25hCollecting absl-py>=0.7.0\n Downloading absl_py-0.11.0-py3-none-any.whl (127 kB)\n\u001b[K |████████████████████████████████| 127 kB 11.0 MB/s eta 0:00:01\n\u001b[?25hCollecting astunparse==1.6.3\n Downloading astunparse-1.6.3-py2.py3-none-any.whl (12 kB)\nCollecting termcolor>=1.1.0\n Downloading termcolor-1.1.0.tar.gz (3.9 kB)\nRequirement already satisfied: wheel>=0.26 in /home/xikizima/anaconda3/lib/python3.8/site-packages (from tensorflow) (0.34.2)\nRequirement already satisfied: h5py<2.11.0,>=2.10.0 in /home/xikizima/anaconda3/lib/python3.8/site-packages (from tensorflow) (2.10.0)\nCollecting keras-preprocessing<1.2,>=1.1.1\n Downloading 
Keras_Preprocessing-1.1.2-py2.py3-none-any.whl (42 kB)\n\u001b[K |████████████████████████████████| 42 kB 2.1 MB/s eta 0:00:01\n\u001b[?25hCollecting tensorflow-estimator<2.4.0,>=2.3.0\n Downloading tensorflow_estimator-2.3.0-py2.py3-none-any.whl (459 kB)\n\u001b[K |████████████████████████████████| 459 kB 13.3 MB/s eta 0:00:01\n\u001b[?25hCollecting tensorboard<3,>=2.3.0\n Downloading tensorboard-2.3.0-py3-none-any.whl (6.8 MB)\n\u001b[K |████████████████████████████████| 6.8 MB 9.0 MB/s eta 0:00:01\n\u001b[?25hRequirement already satisfied: numpy<1.19.0,>=1.16.0 in /home/xikizima/anaconda3/lib/python3.8/site-packages (from tensorflow) (1.18.5)\nRequirement already satisfied: setuptools in /home/xikizima/anaconda3/lib/python3.8/site-packages (from protobuf>=3.9.2->tensorflow) (49.2.0.post20200714)\nCollecting google-auth<2,>=1.6.3\n Downloading google_auth-1.23.0-py2.py3-none-any.whl (114 kB)\n\u001b[K |████████████████████████████████| 114 kB 11.9 MB/s eta 0:00:01\n\u001b[?25hRequirement already satisfied: werkzeug>=0.11.15 in /home/xikizima/anaconda3/lib/python3.8/site-packages (from tensorboard<3,>=2.3.0->tensorflow) (1.0.1)\nCollecting markdown>=2.6.8\n Downloading Markdown-3.3.3-py3-none-any.whl (96 kB)\n\u001b[K |████████████████████████████████| 96 kB 8.8 MB/s eta 0:00:01\n\u001b[?25hRequirement already satisfied: requests<3,>=2.21.0 in /home/xikizima/anaconda3/lib/python3.8/site-packages (from tensorboard<3,>=2.3.0->tensorflow) (2.24.0)\nCollecting tensorboard-plugin-wit>=1.6.0\n Downloading tensorboard_plugin_wit-1.7.0-py3-none-any.whl (779 kB)\n\u001b[K |████████████████████████████████| 779 kB 15.1 MB/s eta 0:00:01\n\u001b[?25hCollecting google-auth-oauthlib<0.5,>=0.4.1\n Downloading google_auth_oauthlib-0.4.2-py2.py3-none-any.whl (18 kB)\nCollecting rsa<5,>=3.1.4; python_version >= \"3.5\"\n Downloading rsa-4.6-py3-none-any.whl (47 kB)\n\u001b[K |████████████████████████████████| 47 kB 4.5 MB/s eta 0:00:011\n\u001b[?25hCollecting 
pyasn1-modules>=0.2.1\n Downloading pyasn1_modules-0.2.8-py2.py3-none-any.whl (155 kB)\n\u001b[K |████████████████████████████████| 155 kB 12.2 MB/s eta 0:00:01\n\u001b[?25hCollecting cachetools<5.0,>=2.0.0\n Downloading cachetools-4.1.1-py3-none-any.whl (10 kB)\nRequirement already satisfied: idna<3,>=2.5 in /home/xikizima/anaconda3/lib/python3.8/site-packages (from requests<3,>=2.21.0->tensorboard<3,>=2.3.0->tensorflow) (2.10)\nRequirement already satisfied: certifi>=2017.4.17 in /home/xikizima/anaconda3/lib/python3.8/site-packages (from requests<3,>=2.21.0->tensorboard<3,>=2.3.0->tensorflow) (2020.6.20)\nRequirement already satisfied: chardet<4,>=3.0.2 in /home/xikizima/anaconda3/lib/python3.8/site-packages (from requests<3,>=2.21.0->tensorboard<3,>=2.3.0->tensorflow) (3.0.4)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /home/xikizima/anaconda3/lib/python3.8/site-packages (from requests<3,>=2.21.0->tensorboard<3,>=2.3.0->tensorflow) (1.25.9)\nCollecting requests-oauthlib>=0.7.0\n Downloading requests_oauthlib-1.3.0-py2.py3-none-any.whl (23 kB)\nCollecting pyasn1>=0.1.3\n Downloading pyasn1-0.4.8-py2.py3-none-any.whl (77 kB)\n\u001b[K |████████████████████████████████| 77 kB 6.8 MB/s eta 0:00:01\n\u001b[?25hCollecting oauthlib>=3.0.0\n Downloading oauthlib-3.1.0-py2.py3-none-any.whl (147 kB)\n\u001b[K |████████████████████████████████| 147 kB 13.4 MB/s eta 0:00:01\n\u001b[?25hBuilding wheels for collected packages: termcolor\n Building wheel for termcolor (setup.py) ... 
\u001b[?25ldone\n\u001b[?25h Created wheel for termcolor: filename=termcolor-1.1.0-py3-none-any.whl size=4830 sha256=8ee08ec9e1017d7fafe356d7842cdc9a6cdeb9087969dd694876a843c21290f1\n Stored in directory: /home/xikizima/.cache/pip/wheels/a0/16/9c/5473df82468f958445479c59e784896fa24f4a5fc024b0f501\nSuccessfully built termcolor\nInstalling collected packages: opt-einsum, protobuf, gast, grpcio, google-pasta, absl-py, astunparse, termcolor, keras-preprocessing, tensorflow-estimator, pyasn1, rsa, pyasn1-modules, cachetools, google-auth, markdown, tensorboard-plugin-wit, oauthlib, requests-oauthlib, google-auth-oauthlib, tensorboard, tensorflow\nSuccessfully installed absl-py-0.11.0 astunparse-1.6.3 cachetools-4.1.1 gast-0.3.3 google-auth-1.23.0 google-auth-oauthlib-0.4.2 google-pasta-0.2.0 grpcio-1.33.2 keras-preprocessing-1.1.2 markdown-3.3.3 oauthlib-3.1.0 opt-einsum-3.3.0 protobuf-3.13.0 pyasn1-0.4.8 pyasn1-modules-0.2.8 requests-oauthlib-1.3.0 rsa-4.6 tensorboard-2.3.0 tensorboard-plugin-wit-1.7.0 tensorflow-2.3.1 tensorflow-estimator-2.3.0 termcolor-1.1.0\n"
],
[
"!pip install opencv-python",
"Collecting opencv-python\n Downloading opencv_python-4.4.0.46-cp38-cp38-manylinux2014_x86_64.whl (49.5 MB)\n\u001b[K |████████████████████████████████| 49.5 MB 1.1 MB/s eta 0:00:01 |███████▎ | 11.2 MB 767 kB/s eta 0:00:50\n\u001b[?25hRequirement already satisfied: numpy>=1.17.3 in /home/xikizima/anaconda3/lib/python3.8/site-packages (from opencv-python) (1.18.5)\nInstalling collected packages: opencv-python\nSuccessfully installed opencv-python-4.4.0.46\n"
],
[
"import tensorflow as tf\nfrom tensorflow.keras.preprocessing import image\nimport cv2\n\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n\nfrom sklearn.model_selection import train_test_split, KFold, RepeatedKFold, GroupKFold, RepeatedStratifiedKFold\nfrom sklearn.utils import shuffle\n\nimport numpy as np\nimport pandas as pd\nimport os\nimport os.path as pth\nimport shutil\nimport time\nfrom tqdm import tqdm\n\nimport numpy as np\nfrom PIL import Image\n\nfrom IPython.display import clear_output\n\nfrom multiprocessing import Process, Queue\nimport datetime",
"_____no_output_____"
],
[
"def _bytes_feature(value):\n \"\"\"Returns a bytes_list from a string / byte.\"\"\"\n if isinstance(value, type(tf.constant(0))):\n value = value.numpy() # BytesList won't unpack a string from an EagerTensor.\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\ndef _float_feature(value):\n \"\"\"Returns a float_list from a float / double.\"\"\"\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))\n\ndef _floatarray_feature(array):\n \"\"\"Returns a float_list from a float / double.\"\"\"\n return tf.train.Feature(float_list=tf.train.FloatList(value=array))\n\ndef _int64_feature(value):\n \"\"\"Returns an int64_list from a bool / enum / int / uint.\"\"\"\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\n\ndef _validate_text(text):\n \"\"\"If text is not str or unicode, then try to convert it to str.\"\"\"\n if isinstance(text, str):\n return text\n elif isinstance(text, 'unicode'):\n return text.encode('utf8', 'ignore')\n else:\n return str(text)\n\ndef to_tfrecords(id_list, randmark_id_list, tfrecords_name):\n print(\"Start converting\")\n options = tf.io.TFRecordOptions(compression_type = 'GZIP')\n with tf.io.TFRecordWriter(path=pth.join(tfrecords_name+'.tfrecords'), options=options) as writer:\n for id_, randmark_id in tqdm(zip(id_list, randmark_id_list), total=len(id_list), position=0, leave=True):\n image_path = pth.join(train_data_path, id_ + '.JPG')\n _binary_image = tf.io.read_file(image_path)\n\n string_set = tf.train.Example(features=tf.train.Features(feature={\n 'image_raw': _bytes_feature(_binary_image),\n 'randmark_id': _int64_feature(randmark_id),\n 'id': _bytes_feature(id_.encode()),\n }))\n\n writer.write(string_set.SerializeToString()) ",
"_____no_output_____"
]
],
[
[
"Training 때 사용할 validation을 분리합니다. (Train:0.8, Validation:0.2) ",
"_____no_output_____"
]
],
[
[
"train_ids, val_ids, train_landmark_ids, val_landmark_ids = train_test_split(train_df['id'], train_df['landmark_id'], test_size=0.2, random_state=7777, shuffle=True)\n\nto_tfrecords(train_ids, train_landmark_ids, pth.join(data_base_path, 'all_train'))\nto_tfrecords(val_ids, val_landmark_ids, pth.join(data_base_path, 'all_val'))",
" 0%| | 3/70481 [00:00<41:27, 28.33it/s]"
]
],
[
[
"Testset 또한 속도를 위해 tfrecord 형태로 변환해줍니다.",
"_____no_output_____"
]
],
[
[
"def to_test_tfrecords(id_list, tfrecords_name):\n print(\"Start converting\")\n options = tf.io.TFRecordOptions(compression_type = 'GZIP')\n with tf.io.TFRecordWriter(path=pth.join(tfrecords_name+'.tfrecords'), options=options) as writer:\n for id_ in tqdm(id_list, total=len(id_list), position=0, leave=True):\n image_path = pth.join(test_data_path, id_+'.JPG')\n _binary_image = tf.io.read_file(image_path)\n\n string_set = tf.train.Example(features=tf.train.Features(feature={\n 'image_raw': _bytes_feature(_binary_image),\n # 'randmark_id': _int64_feature(randmark_id),\n 'id': _bytes_feature(id_.encode()),\n }))\n\n writer.write(string_set.SerializeToString()) ",
"_____no_output_____"
],
[
"test_ids = submission_df['id']\nto_test_tfrecords(test_ids, pth.join(data_base_path, 'test'))",
" 0%| | 15/37964 [00:00<04:19, 145.99it/s]"
]
],
[
[
"### Usage",
"_____no_output_____"
]
],
[
[
"train_tfrecord_path = pth.join(data_base_path, 'all_train.tfrecords')\nval_tfrecord_path = pth.join(data_base_path, 'all_val.tfrecords')\ntest_tfrecord_path = pth.join(data_base_path, 'test.tfrecords')\n\nBUFFER_SIZE = 256\nBATCH_SIZE = 64\nNUM_CLASS = 1049",
"_____no_output_____"
],
[
"image_feature_description = {\n 'image_raw': tf.io.FixedLenFeature([], tf.string),\n 'randmark_id': tf.io.FixedLenFeature([], tf.int64),\n # 'id': tf.io.FixedLenFeature([], tf.string),\n}\n\ndef _parse_image_function(example_proto):\n return tf.io.parse_single_example(example_proto, image_feature_description)\n\ndef map_func(target_record):\n img = target_record['image_raw']\n label = target_record['randmark_id']\n img = tf.image.decode_jpeg(img, channels=3)\n img = tf.dtypes.cast(img, tf.float32)\n return img, label\n\ndef prep_func(image, label):\n result_image = image / 255\n # result_image = tf.image.resize(image, (300, 300))\n onehot_label = tf.one_hot(label, depth=NUM_CLASS)\n return result_image, onehot_label",
"_____no_output_____"
],
[
"dataset = tf.data.TFRecordDataset(train_tfrecord_path, compression_type='GZIP')\ndataset = dataset.map(_parse_image_function, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n# dataset = dataset.cache()\ndataset = dataset.map(map_func, num_parallel_calls=tf.data.experimental.AUTOTUNE)\ndataset = dataset.shuffle(BUFFER_SIZE)\ndataset = dataset.batch(BATCH_SIZE)\ndataset = dataset.map(prep_func, num_parallel_calls=tf.data.experimental.AUTOTUNE)\ndataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)",
"_____no_output_____"
],
[
"target_class = np.argmax(batch_y.numpy(), axis=1)\ntarget_class",
"_____no_output_____"
],
[
"for batch_x, batch_y in dataset:\n print(batch_x.shape, batch_y.shape)\n\n target_class = np.argmax(batch_y[0].numpy())\n print(category_dict[target_class])\n plt.figure()\n plt.imshow(batch_x[0].numpy())\n # plt.title('{}'.format(category_dict[target_class]))\n plt.show()\n\n break",
"(64, 540, 960, 3) (64, 1049)\n서울풍물시장\n"
]
],
[
[
"### TFRecords vs Normal benchmark ",
"_____no_output_____"
],
[
"동일한 조건에서 순수한 파일 I/O 속도만을 비교하기 위해서 cache, prepetch, multiprocess와 같은 속도에 영향을 줄 수 있는 요소는 제외하고 측정하였습니다.",
"_____no_output_____"
],
[
"- TFRecords 사용 시",
"_____no_output_____"
]
],
[
[
"get_file(pth.join(data_base_path, 'all_train.tfrecords'))\nget_file(pth.join(data_base_path, 'all_val.tfrecords'))\nget_file(pth.join(data_base_path, 'test.tfrecords'))",
"_____no_output_____"
],
[
"dataset = tf.data.TFRecordDataset(train_tfrecord_path, compression_type='GZIP')\ndataset = dataset.map(_parse_image_function)",
"_____no_output_____"
],
[
"for _ in tqdm(dataset, position=0, leave=True):\n pass",
"70481it [05:43, 205.02it/s]\n"
]
],
[
[
"- 일반적인 jpg파일 사용 시",
"_____no_output_____"
]
],
[
[
"train_ids, val_ids, train_landmark_ids, val_landmark_ids = train_test_split(train_df['id'], train_df['landmark_id'], test_size=0.2, random_state=7777, shuffle=True)",
"_____no_output_____"
],
[
"def load_image(image_path, label):\n img = tf.io.read_file(image_path)\n img = tf.image.decode_jpeg(img, channels=3)\n img = tf.dtypes.cast(img, tf.float32)\n return img, label",
"_____no_output_____"
],
[
"train_tfrecord_array = np.array([pth.join(data_base_path, 'train', img_name+'.JPG') for img_name in train_ids.values])\n\ndataset = tf.data.Dataset.from_tensor_slices((train_tfrecord_array, train_landmark_ids))\ndataset = dataset.map(load_image)",
"_____no_output_____"
],
[
"for _ in tqdm(dataset, position=0, leave=True):\n pass",
"100%|██████████| 70481/70481 [14:40<00:00, 80.08it/s]\n"
]
],
[
[
"- 결과를 보았을 때, 5분 43초(TFRecords) vs 14분 40초(Normal)로 TFRecord를 사용하는 것이 3배 정도 더 빨랐습니다. \n- jpg 이미지가 속도가 더 오래 걸리는 이유는, jpg 방식으로 압축되어 있는 이미지를 raw 이미지로 해독하는데 걸리는 시간으로 인한 오버헤드로 추정됩니다.\n- 저 같은 경우 MobileNetV2 기반 모델이고, 코랩 T4 VGA 기준 학습 속도가 한 에폭에 8~9분정도 걸리는 상황이는 파일 I/O 속도가 전체 학습 속도에 미치는 영향은 상당히 큰 것으로 생각됩니다. \n- 또한 실제 사용에서는 Multiprocessing이나 prefetch와 같은 기능도 같이 사용하기 떄문에 이를 사용해서도 테스트 해보겠습니다.",
"_____no_output_____"
],
[
"- TFRecords 사용 시",
"_____no_output_____"
]
],
[
[
"dataset = tf.data.TFRecordDataset(train_tfrecord_path, compression_type='GZIP')\ndataset = dataset.map(_parse_image_function, num_parallel_calls=tf.data.experimental.AUTOTUNE)\ndataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)\n\nfor _ in tqdm(dataset, position=0, leave=True):\n pass",
"70481it [05:43, 205.04it/s]\n"
]
],
[
[
"- 일반적인 jpg로 로딩 시",
"_____no_output_____"
]
],
[
[
"dataset = tf.data.Dataset.from_tensor_slices((train_tfrecord_array, train_landmark_ids))\ndataset = dataset.map(load_image, num_parallel_calls=tf.data.experimental.AUTOTUNE)\ndataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)\n\nfor _ in tqdm(dataset, position=0, leave=True):\n pass",
"100%|██████████| 70481/70481 [05:46<00:00, 203.17it/s]\n"
]
],
[
[
"- 결과를 보았을 때, 놀랍게도 TFRecord를 사용하는 것과 일반 이미지 로딩 방식이 거의 동일한 시간을 보이는 것을 확인할 수 있습니다. \n- 코랩에서는 앞서 언급한 이미지 압축 해제에 대한 오버헤드를 tf.data의 부가기능을 활용하여 충분히 극복할 수 있는 것으로 보입니다.\n- 또한, TRRecord는 그냥 읽는 것과 동일한 시간이 걸리는 것으로 확인되는데, 이는 TFRecord로 읽는 방식 자체가 파일 I/O 이외에 별다른 오버헤드가 크게 없어서 그런 것으로 추정됩니다.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4a22954dc35275e4d718e726b0b27ef62ea32a8d
| 47,501 |
ipynb
|
Jupyter Notebook
|
batch-norm/Batch_Normalization_Solutions_mine.ipynb
|
rhombhi/deep-learning
|
f147cfc809de617494d385d8b7bd47f79e7e6151
|
[
"MIT"
] | null | null | null |
batch-norm/Batch_Normalization_Solutions_mine.ipynb
|
rhombhi/deep-learning
|
f147cfc809de617494d385d8b7bd47f79e7e6151
|
[
"MIT"
] | null | null | null |
batch-norm/Batch_Normalization_Solutions_mine.ipynb
|
rhombhi/deep-learning
|
f147cfc809de617494d385d8b7bd47f79e7e6151
|
[
"MIT"
] | null | null | null | 53.312009 | 475 | 0.622829 |
[
[
[
"# Batch Normalization – Solutions",
"_____no_output_____"
],
[
"Batch normalization is most useful when building deep neural networks. To demonstrate this, we'll create a convolutional neural network with 20 convolutional layers, followed by a fully connected layer. We'll use it to classify handwritten digits in the MNIST dataset, which should be familiar to you by now.\n\nThis is **not** a good network for classfying MNIST digits. You could create a _much_ simpler network and get _better_ results. However, to give you hands-on experience with batch normalization, we had to make an example that was:\n1. Complicated enough that training would benefit from batch normalization.\n2. Simple enough that it would train quickly, since this is meant to be a short exercise just to give you some practice adding batch normalization.\n3. Simple enough that the architecture would be easy to understand without additional resources.",
"_____no_output_____"
],
[
"This notebook includes two versions of the network that you can edit. The first uses higher level functions from the `tf.layers` package. The second is the same network, but uses only lower level functions in the `tf.nn` package.\n\n1. [Batch Normalization with `tf.layers.batch_normalization`](#example_1)\n2. [Batch Normalization with `tf.nn.batch_normalization`](#example_2)",
"_____no_output_____"
],
[
"The following cell loads TensorFlow, downloads the MNIST dataset if necessary, and loads it into an object named `mnist`. You'll need to run this cell before running anything else in the notebook.",
"_____no_output_____"
]
],
[
[
"import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True, reshape=False)",
"WARNING:tensorflow:From <ipython-input-1-913023a6a8bd>:3: read_data_sets (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use alternatives such as official/mnist/dataset.py from tensorflow/models.\nWARNING:tensorflow:From /Applications/anaconda/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease write your own downloading logic.\nWARNING:tensorflow:From /Applications/anaconda/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use tf.data to implement this functionality.\nExtracting MNIST_data/train-images-idx3-ubyte.gz\nWARNING:tensorflow:From /Applications/anaconda/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:267: extract_labels (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use tf.data to implement this functionality.\nExtracting MNIST_data/train-labels-idx1-ubyte.gz\nWARNING:tensorflow:From /Applications/anaconda/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:110: dense_to_one_hot (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use tf.one_hot on tensors.\nExtracting MNIST_data/t10k-images-idx3-ubyte.gz\nExtracting MNIST_data/t10k-labels-idx1-ubyte.gz\nWARNING:tensorflow:From /Applications/anaconda/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:290: 
DataSet.__init__ (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use alternatives such as official/mnist/dataset.py from tensorflow/models.\n"
]
],
[
[
"# Batch Normalization using `tf.layers.batch_normalization`<a id=\"example_1\"></a>\n\nThis version of the network uses `tf.layers` for almost everything, and expects you to implement batch normalization using [`tf.layers.batch_normalization`](https://www.tensorflow.org/api_docs/python/tf/layers/batch_normalization) ",
"_____no_output_____"
],
[
"We'll use the following function to create fully connected layers in our network. We'll create them with the specified number of neurons and a ReLU activation function.\n\nThis version of the function does not include batch normalization.",
"_____no_output_____"
]
],
[
[
"\"\"\"\nDO NOT MODIFY THIS CELL\n\"\"\"\ndef fully_connected(prev_layer, num_units):\n \"\"\"\n Create a fully connectd layer with the given layer as input and the given number of neurons.\n \n :param prev_layer: Tensor\n The Tensor that acts as input into this layer\n :param num_units: int\n The size of the layer. That is, the number of units, nodes, or neurons.\n :returns Tensor\n A new fully connected layer\n \"\"\"\n layer = tf.layers.dense(prev_layer, num_units, activation=tf.nn.relu)\n return layer",
"_____no_output_____"
]
],
[
[
"We'll use the following function to create convolutional layers in our network. They are very basic: we're always using a 3x3 kernel, ReLU activation functions, strides of 1x1 on layers with odd depths, and strides of 2x2 on layers with even depths. We aren't bothering with pooling layers at all in this network.\n\nThis version of the function does not include batch normalization.",
"_____no_output_____"
]
],
[
[
"\"\"\"\nDO NOT MODIFY THIS CELL\n\"\"\"\ndef conv_layer(prev_layer, layer_depth):\n \"\"\"\n Create a convolutional layer with the given layer as input.\n \n :param prev_layer: Tensor\n The Tensor that acts as input into this layer\n :param layer_depth: int\n We'll set the strides and number of feature maps based on the layer's depth in the network.\n This is *not* a good way to make a CNN, but it helps us create this example with very little code.\n :returns Tensor\n A new convolutional layer\n \"\"\"\n strides = 2 if layer_depth % 3 == 0 else 1\n conv_layer = tf.layers.conv2d(prev_layer, layer_depth*4, 3, strides, 'same', activation=tf.nn.relu)\n return conv_layer",
"_____no_output_____"
]
],
[
[
"**Run the following cell**, along with the earlier cells (to load the dataset and define the necessary functions). \n\nThis cell builds the network **without** batch normalization, then trains it on the MNIST dataset. It displays loss and accuracy data periodically while training.",
"_____no_output_____"
]
],
[
[
"\"\"\"\nDO NOT MODIFY THIS CELL\n\"\"\"\ndef train(num_batches, batch_size, learning_rate):\n # Build placeholders for the input samples and labels \n inputs = tf.placeholder(tf.float32, [None, 28, 28, 1])\n labels = tf.placeholder(tf.float32, [None, 10])\n \n # Feed the inputs into a series of 20 convolutional layers \n layer = inputs\n for layer_i in range(1, 20):\n layer = conv_layer(layer, layer_i)\n\n # Flatten the output from the convolutional layers \n orig_shape = layer.get_shape().as_list()\n layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]])\n\n # Add one fully connected layer\n layer = fully_connected(layer, 100)\n\n # Create the output layer with 1 node for each \n logits = tf.layers.dense(layer, 10)\n \n # Define \n model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))\n \n train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)\n \n correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n \n # Train and test the network\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for batch_i in range(num_batches):\n batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n\n # train this batch\n sess.run(train_opt, {inputs: batch_xs, \n labels: batch_ys})\n \n # Periodically check the validation or training loss and accuracy\n if batch_i % 100 == 0:\n loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images,\n labels: mnist.validation.labels})\n print('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc))\n elif batch_i % 25 == 0:\n loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, labels: batch_ys})\n print('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc))\n\n # At the end, score the final accuracy for both the validation and test 
sets\n acc = sess.run(accuracy, {inputs: mnist.validation.images,\n labels: mnist.validation.labels})\n print('Final validation accuracy: {:>3.5f}'.format(acc))\n acc = sess.run(accuracy, {inputs: mnist.test.images,\n labels: mnist.test.labels})\n print('Final test accuracy: {:>3.5f}'.format(acc))\n \n # Score the first 100 test images individually, just to make sure batch normalization really worked\n correct = 0\n for i in range(100):\n correct += sess.run(accuracy,feed_dict={inputs: [mnist.test.images[i]],\n labels: [mnist.test.labels[i]]})\n\n print(\"Accuracy on 100 samples:\", correct/100)\n\n\nnum_batches = 800\nbatch_size = 64\nlearning_rate = 0.002\n\ntf.reset_default_graph()\nwith tf.Graph().as_default():\n train(num_batches, batch_size, learning_rate)",
"Batch: 0: Validation loss: 0.69073, Validation accuracy: 0.09580\nBatch: 25: Training loss: 0.34228, Training accuracy: 0.09375\nBatch: 50: Training loss: 0.32613, Training accuracy: 0.10938\nBatch: 75: Training loss: 0.32439, Training accuracy: 0.10938\nBatch: 100: Validation loss: 0.32646, Validation accuracy: 0.09860\nBatch: 125: Training loss: 0.33011, Training accuracy: 0.06250\nBatch: 150: Training loss: 0.32706, Training accuracy: 0.07812\nBatch: 175: Training loss: 0.32637, Training accuracy: 0.10938\nBatch: 200: Validation loss: 0.32578, Validation accuracy: 0.11260\nBatch: 225: Training loss: 0.32910, Training accuracy: 0.06250\nBatch: 250: Training loss: 0.32326, Training accuracy: 0.15625\nBatch: 275: Training loss: 0.32758, Training accuracy: 0.07812\nBatch: 300: Validation loss: 0.32534, Validation accuracy: 0.11260\nBatch: 325: Training loss: 0.32614, Training accuracy: 0.09375\nBatch: 350: Training loss: 0.32535, Training accuracy: 0.09375\nBatch: 375: Training loss: 0.32369, Training accuracy: 0.12500\nBatch: 400: Validation loss: 0.32542, Validation accuracy: 0.10020\nBatch: 425: Training loss: 0.32607, Training accuracy: 0.09375\nBatch: 450: Training loss: 0.32518, Training accuracy: 0.07812\nBatch: 475: Training loss: 0.32603, Training accuracy: 0.09375\nBatch: 500: Validation loss: 0.32547, Validation accuracy: 0.11260\nBatch: 525: Training loss: 0.32534, Training accuracy: 0.12500\nBatch: 550: Training loss: 0.32686, Training accuracy: 0.07812\nBatch: 575: Training loss: 0.32492, Training accuracy: 0.07812\nBatch: 600: Validation loss: 0.32540, Validation accuracy: 0.09580\nBatch: 625: Training loss: 0.32462, Training accuracy: 0.09375\nBatch: 650: Training loss: 0.32704, Training accuracy: 0.09375\nBatch: 675: Training loss: 0.32967, Training accuracy: 0.06250\nBatch: 700: Validation loss: 0.32521, Validation accuracy: 0.11260\nBatch: 725: Training loss: 0.32479, Training accuracy: 0.09375\nBatch: 750: Training loss: 0.32452, Training 
accuracy: 0.10938\nBatch: 775: Training loss: 0.32749, Training accuracy: 0.09375\nFinal validation accuracy: 0.11000\nFinal test accuracy: 0.10280\nAccuracy on 100 samples: 0.15\n"
]
],
[
[
"With this many layers, it's going to take a lot of iterations for this network to learn. By the time you're done training these 800 batches, your final test and validation accuracies probably won't be much better than 10%. (It will be different each time, but will most likely be less than 15%.)\n\nUsing batch normalization, you'll be able to train this same network to over 90% in that same number of batches.\n\n# Add batch normalization\n\nTo add batch normalization to the layers created by `fully_connected`, we did the following:\n1. Added the `is_training` parameter to the function signature so we can pass that information to the batch normalization layer.\n2. Removed the bias and activation function from the `dense` layer.\n3. Used `tf.layers.batch_normalization` to normalize the layer's output. Notice we pass `is_training` to this layer to ensure the network updates its population statistics appropriately.\n4. Passed the normalized values into a ReLU activation function.",
"_____no_output_____"
]
],
[
[
"def fully_connected(prev_layer, num_units, is_training):\n \"\"\"\n Create a fully connectd layer with the given layer as input and the given number of neurons.\n \n :param prev_layer: Tensor\n The Tensor that acts as input into this layer\n :param num_units: int\n The size of the layer. That is, the number of units, nodes, or neurons.\n :param is_training: bool or Tensor\n Indicates whether or not the network is currently training, which tells the batch normalization\n layer whether or not it should update or use its population statistics.\n :returns Tensor\n A new fully connected layer\n \"\"\"\n layer = tf.layers.dense(prev_layer, num_units, use_bias=False, activation=None)\n layer = tf.layers.batch_normalization(layer, training=is_training)\n layer = tf.nn.relu(layer)\n return layer",
"_____no_output_____"
]
],
[
[
"To add batch normalization to the layers created by `conv_layer`, we did the following:\n1. Added the `is_training` parameter to the function signature so we can pass that information to the batch normalization layer.\n2. Removed the bias and activation function from the `conv2d` layer.\n3. Used `tf.layers.batch_normalization` to normalize the convolutional layer's output. Notice we pass `is_training` to this layer to ensure the network updates its population statistics appropriately.\n4. Passed the normalized values into a ReLU activation function.\n\nIf you compare this function to `fully_connected`, you'll see that – when using `tf.layers` – there really isn't any difference between normalizing a fully connected layer and a convolutional layer. However, if you look at the second example in this notebook, where we restrict ourselves to the `tf.nn` package, you'll see a small difference.",
"_____no_output_____"
]
],
[
[
"def conv_layer(prev_layer, layer_depth, is_training):\n \"\"\"\n Create a convolutional layer with the given layer as input.\n \n :param prev_layer: Tensor\n The Tensor that acts as input into this layer\n :param layer_depth: int\n We'll set the strides and number of feature maps based on the layer's depth in the network.\n This is *not* a good way to make a CNN, but it helps us create this example with very little code.\n :param is_training: bool or Tensor\n Indicates whether or not the network is currently training, which tells the batch normalization\n layer whether or not it should update or use its population statistics.\n :returns Tensor\n A new convolutional layer\n \"\"\"\n strides = 2 if layer_depth % 3 == 0 else 1\n conv_layer = tf.layers.conv2d(prev_layer, layer_depth*4, 3, strides, 'same', use_bias=False, activation=None)\n conv_layer = tf.layers.batch_normalization(conv_layer, training=is_training)\n conv_layer = tf.nn.relu(conv_layer)\n\n return conv_layer",
"_____no_output_____"
]
],
[
[
"Batch normalization is still a new enough idea that researchers are still discovering how best to use it. In general, people seem to agree to remove the layer's bias (because the batch normalization already has terms for scaling and shifting) and add batch normalization _before_ the layer's non-linear activation function. However, for some networks it will work well in other ways, too. \n\nJust to demonstrate this point, the following three versions of `conv_layer` show other ways to implement batch normalization. If you try running with any of these versions of the function, they should all still work fine (although some versions may still work better than others). \n\n**Alternate solution that uses bias in the convolutional layer but still adds batch normalization before the ReLU activation function.**",
"_____no_output_____"
]
],
[
[
"def conv_layer(prev_layer, layer_num, is_training):\n strides = 2 if layer_num % 3 == 0 else 1\n conv_layer = tf.layers.conv2d(prev_layer, layer_num*4, 3, strides, 'same', use_bias=True, activation=None)\n conv_layer = tf.layers.batch_normalization(conv_layer, training=is_training)\n conv_layer = tf.nn.relu(conv_layer)\n return conv_layer",
"_____no_output_____"
]
],
[
[
"**Alternate solution that uses a bias and ReLU activation function _before_ batch normalization.**",
"_____no_output_____"
]
],
[
[
"def conv_layer(prev_layer, layer_num, is_training):\n strides = 2 if layer_num % 3 == 0 else 1\n conv_layer = tf.layers.conv2d(prev_layer, layer_num*4, 3, strides, 'same', use_bias=True, activation=tf.nn.relu)\n conv_layer = tf.layers.batch_normalization(conv_layer, training=is_training)\n return conv_layer",
"_____no_output_____"
]
],
[
[
"**Alternate solution that uses a ReLU activation function _before_ normalization, but no bias.**",
"_____no_output_____"
]
],
[
[
"def conv_layer(prev_layer, layer_num, is_training):\n strides = 2 if layer_num % 3 == 0 else 1\n conv_layer = tf.layers.conv2d(prev_layer, layer_num*4, 3, strides, 'same', use_bias=False, activation=tf.nn.relu)\n conv_layer = tf.layers.batch_normalization(conv_layer, training=is_training)\n return conv_layer",
"_____no_output_____"
]
],
[
[
"To modify `train`, we did the following:\n1. Added `is_training`, a placeholder to store a boolean value indicating whether or not the network is training.\n2. Passed `is_training` to the `conv_layer` and `fully_connected` functions.\n3. Each time we call `run` on the session, we added to `feed_dict` the appropriate value for `is_training`.\n4. Moved the creation of `train_opt` inside a `with tf.control_dependencies...` statement. This is necessary to get the normalization layers created with `tf.layers.batch_normalization` to update their population statistics, which we need when performing inference.",
"_____no_output_____"
]
],
[
[
"def train(num_batches, batch_size, learning_rate):\n # Build placeholders for the input samples and labels \n inputs = tf.placeholder(tf.float32, [None, 28, 28, 1])\n labels = tf.placeholder(tf.float32, [None, 10])\n\n # Add placeholder to indicate whether or not we're training the model\n is_training = tf.placeholder(tf.bool)\n\n # Feed the inputs into a series of 20 convolutional layers \n layer = inputs\n for layer_i in range(1, 20):\n layer = conv_layer(layer, layer_i, is_training)\n\n # Flatten the output from the convolutional layers \n orig_shape = layer.get_shape().as_list()\n layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]])\n\n # Add one fully connected layer\n layer = fully_connected(layer, 100, is_training)\n\n # Create the output layer with 1 node for each \n logits = tf.layers.dense(layer, 10)\n \n # Define loss and training operations\n model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))\n \n # Tell TensorFlow to update the population statistics while training\n with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\n train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)\n \n # Create operations to test accuracy\n correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n \n # Train and test the network\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for batch_i in range(num_batches):\n batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n\n # train this batch\n sess.run(train_opt, {inputs: batch_xs, labels: batch_ys, is_training: True})\n \n # Periodically check the validation or training loss and accuracy\n if batch_i % 100 == 0:\n loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images,\n labels: mnist.validation.labels,\n is_training: False})\n print('Batch: {:>2}: Validation loss: {:>3.5f}, 
Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc))\n elif batch_i % 25 == 0:\n loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, labels: batch_ys, is_training: False})\n print('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc))\n\n # At the end, score the final accuracy for both the validation and test sets\n acc = sess.run(accuracy, {inputs: mnist.validation.images,\n labels: mnist.validation.labels, \n is_training: False})\n print('Final validation accuracy: {:>3.5f}'.format(acc))\n acc = sess.run(accuracy, {inputs: mnist.test.images,\n labels: mnist.test.labels,\n is_training: False})\n print('Final test accuracy: {:>3.5f}'.format(acc))\n \n # Score the first 100 test images individually, just to make sure batch normalization really worked\n correct = 0\n for i in range(100):\n correct += sess.run(accuracy,feed_dict={inputs: [mnist.test.images[i]],\n labels: [mnist.test.labels[i]],\n is_training: False})\n\n print(\"Accuracy on 100 samples:\", correct/100)\n\n\nnum_batches = 800\nbatch_size = 64\nlearning_rate = 0.002\n\ntf.reset_default_graph()\nwith tf.Graph().as_default():\n train(num_batches, batch_size, learning_rate)",
"Batch: 0: Validation loss: 0.69061, Validation accuracy: 0.11000\nBatch: 25: Training loss: 0.58497, Training accuracy: 0.09375\nBatch: 50: Training loss: 0.50025, Training accuracy: 0.04688\nBatch: 75: Training loss: 0.44787, Training accuracy: 0.15625\nBatch: 100: Validation loss: 0.40292, Validation accuracy: 0.09240\nBatch: 125: Training loss: 0.36928, Training accuracy: 0.09375\nBatch: 150: Training loss: 0.34604, Training accuracy: 0.09375\nBatch: 175: Training loss: 0.32639, Training accuracy: 0.18750\nBatch: 200: Validation loss: 0.27034, Validation accuracy: 0.39880\nBatch: 225: Training loss: 0.24011, Training accuracy: 0.51562\nBatch: 250: Training loss: 0.16094, Training accuracy: 0.68750\nBatch: 275: Training loss: 0.18570, Training accuracy: 0.62500\nBatch: 300: Validation loss: 0.10192, Validation accuracy: 0.82900\nBatch: 325: Training loss: 0.08171, Training accuracy: 0.85938\nBatch: 350: Training loss: 0.08473, Training accuracy: 0.85938\nBatch: 375: Training loss: 0.04348, Training accuracy: 0.95312\nBatch: 400: Validation loss: 0.02907, Validation accuracy: 0.95720\nBatch: 425: Training loss: 0.02878, Training accuracy: 0.95312\nBatch: 450: Training loss: 0.01637, Training accuracy: 0.96875\nBatch: 475: Training loss: 0.02256, Training accuracy: 0.95312\nBatch: 500: Validation loss: 0.02859, Validation accuracy: 0.96020\nBatch: 525: Training loss: 0.03854, Training accuracy: 0.95312\nBatch: 550: Training loss: 0.05188, Training accuracy: 0.93750\nBatch: 575: Training loss: 0.02280, Training accuracy: 0.96875\nBatch: 600: Validation loss: 0.02596, Validation accuracy: 0.96060\nBatch: 625: Training loss: 0.02988, Training accuracy: 0.96875\nBatch: 650: Training loss: 0.03927, Training accuracy: 0.93750\nBatch: 675: Training loss: 0.02937, Training accuracy: 0.95312\nBatch: 700: Validation loss: 0.04644, Validation accuracy: 0.93860\nBatch: 725: Training loss: 0.01638, Training accuracy: 0.98438\nBatch: 750: Training loss: 0.06205, Training 
accuracy: 0.93750\nBatch: 775: Training loss: 0.00866, Training accuracy: 0.98438\nFinal validation accuracy: 0.97040\nFinal test accuracy: 0.97220\nAccuracy on 100 samples: 0.99\n"
]
],
[
[
"With batch normalization, we now get excellent performance. In fact, validation accuracy is almost 94% after only 500 batches. Notice also the last line of the output: `Accuracy on 100 samples`. If this value is low while everything else looks good, that means you did not implement batch normalization correctly. Specifically, it means you either did not calculate the population mean and variance while training, or you are not using those values during inference.\n\n# Batch Normalization using `tf.nn.batch_normalization`<a id=\"example_2\"></a>\n\nMost of the time you will be able to use higher level functions exclusively, but sometimes you may want to work at a lower level. For example, if you ever want to implement a new feature – something new enough that TensorFlow does not already include a high-level implementation of it, like batch normalization in an LSTM – then you may need to know these sorts of things.\n\nThis version of the network uses `tf.nn` for almost everything, and expects you to implement batch normalization using [`tf.nn.batch_normalization`](https://www.tensorflow.org/api_docs/python/tf/nn/batch_normalization).\n\nThis implementation of `fully_connected` is much more involved than the one that uses `tf.layers`. However, if you went through the `Batch_Normalization_Lesson` notebook, things should look pretty familiar. To add batch normalization, we did the following:\n1. Added the `is_training` parameter to the function signature so we can pass that information to the batch normalization layer.\n2. Removed the bias and activation function from the `dense` layer.\n3. Added `gamma`, `beta`, `pop_mean`, and `pop_variance` variables.\n4. Used `tf.cond` to make handle training and inference differently.\n5. When training, we use `tf.nn.moments` to calculate the batch mean and variance. Then we update the population statistics and use `tf.nn.batch_normalization` to normalize the layer's output using the batch statistics. 
Notice the `with tf.control_dependencies...` statement - this is required to force TensorFlow to run the operations that update the population statistics.\n6. During inference (i.e. when not training), we use `tf.nn.batch_normalization` to normalize the layer's output using the population statistics we calculated during training.\n7. Passed the normalized values into a ReLU activation function.\n\nIf any of thise code is unclear, it is almost identical to what we showed in the `fully_connected` function in the `Batch_Normalization_Lesson` notebook. Please see that for extensive comments. ",
"_____no_output_____"
]
],
[
[
"def fully_connected(prev_layer, num_units, is_training):\n \"\"\"\n Create a fully connectd layer with the given layer as input and the given number of neurons.\n \n :param prev_layer: Tensor\n The Tensor that acts as input into this layer\n :param num_units: int\n The size of the layer. That is, the number of units, nodes, or neurons.\n :param is_training: bool or Tensor\n Indicates whether or not the network is currently training, which tells the batch normalization\n layer whether or not it should update or use its population statistics.\n :returns Tensor\n A new fully connected layer\n \"\"\"\n\n layer = tf.layers.dense(prev_layer, num_units, use_bias=False, activation=None)\n\n gamma = tf.Variable(tf.ones([num_units]))\n beta = tf.Variable(tf.zeros([num_units]))\n\n pop_mean = tf.Variable(tf.zeros([num_units]), trainable=False)\n pop_variance = tf.Variable(tf.ones([num_units]), trainable=False)\n\n epsilon = 1e-3\n \n def batch_norm_training():\n batch_mean, batch_variance = tf.nn.moments(layer, [0])\n\n decay = 0.99\n train_mean = tf.assign(pop_mean, pop_mean * decay + batch_mean * (1 - decay))\n train_variance = tf.assign(pop_variance, pop_variance * decay + batch_variance * (1 - decay))\n\n with tf.control_dependencies([train_mean, train_variance]):\n return tf.nn.batch_normalization(layer, batch_mean, batch_variance, beta, gamma, epsilon)\n \n def batch_norm_inference():\n return tf.nn.batch_normalization(layer, pop_mean, pop_variance, beta, gamma, epsilon)\n\n batch_normalized_output = tf.cond(is_training, batch_norm_training, batch_norm_inference)\n return tf.nn.relu(batch_normalized_output)",
"_____no_output_____"
]
],
[
[
"The changes we made to `conv_layer` are _almost_ exactly the same as the ones we made to `fully_connected`. However, there is an important difference. Convolutional layers have multiple feature maps, and each feature map uses shared weights. So we need to make sure we calculate our batch and population statistics **per feature map** instead of per node in the layer.\n\nTo accomplish this, we do **the same things** that we did in `fully_connected`, with two exceptions:\n1. The sizes of `gamma`, `beta`, `pop_mean` and `pop_variance` are set to the number of feature maps (output channels) instead of the number of output nodes.\n2. We change the parameters we pass to `tf.nn.moments` to make sure it calculates the mean and variance for the correct dimensions.",
"_____no_output_____"
]
],
[
[
"def conv_layer(prev_layer, layer_depth, is_training):\n \"\"\"\n Create a convolutional layer with the given layer as input.\n \n :param prev_layer: Tensor\n The Tensor that acts as input into this layer\n :param layer_depth: int\n We'll set the strides and number of feature maps based on the layer's depth in the network.\n This is *not* a good way to make a CNN, but it helps us create this example with very little code.\n :param is_training: bool or Tensor\n Indicates whether or not the network is currently training, which tells the batch normalization\n layer whether or not it should update or use its population statistics.\n :returns Tensor\n A new convolutional layer\n \"\"\"\n strides = 2 if layer_depth % 3 == 0 else 1\n \n in_channels = prev_layer.get_shape().as_list()[3]\n out_channels = layer_depth*4\n \n weights = tf.Variable(\n tf.truncated_normal([3, 3, in_channels, out_channels], stddev=0.05))\n \n layer = tf.nn.conv2d(prev_layer, weights, strides=[1,strides, strides, 1], padding='SAME')\n\n gamma = tf.Variable(tf.ones([out_channels]))\n beta = tf.Variable(tf.zeros([out_channels]))\n\n pop_mean = tf.Variable(tf.zeros([out_channels]), trainable=False)\n pop_variance = tf.Variable(tf.ones([out_channels]), trainable=False)\n\n epsilon = 1e-3\n \n def batch_norm_training():\n # Important to use the correct dimensions here to ensure the mean and variance are calculated \n # per feature map instead of for the entire layer\n batch_mean, batch_variance = tf.nn.moments(layer, [0,1,2], keep_dims=False)\n\n decay = 0.99\n train_mean = tf.assign(pop_mean, pop_mean * decay + batch_mean * (1 - decay))\n train_variance = tf.assign(pop_variance, pop_variance * decay + batch_variance * (1 - decay))\n\n with tf.control_dependencies([train_mean, train_variance]):\n return tf.nn.batch_normalization(layer, batch_mean, batch_variance, beta, gamma, epsilon)\n \n def batch_norm_inference():\n return tf.nn.batch_normalization(layer, pop_mean, pop_variance, beta, gamma, 
epsilon)\n\n batch_normalized_output = tf.cond(is_training, batch_norm_training, batch_norm_inference)\n return tf.nn.relu(batch_normalized_output)",
"_____no_output_____"
]
],
[
[
"To modify `train`, we did the following:\n1. Added `is_training`, a placeholder to store a boolean value indicating whether or not the network is training.\n2. Each time we call `run` on the session, we added to `feed_dict` the appropriate value for `is_training`.\n3. We did **not** need to add the `with tf.control_dependencies...` statement that we added in the network that used `tf.layers.batch_normalization` because we handled updating the population statistics ourselves in `conv_layer` and `fully_connected`.",
"_____no_output_____"
]
],
[
[
"def train(num_batches, batch_size, learning_rate):\n # Build placeholders for the input samples and labels \n inputs = tf.placeholder(tf.float32, [None, 28, 28, 1])\n labels = tf.placeholder(tf.float32, [None, 10])\n\n # Add placeholder to indicate whether or not we're training the model\n is_training = tf.placeholder(tf.bool)\n\n # Feed the inputs into a series of 20 convolutional layers \n layer = inputs\n for layer_i in range(1, 20):\n layer = conv_layer(layer, layer_i, is_training)\n\n # Flatten the output from the convolutional layers \n orig_shape = layer.get_shape().as_list()\n layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]])\n\n # Add one fully connected layer\n layer = fully_connected(layer, 100, is_training)\n\n # Create the output layer with 1 node for each \n logits = tf.layers.dense(layer, 10)\n \n # Define loss and training operations\n model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))\n train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)\n \n # Create operations to test accuracy\n correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n \n # Train and test the network\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for batch_i in range(num_batches):\n batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n\n # train this batch\n sess.run(train_opt, {inputs: batch_xs, labels: batch_ys, is_training: True})\n \n # Periodically check the validation or training loss and accuracy\n if batch_i % 100 == 0:\n loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images,\n labels: mnist.validation.labels,\n is_training: False})\n print('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc))\n elif batch_i % 25 == 0:\n loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, 
labels: batch_ys, is_training: False})\n print('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc))\n\n # At the end, score the final accuracy for both the validation and test sets\n acc = sess.run(accuracy, {inputs: mnist.validation.images,\n labels: mnist.validation.labels, \n is_training: False})\n print('Final validation accuracy: {:>3.5f}'.format(acc))\n acc = sess.run(accuracy, {inputs: mnist.test.images,\n labels: mnist.test.labels,\n is_training: False})\n print('Final test accuracy: {:>3.5f}'.format(acc))\n \n # Score the first 100 test images individually, just to make sure batch normalization really worked\n correct = 0\n for i in range(100):\n correct += sess.run(accuracy,feed_dict={inputs: [mnist.test.images[i]],\n labels: [mnist.test.labels[i]],\n is_training: False})\n\n print(\"Accuracy on 100 samples:\", correct/100)\n\n\nnum_batches = 800\nbatch_size = 64\nlearning_rate = 0.002\n\ntf.reset_default_graph()\nwith tf.Graph().as_default():\n train(num_batches, batch_size, learning_rate)",
"Batch: 0: Validation loss: 0.69145, Validation accuracy: 0.09580\nBatch: 25: Training loss: 0.58359, Training accuracy: 0.06250\nBatch: 50: Training loss: 0.48370, Training accuracy: 0.20312\nBatch: 75: Training loss: 0.41495, Training accuracy: 0.07812\nBatch: 100: Validation loss: 0.37167, Validation accuracy: 0.11000\nBatch: 125: Training loss: 0.36280, Training accuracy: 0.07812\nBatch: 150: Training loss: 0.36758, Training accuracy: 0.10938\nBatch: 175: Training loss: 0.43072, Training accuracy: 0.10938\nBatch: 200: Validation loss: 0.50506, Validation accuracy: 0.11260\nBatch: 225: Training loss: 0.57283, Training accuracy: 0.12500\nBatch: 250: Training loss: 0.64025, Training accuracy: 0.17188\nBatch: 275: Training loss: 0.84651, Training accuracy: 0.07812\nBatch: 300: Validation loss: 1.00750, Validation accuracy: 0.11260\nBatch: 325: Training loss: 1.04066, Training accuracy: 0.06250\nBatch: 350: Training loss: 1.33937, Training accuracy: 0.10938\nBatch: 375: Training loss: 0.84299, Training accuracy: 0.23438\nBatch: 400: Validation loss: 0.48859, Validation accuracy: 0.45040\nBatch: 425: Training loss: 0.30235, Training accuracy: 0.64062\nBatch: 450: Training loss: 0.09913, Training accuracy: 0.82812\nBatch: 475: Training loss: 0.26570, Training accuracy: 0.67188\nBatch: 500: Validation loss: 0.07143, Validation accuracy: 0.90320\nBatch: 525: Training loss: 0.08000, Training accuracy: 0.89062\nBatch: 550: Training loss: 0.04408, Training accuracy: 0.93750\nBatch: 575: Training loss: 0.04122, Training accuracy: 0.95312\nBatch: 600: Validation loss: 0.06432, Validation accuracy: 0.90580\nBatch: 625: Training loss: 0.04591, Training accuracy: 0.90625\nBatch: 650: Training loss: 0.04976, Training accuracy: 0.92188\nBatch: 675: Training loss: 0.04407, Training accuracy: 0.92188\nBatch: 700: Validation loss: 0.03021, Validation accuracy: 0.95820\nBatch: 725: Training loss: 0.03208, Training accuracy: 0.95312\nBatch: 750: Training loss: 0.06863, Training 
accuracy: 0.93750\nBatch: 775: Training loss: 0.03505, Training accuracy: 0.93750\nFinal validation accuracy: 0.94440\nFinal test accuracy: 0.94640\nAccuracy on 100 samples: 0.96\n"
]
],
[
[
"Once again, the model with batch normalization quickly reaches a high accuracy. But in our run, notice that it doesn't seem to learn anything for the first 250 batches, then the accuracy starts to climb. That just goes to show - even with batch normalization, it's important to give your network a bit of time to learn before you decide it isn't working.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4a229c62be8697731b9e4e04e8436705cdb5cdde
| 827,854 |
ipynb
|
Jupyter Notebook
|
overview_positions_insitu.ipynb
|
cmoestl/heliocats
|
5e2b054990319e14859669561a361cc0c7ca4295
|
[
"MIT"
] | 4 |
2020-10-03T07:39:56.000Z
|
2022-03-28T12:39:54.000Z
|
overview_positions_insitu.ipynb
|
cmoestl/heliocats
|
5e2b054990319e14859669561a361cc0c7ca4295
|
[
"MIT"
] | 2 |
2021-01-08T13:21:54.000Z
|
2022-03-12T00:11:50.000Z
|
overview_positions_insitu.ipynb
|
cmoestl/heliocats
|
5e2b054990319e14859669561a361cc0c7ca4295
|
[
"MIT"
] | 4 |
2020-04-09T13:49:50.000Z
|
2020-06-18T04:36:07.000Z
| 406.408444 | 635,128 | 0.920046 |
[
[
[
"\n## In situ data and trajectories incl. Bepi Colombo, PSP, Solar Orbiter\nhttps://github.com/cmoestl/heliocats\n\nAuthor: C. Moestl, IWF Graz, Austria\n\ntwitter @chrisoutofspace, https://github.com/cmoestl\n\nlast update: 2021 August 24\n\n\nneeds python 3.7 with the conda helio environment (see README.md)\n\n\nuses heliopy for generating spacecraft positions, for data source files see README.md\n\n \n---\n\nMIT LICENSE\nCopyright 2020-2021, Christian Moestl \nPermission is hereby granted, free of charge, to any person obtaining a copy of this \nsoftware and associated documentation files (the \"Software\"), to deal in the Software\nwithout restriction, including without limitation the rights to use, copy, modify, \nmerge, publish, distribute, sublicense, and/or sell copies of the Software, and to \npermit persons to whom the Software is furnished to do so, subject to the following \nconditions:\nThe above copyright notice and this permission notice shall be included in all copies \nor substantial portions of the Software.\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, \nINCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\nPARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT \nHOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF \nCONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE \nOR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n",
"_____no_output_____"
]
],
[
[
"#change path for ffmpeg for animation production if needed\n\nffmpeg_path=''\n\nimport os\nimport datetime\nfrom datetime import datetime, timedelta\nfrom sunpy.time import parse_time\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\n\nimport matplotlib.cm as cmap\n\n\nfrom scipy.signal import medfilt\nimport numpy as np\nimport pdb\nimport pickle\nimport seaborn as sns\nimport sys\nimport heliopy.data.spice as spicedata\nimport heliopy.spice as spice\nimport astropy\nimport importlib \nimport time\nimport numba\nfrom numba import jit\nimport multiprocessing\nimport urllib\nimport copy\nfrom astropy import constants as const\n\n\n\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\nfrom heliocats import data as hd\nimportlib.reload(hd) #reload again while debugging\n\nfrom heliocats import plot as hp\nimportlib.reload(hp) #reload again while debugging\n\n#where the in situ data files are located is read \n#from config.py \nimport config\nimportlib.reload(config)\nfrom config import data_path",
"_____no_output_____"
]
],
[
[
"## load HIGeoCAT",
"_____no_output_____"
]
],
[
[
"#load HIGeoCAT\nfrom heliocats import cats as hc\nimportlib.reload(hc) #reload again while debugging \n\n#https://www.helcats-fp7.eu/\n#LOAD HELCATS HIGeoCAT\nurl_higeocat='https://www.helcats-fp7.eu/catalogues/data/HCME_WP3_V06.vot'\n\ntry: urllib.request.urlretrieve(url_higeocat,'data/HCME_WP3_V06.vot')\nexcept urllib.error.URLError as e:\n print('higeocat not loaded')\n\nhigeocat=hc.load_higeocat_vot('data/HCME_WP3_V06.vot')\nhigeocat_time=parse_time(higeocat['Date']).datetime \nhigeocat_t0=parse_time(higeocat['SSE Launch']).datetime #backprojected launch time\n\nsse_speed=higeocat['SSE Speed']\nsse_lon=higeocat['SSE HEEQ Long']\nsse_lat=higeocat['SSE HEEQ Lat']\nhigeocat_name=np.array(higeocat['SC'].astype(str))\n\n\n\nprint('done')",
"done\n"
]
],
[
[
"## generate HIGeoCAT kinematics\n",
"_____no_output_____"
]
],
[
[
"print('generate kinematics for each SSEF30 CME')\n\n\n\n\ngenerate_hi_kin=False\n\n\nif generate_hi_kin:\n \n t0=higeocat_t0\n\n kindays=60\n\n #lists for all times, r, longitude, latitude\n all_time=[]\n all_r=[]\n all_lat=[]\n all_lon=[]\n all_name=[]\n\n #go through all HI CMEs\n for i in np.arange(len(higeocat)):\n\n #for i in np.arange(100):\n\n #times for each event kinematic\n time1=[]\n tstart1=copy.deepcopy(t0[i])\n tend1=tstart1+timedelta(days=kindays)\n #make 30 min datetimes\n while tstart1 < tend1:\n time1.append(tstart1) \n tstart1 += timedelta(minutes=30) \n\n\n #make kinematics\n timestep=np.zeros(kindays*24*2)\n cme_r=np.zeros(kindays*24*2)\n cme_lon=np.zeros(kindays*24*2)\n cme_lat=np.zeros(kindays*24*2)\n cme_name=np.chararray(kindays*24*2)\n\n\n\n\n for j in np.arange(0,len(cme_r)-1,1):\n\n cme_r[j]=sse_speed[i]*timestep[j]/(const.au.value*1e-3) #km to AU\n cme_lon[j]=sse_lon[i]\n cme_lat[j]=sse_lat[i]\n timestep[j+1]=timestep[j]+30*60 #seconds\n cme_name[j]=higeocat_name[i]\n\n\n\n #### linear interpolate to 30 min resolution\n\n #find next full hour after t0\n format_str = '%Y-%m-%d %H' \n t0r = datetime.strptime(datetime.strftime(t0[i], format_str), format_str) +timedelta(hours=1)\n time2=[]\n tstart2=copy.deepcopy(t0r)\n tend2=tstart2+timedelta(days=kindays)\n #make 30 min datetimes \n while tstart2 < tend2:\n time2.append(tstart2) \n tstart2 += timedelta(minutes=30) \n\n time2_num=parse_time(time2).plot_date \n time1_num=parse_time(time1).plot_date\n\n\n\n #linear interpolation to time_mat times \n cme_r = np.interp(time2_num, time1_num,cme_r )\n cme_lat = np.interp(time2_num, time1_num,cme_lat )\n cme_lon = np.interp(time2_num, time1_num,cme_lon )\n\n\n\n #cut at 5 AU \n cutoff=np.where(cme_r<5)[0]\n #write to all\n #print(cutoff[0],cutoff[-1])\n all_time.extend(time2[cutoff[0]:cutoff[-2]])\n all_r.extend(cme_r[cutoff[0]:cutoff[-2]])\n all_lat.extend(cme_lat[cutoff[0]:cutoff[-2]])\n all_lon.extend(cme_lon[cutoff[0]:cutoff[-2]])\n 
all_name.extend(cme_name[cutoff[0]:cutoff[-2]])\n\n\n\n\n\n\n plt.figure(1) \n plt.plot(all_time,all_r)\n\n plt.figure(2) \n plt.plot(all_time,all_lat,'ok')\n\n\n plt.figure(3) \n plt.plot(all_time,all_lon,'ok')\n\n ################### sort all kinematics by time\n all_time_num=mdates.date2num(all_time)\n\n\n all_r=np.array(all_r)\n all_lat=np.array(all_lat)\n all_lon=np.array(all_lon)\n\n\n all_name=np.array(all_name)\n\n #get indices for sorting for time\n sortind=np.argsort(all_time_num,axis=0)\n\n #cme_time_sort=mdates.num2date(all_time_num[sortind])\n cme_time_sort_num=all_time_num[sortind]\n cme_r_sort=all_r[sortind]\n cme_lat_sort=all_lat[sortind]\n cme_lon_sort=all_lon[sortind]\n cme_name_sort=all_name[sortind].astype(str)\n\n\n #plt.plot(cme_time_sort,cme_r_sort)\n #plt.plot(cme_time_sort,cme_r_sort)\n plt.figure(4) \n plt.plot(all_time,all_lon,'.k')\n plt.plot(cme_time_sort_num,cme_lon_sort,'.b')\n\n pickle.dump([cme_time_sort_num,cme_r_sort,cme_lat_sort,cme_lon_sort,cme_name_sort], open('data/higeocat_kinematics.p', \"wb\"))\n\n\n\nprint('load HIGEOCAT kinematics')\n[hc_time_num,hc_r,hc_lat,hc_lon,hc_name]=pickle.load(open('data/higeocat_kinematics.p', \"rb\"))\n\n\nprint('done')",
"generate kinematics for each SSEF30 CME\nload HIGEOCAT kinematics\ndone\n"
]
],
[
[
"### define functions",
"_____no_output_____"
]
],
[
[
"def make_positions():\n\n ############### PSP\n\n starttime =datetime(2018, 8,13)\n endtime = datetime(2025, 8, 31)\n psp_time = []\n while starttime < endtime:\n psp_time.append(starttime)\n starttime += timedelta(days=res_in_days)\n psp_time_num=mdates.date2num(psp_time) \n\n spice.furnish(spicedata.get_kernel('psp_pred'))\n psp=spice.Trajectory('SPP')\n psp.generate_positions(psp_time,'Sun',frame)\n print('PSP pos')\n\n psp.change_units(astropy.units.AU) \n [psp_r, psp_lat, psp_lon]=hd.cart2sphere(psp.x,psp.y,psp.z)\n print('PSP conv')\n\n\n ############### BepiColombo\n\n starttime =datetime(2018, 10, 21)\n endtime = datetime(2025, 11, 2)\n bepi_time = []\n while starttime < endtime:\n bepi_time.append(starttime)\n starttime += timedelta(days=res_in_days)\n bepi_time_num=mdates.date2num(bepi_time) \n\n spice.furnish(spicedata.get_kernel('bepi_pred'))\n bepi=spice.Trajectory('BEPICOLOMBO MPO') # or BEPICOLOMBO MMO\n bepi.generate_positions(bepi_time,'Sun',frame)\n bepi.change_units(astropy.units.AU) \n [bepi_r, bepi_lat, bepi_lon]=hd.cart2sphere(bepi.x,bepi.y,bepi.z)\n print('Bepi')\n\n\n\n ############### Solar Orbiter\n\n starttime = datetime(2020, 3, 1)\n endtime = datetime(2029, 12, 31)\n solo_time = []\n while starttime < endtime:\n solo_time.append(starttime)\n starttime += timedelta(days=res_in_days)\n solo_time_num=mdates.date2num(solo_time) \n\n spice.furnish(spicedata.get_kernel('solo_2020'))\n solo=spice.Trajectory('Solar Orbiter')\n solo.generate_positions(solo_time, 'Sun',frame)\n solo.change_units(astropy.units.AU)\n [solo_r, solo_lat, solo_lon]=hd.cart2sphere(solo.x,solo.y,solo.z)\n print('Solo')\n\n\n\n ########### plots\n\n\n plt.figure(1, figsize=(12,9))\n plt.plot_date(psp_time,psp_r,'-', label='R')\n plt.plot_date(psp_time,psp_lat,'-',label='lat')\n plt.plot_date(psp_time,psp_lon,'-',label='lon')\n plt.ylabel('AU / RAD')\n plt.legend()\n\n plt.figure(2, figsize=(12,9))\n plt.plot_date(bepi_time,bepi_r,'-', label='R')\n 
plt.plot_date(bepi_time,bepi_lat,'-',label='lat')\n plt.plot_date(bepi_time,bepi_lon,'-',label='lon')\n plt.title('Bepi Colombo position '+frame)\n plt.ylabel('AU / RAD')\n plt.legend()\n\n\n plt.figure(3, figsize=(12,9))\n plt.plot_date(solo_time,solo_r,'-', label='R')\n plt.plot_date(solo_time,solo_lat,'-',label='lat')\n plt.plot_date(solo_time,solo_lon,'-',label='lon')\n plt.title('Solar Orbiter position '+frame)\n plt.ylabel('AU / RAD')\n plt.legend()\n\n\n ######## R with all three\n plt.figure(4, figsize=(16,10))\n plt.plot_date(psp_time,psp.r,'-',label='PSP')\n plt.plot_date(bepi_time,bepi.r,'-',label='Bepi Colombo')\n plt.plot_date(solo_time,solo.r,'-',label='Solar Orbiter')\n plt.legend()\n plt.title('Heliocentric distance of heliospheric observatories')\n plt.ylabel('AU')\n plt.savefig(positions_plot_directory+'/bepi_psp_solo_R.png')\n\n ##### Longitude all three\n plt.figure(5, figsize=(16,10))\n plt.plot_date(psp_time,psp_lon*180/np.pi,'-',label='PSP')\n plt.plot_date(bepi_time,bepi_lon*180/np.pi,'-',label='Bepi Colombo')\n plt.plot_date(solo_time,solo_lon*180/np.pi,'-',label='Solar Orbiter')\n plt.legend()\n plt.title(frame+' longitude')\n plt.ylabel('DEG')\n plt.savefig(positions_plot_directory+'/bepi_psp_solo_longitude_'+frame+'.png')\n\n ############# Earth, Mercury, Venus, STA\n #see https://docs.heliopy.org/en/stable/data/spice.html\n\n planet_kernel=spicedata.get_kernel('planet_trajectories')\n\n starttime =datetime(2018, 1, 1)\n endtime = datetime(2029, 12, 31)\n earth_time = []\n while starttime < endtime:\n earth_time.append(starttime)\n starttime += timedelta(days=res_in_days)\n earth_time_num=mdates.date2num(earth_time) \n\n earth=spice.Trajectory('399') #399 for Earth, not barycenter (because of moon)\n earth.generate_positions(earth_time,'Sun',frame)\n earth.change_units(astropy.units.AU) \n [earth_r, earth_lat, earth_lon]=hd.cart2sphere(earth.x,earth.y,earth.z)\n print('Earth')\n\n ################ mercury\n 
mercury_time_num=earth_time_num\n mercury=spice.Trajectory('1') #barycenter\n mercury.generate_positions(earth_time,'Sun',frame) \n mercury.change_units(astropy.units.AU) \n [mercury_r, mercury_lat, mercury_lon]=hd.cart2sphere(mercury.x,mercury.y,mercury.z)\n print('mercury') \n\n ################# venus\n venus_time_num=earth_time_num\n venus=spice.Trajectory('2') \n venus.generate_positions(earth_time,'Sun',frame) \n venus.change_units(astropy.units.AU) \n [venus_r, venus_lat, venus_lon]=hd.cart2sphere(venus.x,venus.y,venus.z)\n print('venus') \n\n ############### Mars\n\n mars_time_num=earth_time_num\n mars=spice.Trajectory('4') \n mars.generate_positions(earth_time,'Sun',frame) \n mars.change_units(astropy.units.AU) \n [mars_r, mars_lat, mars_lon]=hd.cart2sphere(mars.x,mars.y,mars.z)\n print('mars') \n\n #############stereo-A\n sta_time_num=earth_time_num\n spice.furnish(spicedata.get_kernel('stereo_a_pred'))\n sta=spice.Trajectory('-234') \n sta.generate_positions(earth_time,'Sun',frame) \n sta.change_units(astropy.units.AU) \n [sta_r, sta_lat, sta_lon]=hd.cart2sphere(sta.x,sta.y,sta.z)\n print('STEREO-A') \n\n\n\n #save positions \n if high_res_mode:\n pickle.dump([psp_time,psp_time_num,psp_r,psp_lon,psp_lat,bepi_time,bepi_time_num,bepi_r,bepi_lon,bepi_lat,solo_time,solo_time_num,solo_r,solo_lon,solo_lat], open( 'positions_plots/psp_solo_bepi_'+frame+'_1min.p', \"wb\" ) )\n else: \n psp=np.rec.array([psp_time_num,psp_r,psp_lon,psp_lat, psp.x, psp.y,psp.z],dtype=[('time','f8'),('r','f8'),('lon','f8'),('lat','f8'),('x','f8'),('y','f8'),('z','f8')])\n bepi=np.rec.array([bepi_time_num,bepi_r,bepi_lon,bepi_lat,bepi.x, bepi.y,bepi.z],dtype=[('time','f8'),('r','f8'),('lon','f8'),('lat','f8'),('x','f8'),('y','f8'),('z','f8')])\n solo=np.rec.array([solo_time_num,solo_r,solo_lon,solo_lat,solo.x, solo.y,solo.z],dtype=[('time','f8'),('r','f8'),('lon','f8'),('lat','f8'),('x','f8'),('y','f8'),('z','f8')])\n sta=np.rec.array([sta_time_num,sta_r,sta_lon,sta_lat,sta.x, 
sta.y,sta.z],dtype=[('time','f8'),('r','f8'),('lon','f8'),('lat','f8'),('x','f8'),('y','f8'),('z','f8')])\n earth=np.rec.array([earth_time_num,earth_r,earth_lon,earth_lat, earth.x, earth.y,earth.z],dtype=[('time','f8'),('r','f8'),('lon','f8'),('lat','f8'),('x','f8'),('y','f8'),('z','f8')])\n venus=np.rec.array([venus_time_num,venus_r,venus_lon,venus_lat, venus.x, venus.y,venus.z],dtype=[('time','f8'),('r','f8'),('lon','f8'),('lat','f8'),('x','f8'),('y','f8'),('z','f8')])\n mars=np.rec.array([mars_time_num,mars_r,mars_lon,mars_lat, mars.x, mars.y,mars.z],dtype=[('time','f8'),('r','f8'),('lon','f8'),('lat','f8'),('x','f8'),('y','f8'),('z','f8')])\n mercury=np.rec.array([mercury_time_num,mercury_r,mercury_lon,mercury_lat,mercury.x, mercury.y,mercury.z],dtype=[('time','f8'),('r','f8'),('lon','f8'),('lat','f8'),('x','f8'),('y','f8'),('z','f8')])\n pickle.dump([psp, bepi, solo, sta, earth, venus, mars, mercury,frame], open( 'data/positions_psp_solo_bepi_sta_planets_'+frame+'_1hour.p', \"wb\" ) )\n #load with [psp, bepi, solo, sta, earth, venus, mars, mercury,frame]=pickle.load( open( 'positions_psp_solo_bepi_sta_planets_HCI_6hours_2018_2025.p', \"rb\" ) )\n \n \n end=time.time()\n print( 'generate position took time in seconds:', round((end-start),1) )",
"_____no_output_____"
],
[
"def make_frame(k):\n '''\n loop each frame in multiprocessing\n '''\n \n\n fig=plt.figure(1, figsize=(19.2,10.8), dpi=100) #full hd\n #fig=plt.figure(1, figsize=(19.2*2,10.8*2), dpi=100) #4k\n ax = plt.subplot2grid((7,2), (0, 0), rowspan=7, projection='polar')\n backcolor='black'\n psp_color='black'\n bepi_color='blue'\n solo_color='coral'\n\n\n frame_time_str=str(mdates.num2date(frame_time_num+k*res_in_days))\n #print( 'current frame_time_num', frame_time_str, ' ',k)\n\n #these have their own times\n dct=frame_time_num+k*res_in_days-psp.time\n psp_timeind=np.argmin(abs(dct))\n\n dct=frame_time_num+k*res_in_days-bepi.time\n bepi_timeind=np.argmin(abs(dct))\n\n dct=frame_time_num+k*res_in_days-solo.time\n solo_timeind=np.argmin(abs(dct))\n\n #all same times\n dct=frame_time_num+k*res_in_days-earth.time\n earth_timeind=np.argmin(abs(dct))\n\n #plot all positions including text R lon lat for some \n\n #white background\n\n ax.scatter(venus.lon[earth_timeind], venus.r[earth_timeind]*np.cos(venus.lat[earth_timeind]), s=symsize_planet, c='orange', alpha=1,lw=0,zorder=3)\n ax.scatter(mercury.lon[earth_timeind], mercury.r[earth_timeind]*np.cos(mercury.lat[earth_timeind]), s=symsize_planet, c='dimgrey', alpha=1,lw=0,zorder=3)\n ax.scatter(earth.lon[earth_timeind], earth.r[earth_timeind]*np.cos(earth.lat[earth_timeind]), s=symsize_planet, c='mediumseagreen', alpha=1,lw=0,zorder=3)\n ax.scatter(sta.lon[earth_timeind], sta.r[earth_timeind]*np.cos(sta.lat[earth_timeind]), s=symsize_spacecraft, c='red', marker='s', alpha=1,lw=0,zorder=3)\n ax.scatter(mars.lon[earth_timeind], mars.r[earth_timeind]*np.cos(mars.lat[earth_timeind]), s=symsize_planet, c='orangered', alpha=1,lw=0,zorder=3)\n\n\n #plot stereoa fov hi1/2 \n hp.plot_stereo_hi_fov(sta,frame_time_num, earth_timeind, ax,'A')\n\n\n\n #positions text\n f10=plt.figtext(0.01,0.93,' R lon lat', fontsize=fsize+2, ha='left',color=backcolor)\n\n if frame=='HEEQ': earth_text='Earth: 
'+str(f'{earth.r[earth_timeind]:6.2f}')+str(f'{0.0:8.1f}')+str(f'{np.rad2deg(earth.lat[earth_timeind]):8.1f}')\n else: earth_text='Earth: '+str(f'{earth.r[earth_timeind]:6.2f}')+str(f'{np.rad2deg(earth.lon[earth_timeind]):8.1f}')+str(f'{np.rad2deg(earth.lat[earth_timeind]):8.1f}')\n\n mars_text='Mars: '+str(f'{mars.r[earth_timeind]:6.2f}')+str(f'{np.rad2deg(mars.lon[earth_timeind]):8.1f}')+str(f'{np.rad2deg(mars.lat[earth_timeind]):8.1f}')\n sta_text='STA: '+str(f'{sta.r[earth_timeind]:6.2f}')+str(f'{np.rad2deg(sta.lon[earth_timeind]):8.1f}')+str(f'{np.rad2deg(sta.lat[earth_timeind]):8.1f}')\n\n #position and text \n if psp_timeind > 0:\n #plot trajectorie\n ax.scatter(psp.lon[psp_timeind], psp.r[psp_timeind]*np.cos(psp.lat[psp_timeind]), s=symsize_spacecraft, c=psp_color, marker='s', alpha=1,lw=0,zorder=3)\n #plot positiona as text\n psp_text='PSP: '+str(f'{psp.r[psp_timeind]:6.2f}')+str(f'{np.rad2deg(psp.lon[psp_timeind]):8.1f}')+str(f'{np.rad2deg(psp.lat[psp_timeind]):8.1f}')\n f5=plt.figtext(0.01,0.78,psp_text, fontsize=fsize, ha='left',color=psp_color)\n if plot_orbit: \n fadestart=psp_timeind-fadeind\n if fadestart < 0: fadestart=0\n ax.plot(psp.lon[fadestart:psp_timeind+fadeind], psp.r[fadestart:psp_timeind+fadeind]*np.cos(psp.lat[fadestart:psp_timeind+fadeind]), c=psp_color, alpha=0.6,lw=1,zorder=3)\n\n if bepi_timeind > 0:\n ax.scatter(bepi.lon[bepi_timeind], bepi.r[bepi_timeind]*np.cos(bepi.lat[bepi_timeind]), s=symsize_spacecraft, c=bepi_color, marker='s', alpha=1,lw=0,zorder=3)\n bepi_text='Bepi: '+str(f'{bepi.r[bepi_timeind]:6.2f}')+str(f'{np.rad2deg(bepi.lon[bepi_timeind]):8.1f}')+str(f'{np.rad2deg(bepi.lat[bepi_timeind]):8.1f}')\n f6=plt.figtext(0.01,0.74,bepi_text, fontsize=fsize, ha='left',color=bepi_color)\n if plot_orbit: \n fadestart=bepi_timeind-fadeind\n if fadestart < 0: fadestart=0 \n ax.plot(bepi.lon[fadestart:bepi_timeind+fadeind], bepi.r[fadestart:bepi_timeind+fadeind]*np.cos(bepi.lat[fadestart:bepi_timeind+fadeind]), c=bepi_color, 
alpha=0.6,lw=1,zorder=3)\n\n if solo_timeind > 0:\n ax.scatter(solo.lon[solo_timeind], solo.r[solo_timeind]*np.cos(solo.lat[solo_timeind]), s=symsize_spacecraft, c=solo_color, marker='s', alpha=1,lw=0,zorder=3)\n solo_text='SolO: '+str(f'{solo.r[solo_timeind]:6.2f}')+str(f'{np.rad2deg(solo.lon[solo_timeind]):8.1f}')+str(f'{np.rad2deg(solo.lat[solo_timeind]):8.1f}')\n f7=plt.figtext(0.01,0.7,solo_text, fontsize=fsize, ha='left',color=solo_color)\n if plot_orbit: \n fadestart=solo_timeind-fadeind\n if fadestart < 0: fadestart=0 \n ax.plot(solo.lon[fadestart:solo_timeind+fadeind], solo.r[fadestart:solo_timeind+fadeind]*np.cos(solo.lat[fadestart:solo_timeind+fadeind]), c=solo_color, alpha=0.6,lw=1,zorder=3)\n\n f10=plt.figtext(0.01,0.9,earth_text, fontsize=fsize, ha='left',color='mediumseagreen')\n f9=plt.figtext(0.01,0.86,mars_text, fontsize=fsize, ha='left',color='orangered')\n f8=plt.figtext(0.01,0.82,sta_text, fontsize=fsize, ha='left',color='red')\n \n \n \n\n ######################## 1 plot all active CME circles\n\n plot_hi_geo=True\n \n \n if plot_hi_geo:\n lamda=30\n #check for active CME indices from HIGeoCAT (with the lists produced above in this notebook)\n #check where time is identical to frame time\n cmeind=np.where(hc_time_num == frame_time_num+k*res_in_days)\n #print(cmeind)\n #plot all active CME circles\n #if np.size(cmeind) >0:\n for p in range(0,np.size(cmeind)):\n\n #print p, h.all_apex_long[cmeind[0][p]], h.all_apex_r[cmeind[0][p]]\n #central d\n dir=np.array([np.cos(hc_lon[cmeind[0][p]]*np.pi/180),np.sin(hc_lon[cmeind[0][p]]*np.pi/180)])*hc_r[cmeind[0][p]]\n\n #points on circle, correct for longitude\n circ_ang = ((np.arange(111)*2-20)*np.pi/180)-(hc_lon[cmeind[0][p]]*np.pi/180)\n\n #these equations are from moestl and davies 2013\n xc = 0+dir[0]/(1+np.sin(lamda*np.pi/180)) + (hc_r[cmeind[0][p]]*np.sin(lamda*np.pi/180)/(1+np.sin(lamda*np.pi/180)))*np.sin(circ_ang)\n yc = 0+dir[1]/(1+np.sin(lamda*np.pi/180)) + 
(hc_r[cmeind[0][p]]*np.sin(lamda*np.pi/180)/(1+np.sin(lamda*np.pi/180)))*np.cos(circ_ang)\n #now convert to polar coordinates\n rcirc=np.sqrt(xc**2+yc**2)\n longcirc=np.arctan2(yc,xc)\n #plot in correct color\n if hc_name[cmeind[0][p]] == 'A': \n #make alpha dependent on distance to solar equatorial plane - maximum latitude is -40/+40 - \n #so to make also the -/+40 latitude CME visible, divide by 50 so alpha > 0 for these events\n ax.plot(longcirc,rcirc, c='red', alpha=1-abs(hc_lat[cmeind[0][p]]/50), lw=1.5) \n if hc_name[cmeind[0][p]] == 'B':\n ax.plot(longcirc,rcirc, c='royalblue', alpha=1-abs(hc_lat[cmeind[0][p]]/50), lw=1.5) \n\n \n \n\n\n #parker spiral\n if plot_parker:\n for q in np.arange(0,12):\n omega=2*np.pi/(sun_rot*60*60*24) #solar rotation in seconds\n v=400/AUkm #km/s\n r0=695000/AUkm\n r=v/omega*theta+r0*7\n if not black: \n ax.plot(-theta+np.deg2rad(0+(360/24.47)*res_in_days*k+360/12*q), r, alpha=0.4, lw=0.5,color='grey',zorder=2)\n if black: \n ax.plot(-theta+np.deg2rad(0+(360/24.47)*res_in_days*k+360/12*q), r, alpha=0.7, lw=0.7,color='grey',zorder=2)\n\n #set axes and grid\n ax.set_theta_zero_location('E')\n #plt.thetagrids(range(0,360,45),(u'0\\u00b0 '+frame+' longitude',u'45\\u00b0',u'90\\u00b0',u'135\\u00b0',u'+/- 180\\u00b0',u'- 135\\u00b0',u'- 90\\u00b0',u'- 45\\u00b0'), ha='right', fmt='%d',fontsize=fsize-1,color=backcolor, alpha=0.9)\n plt.thetagrids(range(0,360,45),(u'0\\u00b0',u'45\\u00b0',u'90\\u00b0',u'135\\u00b0',u'+/- 180\\u00b0',u'- 135\\u00b0',u'- 90\\u00b0',u'- 45\\u00b0'), ha='center', fmt='%d',fontsize=fsize-1,color=backcolor, alpha=0.9,zorder=4)\n\n\n #plt.rgrids((0.10,0.39,0.72,1.00,1.52),('0.10','0.39','0.72','1.0','1.52 AU'),angle=125, fontsize=fsize,alpha=0.9, color=backcolor)\n plt.rgrids((0.1,0.3,0.5,0.7,1.0),('0.10','0.3','0.5','0.7','1.0 AU'),angle=125, fontsize=fsize-3,alpha=0.5, color=backcolor)\n\n #ax.set_ylim(0, 1.75) #with Mars\n ax.set_ylim(0, 1.2) \n\n #Sun\n ax.scatter(0,0,s=100,c='yellow',alpha=1, 
edgecolors='black', linewidth=0.3)\n\n\n \n\n #------------------------------------------------ IN SITU DATA ------------------------------------------------------\n\n\n time_now=frame_time_num+k*res_in_days\n \n #cut data for plot window so faster\n \n windex1=np.where(w_time_num > time_now-days_window)[0][0]\n windex2=np.where(w_time_num > time_now+days_window)[0][0]\n w=w1[windex1:windex2]\n\n sindex1=np.where(s_time_num > time_now-days_window)[0][0]\n sindex2=np.where(s_time_num > time_now+days_window)[0][0]\n s=s1[sindex1:sindex2]\n\n #is data available from new missions?\n \n if p_time_num[-1] > time_now+days_window:\n pindex1=np.where(p_time_num > time_now-days_window)[0][0]\n pindex2=np.where(p_time_num > time_now+days_window)[0][0]\n #pindex2=np.size(p1)-1\n p=p1[pindex1:pindex2]\n elif np.logical_and((p_time_num[-1] < time_now+days_window),(p_time_num[-1] > time_now-days_window)):\n pindex1=np.where(p_time_num > time_now-days_window)[0][0]\n pindex2=np.size(p1)-1\n p=p1[pindex1:pindex2]\n else: p=[] \n\n \n if o_time_num[-1] > time_now+days_window:\n oindex1=np.where(o_time_num > time_now-days_window)[0][0]\n oindex2=np.where(o_time_num > time_now+days_window)[0][0]\n #use last index oindex2=np.size(o1)-1\n o=o1[oindex1:oindex2]\n elif np.logical_and((o_time_num[-1] < time_now+days_window),(o_time_num[-1] > time_now-days_window)):\n oindex1=np.where(o_time_num > time_now-days_window)[0][0]\n oindex2=np.size(o1)-1\n o=o1[oindex1:oindex2]\n else: o=[] \n\n\n if b_time_num[-1] > time_now+days_window:\n bindex1=np.where(b_time_num > time_now-days_window)[0][0]\n bindex2=np.where(b_time_num > time_now+days_window)[0][0]\n #bindex2=np.size(b1)-1\n b=b1[bindex1:bindex2]\n else: b=[] \n\n\n\n\n #---------------- Wind mag\n\n ax4 = plt.subplot2grid((7,2), (0, 1))\n #plt.plot_date(w_tm,wbx,'-r',label='BR',linewidth=0.5)\n #plt.plot_date(w_tm,wby,'-g',label='BT',linewidth=0.5)\n #plt.plot_date(w_tm,wbz,'-b',label='BN',linewidth=0.5)\n 
#plt.plot_date(w_tm,wbt,'-k',label='Btotal',lw=0.5)\n plt.plot_date(w.time,w.bx,'-r',label='BR',linewidth=0.5)\n plt.plot_date(w.time,w.by,'-g',label='BT',linewidth=0.5)\n plt.plot_date(w.time,w.bz,'-b',label='BN',linewidth=0.5)\n plt.plot_date(w.time,w.bt,'-k',label='Btotal',lw=0.5)\n\n ax4.plot_date([time_now,time_now], [-100,100],'-k', lw=0.5, alpha=0.8)\n ax4.set_ylabel('B [nT] HEEQ',fontsize=fsize-1)\n ax4.xaxis.set_major_formatter( matplotlib.dates.DateFormatter('%b-%d') )\n ax4.set_xlim(time_now-days_window,time_now+days_window)\n ax4.set_ylim(np.nanmin(-w.bt)-5, np.nanmax(w.bt)+5)\n #plt.ylim((-18, 18))\n plt.yticks(fontsize=fsize-1) \n ax4.set_xticklabels([])\n\n\n #---------------- STEREO-A mag\n\n ax6 = plt.subplot2grid((7,2), (1, 1))\n #plt.plot_date(s_tm,sbx,'-r',label='BR',linewidth=0.5)\n #plt.plot_date(s_tm,sby,'-g',label='BT',linewidth=0.5)\n #plt.plot_date(s_tm,sbz,'-b',label='BN',linewidth=0.5)\n #plt.plot_date(s_tm,sbt,'-k',label='Btotal')\n plt.plot_date(s.time,s.bx,'-r',label='BR',linewidth=0.5)\n plt.plot_date(s.time,s.by,'-g',label='BT',linewidth=0.5)\n plt.plot_date(s.time,s.bz,'-b',label='BN',linewidth=0.5)\n plt.plot_date(s.time,s.bt,'-k',label='Btotal',linewidth=0.5)\n\n ax6.set_ylabel('B [nT] RTN',fontsize=fsize-1)\n ax6.plot_date([time_now,time_now], [-100,100],'-k', lw=0.5, alpha=0.8)\n #ax6.xaxis.set_major_formatter( matplotlib.dates.DateFormatter('%b-%d') )\n ax6.set_xlim(time_now-days_window,time_now+days_window)\n ax6.set_xticklabels([])\n ax6.set_ylim(np.nanmin(-s.bt)-5, np.nanmax(s.bt)+5)\n plt.yticks(fontsize=fsize-1) \n plt.tick_params( axis='x', labelbottom='off')\n #plt.ylim((-18, 18))\n \n #---------------- STEREO, Wind speed \n \n ax5 = plt.subplot2grid((7,2), (2, 1))\n plt.plot_date(w.time,w.vt,'-g',label='Wind',linewidth=0.7)\n plt.plot_date(s.time,s.vt,'-r',label='STEREO-A',linewidth=0.7)\n\n #ax5.legend(loc=1, fontsize=10)\n ax5.plot_date([time_now,time_now], [0,900],'-k', lw=0.5, alpha=0.8)\n 
ax5.set_xlim(time_now-days_window,time_now+days_window)\n plt.ylabel('V [km/s]',fontsize=fsize-1)\n plt.ylim((240, 750))\n plt.yticks(fontsize=fsize-1) \n ax5.set_xticklabels([])\n \n\n #ax7 = plt.subplot2grid((6,2), (5, 1))\n #plt.plot_date(s.time,s.vt,'-k',label='V',linewidth=0.7)\n #ax7.plot_date([time_now,time_now], [0,800],'-k', lw=0.5, alpha=0.8)\n #ax7.set_xlim(time_now-days_window,time_now+days_window)\n #ax7.xaxis.set_major_formatter( matplotlib.dates.DateFormatter('%b-%d') )\n #plt.ylabel('V [km/s]',fontsize=fsize-1)\n #plt.tick_params(axis='x', labelbottom='off') \n #plt.ylim((240, 810))\n #plt.yticks(fontsize=fsize-1)\n #plt.xticks(fontsize=fsize)\n\n #---------------------- PSP speed\n\n ax3 = plt.subplot2grid((7,2), (3, 1))\n \n ax3.plot_date([time_now,time_now], [0,1000],'-k', lw=0.5, alpha=0.8)\n ax3.set_xticklabels([])\n ax3.set_xlim(time_now-days_window,time_now+days_window)\n ax3.set_ylim((240, 810))\n plt.ylabel('V [km/s]',fontsize=fsize-1)\n plt.yticks(fontsize=fsize-1)\n ax3.set_xticklabels([])\n\n \n if np.size(p)>0:\n \n #plt.plot_date(p_tp,pv,'-k',label='V',linewidth=0.5)\n plt.plot_date(p.time,p.vt,'-k',label='V',linewidth=0.7)\n\n ax3.set_xlim(time_now-days_window,time_now+days_window)\n ax3.plot_date([time_now,time_now], [0,800],'-k', lw=0.5, alpha=0.8)\n ax3.xaxis.set_major_formatter( matplotlib.dates.DateFormatter('%b-%d') )\n plt.ylabel('V [km/s]',fontsize=fsize-1)\n plt.ylim((240, 750))\n plt.yticks(fontsize=fsize-1)\n ax3.set_xticklabels([])\n\n\n \n\n #---------------------- PSP mag\n\n ax2 = plt.subplot2grid((7,2), (4, 1))\n ax2.plot_date([time_now,time_now], [-1000,1000],'-k', lw=0.5, alpha=0.8)\n ax2.set_xticklabels([])\n ax2.set_xlim(time_now-days_window,time_now+days_window)\n ax2.set_ylim((-18, 18))\n ax2.set_ylabel('B [nT] RTN',fontsize=fsize-1)\n plt.yticks(fontsize=fsize-1)\n\n #when there is data, plot:\n \n if np.size(p)>0:\n\n plt.plot_date(p.time,p.bx,'-r',label='BR',linewidth=0.5)\n 
plt.plot_date(p.time,p.by,'-g',label='BT',linewidth=0.5)\n plt.plot_date(p.time,p.bz,'-b',label='BN',linewidth=0.5)\n plt.plot_date(p.time,p.bt,'-k',label='Btotal',lw=0.5)\n\n ax2.plot_date([time_now,time_now], [-1000,1000],'-k', lw=0.5, alpha=0.8)\n ax2.set_ylabel('B [nT] RTN',fontsize=fsize-1)\n ax2.xaxis.set_major_formatter( matplotlib.dates.DateFormatter('%b-%d') )\n ax2.set_xlim(time_now-days_window,time_now+days_window)\n if np.isfinite(np.nanmin(-p.bt)): ax2.set_ylim(np.nanmin(-p.bt)-5, np.nanmax(p.bt)+5)\n ax2.set_xticklabels([])\n plt.yticks(fontsize=fsize-1)\n \n\n #---------------------- SolO mag\n\n ax7 = plt.subplot2grid((7,2), (5, 1))\n ax7.plot_date([time_now,time_now], [-100,100],'-k', lw=0.5, alpha=0.8)\n ax7.set_xticklabels([])\n ax7.set_xlim(time_now-days_window,time_now+days_window)\n ax7.set_ylim((-18, 18))\n ax7.set_ylabel('B [nT] RTN',fontsize=fsize-1)\n plt.yticks(fontsize=fsize-1)\n ax7.set_xticklabels([])\n\n #when there is data, plot:\n \n if np.size(o)>0:\n \n plt.plot_date(o.time,o.bx,'-r',label='BR',linewidth=0.5)\n plt.plot_date(o.time,o.by,'-g',label='BT',linewidth=0.5)\n plt.plot_date(o.time,o.bz,'-b',label='BN',linewidth=0.5)\n plt.plot_date(o.time,o.bt,'-k',label='Btotal',lw=0.5)\n \n ax7.plot_date([time_now,time_now], [-100,100],'-k', lw=0.5, alpha=0.8)\n ax7.xaxis.set_major_formatter( matplotlib.dates.DateFormatter('%b-%d') )\n ax7.set_xlim(time_now-days_window,time_now+days_window)\n \n \n if np.isfinite(np.nanmax(o.bt)):\n ax7.set_ylim((np.nanmin(-o.bt)-5, np.nanmax(o.bt)+5)) \n else:\n ax7.set_ylim((-15, 15)) \n\n \n ax7.set_xticklabels([])\n plt.yticks(fontsize=fsize-1)\n \n\n \n #---------------------- Bepi mag\n\n ax8 = plt.subplot2grid((7,2), (6, 1))\n ax8.plot_date([time_now,time_now], [-100,100],'-k', lw=0.5, alpha=0.8)\n ax8.set_xlim(time_now-days_window,time_now+days_window)\n ax8.xaxis.set_major_formatter( matplotlib.dates.DateFormatter('%b-%d') )\n ax8.set_ylim((-18, 18))\n ax8.set_ylabel('B [nT] 
RTN',fontsize=fsize-1)\n plt.yticks(fontsize=fsize-1)\n\n if np.size(b)>0:\n\n plt.plot_date(b.time,b.bx,'-r',label='BR',linewidth=0.5)\n plt.plot_date(b.time,b.by,'-g',label='BT',linewidth=0.5)\n plt.plot_date(b.time,b.bz,'-b',label='BN',linewidth=0.5)\n plt.plot_date(b.time,b.bt,'-k',label='Btotal',lw=0.5)\n\n ax8.plot_date([time_now,time_now], [-100,100],'-k', lw=0.5, alpha=0.8)\n ax8.set_ylabel('B [nT] RTN',fontsize=fsize-1)\n ax8.xaxis.set_major_formatter( matplotlib.dates.DateFormatter('%b-%d') )\n ax8.set_xlim(time_now-days_window,time_now+days_window)\n \n if np.isfinite(np.nanmax(b.bt)):\n ax8.set_ylim((np.nanmin(-b.bt)-5, np.nanmax(b.bt)+5)) \n else:\n ax8.set_ylim((-15, 15)) \n\n #ax8.set_ylim((np.nanmin(-b.bt)-5, np.nanmax(b.bt)+5))\n plt.yticks(fontsize=fsize-1)\n\n plt.figtext(0.95,0.82,'Wind', color='mediumseagreen', ha='center',fontsize=fsize+3)\n plt.figtext(0.95,0.71,'STEREO-A', color='red', ha='center',fontsize=fsize+3)\n plt.figtext(0.95,0.63,'Wind', color='mediumseagreen', ha='center',fontsize=fsize+3)\n plt.figtext(0.95,0.58,'STEREO-A', color='red', ha='center',fontsize=fsize+3)\n plt.figtext(0.95,0.49,'PSP ', color='black', ha='center',fontsize=fsize+3)\n plt.figtext(0.95,0.38,'PSP ', color='black', ha='center',fontsize=fsize+3)\n plt.figtext(0.95,0.28,'Solar Orbiter', color='coral', ha='center',fontsize=fsize+5)\n plt.figtext(0.95,0.16,'BepiColombo', color='blue', ha='center',fontsize=fsize+5)\n\n\n\n ############################\n\n #plot text for date extra so it does not move \n #year\n f1=plt.figtext(0.45,0.93,frame_time_str[0:4], ha='center',color=backcolor,fontsize=fsize+6)\n #month\n f2=plt.figtext(0.45+0.04,0.93,frame_time_str[5:7], ha='center',color=backcolor,fontsize=fsize+6)\n #day\n f3=plt.figtext(0.45+0.08,0.93,frame_time_str[8:10], ha='center',color=backcolor,fontsize=fsize+6)\n #hours\n f4=plt.figtext(0.45+0.12,0.93,frame_time_str[11:13], ha='center',color=backcolor,fontsize=fsize+6)\n\n plt.figtext(0.02, 0.02,'Spacecraft 
trajectories '+frame+' 2D projection', fontsize=fsize-1, ha='left',color=backcolor)\t\n \n plt.figtext(0.32,0.02,'――― trajectory from - 60 days to + 60 days', color='black', ha='center',fontsize=fsize-1)\n\n #signature\n \n #BC MPO-MAG (IGEP/IWF/ISAS/IC)\n #auch für Solar Orbiter (MAG, IC), Parker (FIELDS, UCB), STA (IMPACT/PLASTIC, UNH, UCLA), Wind (MFI, SWE, NASA??) STA-HI (RAL)\n\n plt.figtext(0.85,0.02,'Data sources: BepiColombo: MPO-MAG (IGEP/IWF/ISAS/IC), PSP (FIELDS, UCB), Solar Orbiter (MAG, IC)', fontsize=fsize-2, ha='right',color=backcolor) \n\n\n \n #signature\n plt.figtext(0.99,0.01/2,'Möstl, Weiss, Bailey, Reiss / Helio4Cast', fontsize=fsize-4, ha='right',color=backcolor) \n\n #save figure\n framestr = '%05i' % (k) \n filename=outputdirectory+'/pos_anim_'+framestr+'.jpg' \n if k==0: print(filename)\n plt.savefig(filename,dpi=200,facecolor=fig.get_facecolor(), edgecolor='none')\n #plt.clf()\n #if close==True: plt.close('all')\n\n\n plt.close('all')\n\n\n\n ########################################### loop end",
"_____no_output_____"
],
[
"def make_frame2(k):\n '''\n loop each frame in multiprocessing\n '''\n \n\n fig=plt.figure(1, figsize=(19.2,10.8), dpi=100) #full hd\n #fig=plt.figure(1, figsize=(19.2*2,10.8*2), dpi=100) #4k\n ax = plt.subplot2grid((7,2), (0, 0), rowspan=7, projection='polar')\n backcolor='black'\n psp_color='black'\n bepi_color='blue'\n solo_color='coral'\n\n\n frame_time_str=str(mdates.num2date(frame_time_num+k*res_in_days))\n print( 'current frame_time_num', frame_time_str, ' ',k)\n\n #these have their own times\n dct=frame_time_num+k*res_in_days-psp.time\n psp_timeind=np.argmin(abs(dct))\n\n dct=frame_time_num+k*res_in_days-bepi.time\n bepi_timeind=np.argmin(abs(dct))\n\n dct=frame_time_num+k*res_in_days-solo.time\n solo_timeind=np.argmin(abs(dct))\n\n #all same times\n dct=frame_time_num+k*res_in_days-earth.time\n earth_timeind=np.argmin(abs(dct))\n\n #plot all positions including text R lon lat for some \n\n #white background\n\n ax.scatter(venus.lon[earth_timeind], venus.r[earth_timeind]*np.cos(venus.lat[earth_timeind]), s=symsize_planet, c='orange', alpha=1,lw=0,zorder=3)\n ax.scatter(mercury.lon[earth_timeind], mercury.r[earth_timeind]*np.cos(mercury.lat[earth_timeind]), s=symsize_planet, c='dimgrey', alpha=1,lw=0,zorder=3)\n ax.scatter(earth.lon[earth_timeind], earth.r[earth_timeind]*np.cos(earth.lat[earth_timeind]), s=symsize_planet, c='mediumseagreen', alpha=1,lw=0,zorder=3)\n ax.scatter(sta.lon[earth_timeind], sta.r[earth_timeind]*np.cos(sta.lat[earth_timeind]), s=symsize_spacecraft, c='red', marker='s', alpha=1,lw=0,zorder=3)\n ax.scatter(mars.lon[earth_timeind], mars.r[earth_timeind]*np.cos(mars.lat[earth_timeind]), s=symsize_planet, c='orangered', alpha=1,lw=0,zorder=3)\n\n\n\n\n #plot stereoa fov hi1/2 \n hp.plot_stereo_hi_fov(sta,frame_time_num, earth_timeind, ax,'A')\n\n\n\n #positions text\n f10=plt.figtext(0.01,0.93,' R lon lat', fontsize=fsize+2, ha='left',color=backcolor)\n\n if frame=='HEEQ': earth_text='Earth: 
'+str(f'{earth.r[earth_timeind]:6.2f}')+str(f'{0.0:8.1f}')+str(f'{np.rad2deg(earth.lat[earth_timeind]):8.1f}')\n else: earth_text='Earth: '+str(f'{earth.r[earth_timeind]:6.2f}')+str(f'{np.rad2deg(earth.lon[earth_timeind]):8.1f}')+str(f'{np.rad2deg(earth.lat[earth_timeind]):8.1f}')\n\n mars_text='Mars: '+str(f'{mars.r[earth_timeind]:6.2f}')+str(f'{np.rad2deg(mars.lon[earth_timeind]):8.1f}')+str(f'{np.rad2deg(mars.lat[earth_timeind]):8.1f}')\n sta_text='STA: '+str(f'{sta.r[earth_timeind]:6.2f}')+str(f'{np.rad2deg(sta.lon[earth_timeind]):8.1f}')+str(f'{np.rad2deg(sta.lat[earth_timeind]):8.1f}')\n\n #position and text \n if psp_timeind > 0:\n #plot trajectorie\n ax.scatter(psp.lon[psp_timeind], psp.r[psp_timeind]*np.cos(psp.lat[psp_timeind]), s=symsize_spacecraft, c=psp_color, marker='s', alpha=1,lw=0,zorder=3)\n #plot positiona as text\n psp_text='PSP: '+str(f'{psp.r[psp_timeind]:6.2f}')+str(f'{np.rad2deg(psp.lon[psp_timeind]):8.1f}')+str(f'{np.rad2deg(psp.lat[psp_timeind]):8.1f}')\n f5=plt.figtext(0.01,0.78,psp_text, fontsize=fsize, ha='left',color=psp_color)\n if plot_orbit: \n fadestart=psp_timeind-fadeind\n if fadestart < 0: fadestart=0\n ax.plot(psp.lon[fadestart:psp_timeind+fadeind], psp.r[fadestart:psp_timeind+fadeind]*np.cos(psp.lat[fadestart:psp_timeind+fadeind]), c=psp_color, alpha=0.6,lw=1,zorder=3)\n\n if bepi_timeind > 0:\n ax.scatter(bepi.lon[bepi_timeind], bepi.r[bepi_timeind]*np.cos(bepi.lat[bepi_timeind]), s=symsize_spacecraft, c=bepi_color, marker='s', alpha=1,lw=0,zorder=3)\n bepi_text='Bepi: '+str(f'{bepi.r[bepi_timeind]:6.2f}')+str(f'{np.rad2deg(bepi.lon[bepi_timeind]):8.1f}')+str(f'{np.rad2deg(bepi.lat[bepi_timeind]):8.1f}')\n f6=plt.figtext(0.01,0.74,bepi_text, fontsize=fsize, ha='left',color=bepi_color)\n if plot_orbit: \n fadestart=bepi_timeind-fadeind\n if fadestart < 0: fadestart=0 \n ax.plot(bepi.lon[fadestart:bepi_timeind+fadeind], bepi.r[fadestart:bepi_timeind+fadeind]*np.cos(bepi.lat[fadestart:bepi_timeind+fadeind]), c=bepi_color, 
alpha=0.6,lw=1,zorder=3)\n\n if solo_timeind > 0:\n ax.scatter(solo.lon[solo_timeind], solo.r[solo_timeind]*np.cos(solo.lat[solo_timeind]), s=symsize_spacecraft, c=solo_color, marker='s', alpha=1,lw=0,zorder=3)\n solo_text='SolO: '+str(f'{solo.r[solo_timeind]:6.2f}')+str(f'{np.rad2deg(solo.lon[solo_timeind]):8.1f}')+str(f'{np.rad2deg(solo.lat[solo_timeind]):8.1f}')\n f7=plt.figtext(0.01,0.7,solo_text, fontsize=fsize, ha='left',color=solo_color)\n if plot_orbit: \n fadestart=solo_timeind-fadeind\n if fadestart < 0: fadestart=0 \n ax.plot(solo.lon[fadestart:solo_timeind+fadeind], solo.r[fadestart:solo_timeind+fadeind]*np.cos(solo.lat[fadestart:solo_timeind+fadeind]), c=solo_color, alpha=0.6,lw=1,zorder=3)\n\n f10=plt.figtext(0.01,0.9,earth_text, fontsize=fsize, ha='left',color='mediumseagreen')\n f9=plt.figtext(0.01,0.86,mars_text, fontsize=fsize, ha='left',color='orangered')\n f8=plt.figtext(0.01,0.82,sta_text, fontsize=fsize, ha='left',color='red')\n \n \n \n\n ######################## 1 plot all active CME circles\n\n plot_hi_geo=True\n \n \n if plot_hi_geo:\n lamda=30\n #check for active CME indices from HIGeoCAT (with the lists produced above in this notebook)\n #check where time is identical to frame time\n cmeind=np.where(hc_time_num == frame_time_num+k*res_in_days)\n #print(cmeind)\n #plot all active CME circles\n #if np.size(cmeind) >0:\n for p in range(0,np.size(cmeind)):\n\n #print p, h.all_apex_long[cmeind[0][p]], h.all_apex_r[cmeind[0][p]]\n #central d\n dir=np.array([np.cos(hc_lon[cmeind[0][p]]*np.pi/180),np.sin(hc_lon[cmeind[0][p]]*np.pi/180)])*hc_r[cmeind[0][p]]\n\n #points on circle, correct for longitude\n circ_ang = ((np.arange(111)*2-20)*np.pi/180)-(hc_lon[cmeind[0][p]]*np.pi/180)\n\n #these equations are from moestl and davies 2013\n xc = 0+dir[0]/(1+np.sin(lamda*np.pi/180)) + (hc_r[cmeind[0][p]]*np.sin(lamda*np.pi/180)/(1+np.sin(lamda*np.pi/180)))*np.sin(circ_ang)\n yc = 0+dir[1]/(1+np.sin(lamda*np.pi/180)) + 
(hc_r[cmeind[0][p]]*np.sin(lamda*np.pi/180)/(1+np.sin(lamda*np.pi/180)))*np.cos(circ_ang)\n #now convert to polar coordinates\n rcirc=np.sqrt(xc**2+yc**2)\n longcirc=np.arctan2(yc,xc)\n #plot in correct color\n if hc_name[cmeind[0][p]] == 'A': \n #make alpha dependent on distance to solar equatorial plane - maximum latitude is -40/+40 - \n #so to make also the -/+40 latitude CME visible, divide by 50 so alpha > 0 for these events\n ax.plot(longcirc,rcirc, c='red', alpha=1-abs(hc_lat[cmeind[0][p]]/50), lw=1.5) \n if hc_name[cmeind[0][p]] == 'B':\n ax.plot(longcirc,rcirc, c='royalblue', alpha=1-abs(hc_lat[cmeind[0][p]]/50), lw=1.5) \n\n \n \n\n\n\n #set axes and grid\n ax.set_theta_zero_location('E')\n #plt.thetagrids(range(0,360,45),(u'0\\u00b0 '+frame+' longitude',u'45\\u00b0',u'90\\u00b0',u'135\\u00b0',u'+/- 180\\u00b0',u'- 135\\u00b0',u'- 90\\u00b0',u'- 45\\u00b0'), ha='right', fmt='%d',fontsize=fsize-1,color=backcolor, alpha=0.9)\n plt.thetagrids(range(0,360,45),(u'0\\u00b0',u'45\\u00b0',u'90\\u00b0',u'135\\u00b0',u'+/- 180\\u00b0',u'- 135\\u00b0',u'- 90\\u00b0',u'- 45\\u00b0'), ha='center', fmt='%d',fontsize=fsize-1,color=backcolor, alpha=0.9,zorder=4)\n\n\n #plt.rgrids((0.10,0.39,0.72,1.00,1.52),('0.10','0.39','0.72','1.0','1.52 AU'),angle=125, fontsize=fsize,alpha=0.9, color=backcolor)\n plt.rgrids((0.1,0.3,0.5,0.7,1.0),('0.10','0.3','0.5','0.7','1.0 AU'),angle=125, fontsize=fsize-3,alpha=0.5, color=backcolor)\n\n #ax.set_ylim(0, 1.75) #with Mars\n ax.set_ylim(0, 1.2) \n\n #Sun\n ax.scatter(0,0,s=100,c='yellow',alpha=1, edgecolors='black', linewidth=0.3)\n\n\n \n\n #------------------------------------------------ IN SITU DATA ------------------------------------------------------\n\n\n time_now=frame_time_num+k*res_in_days\n \n #cut data for plot window so faster\n \n windex1=np.where(w_time_num > time_now-days_window)[0][0]\n windex2=np.where(w_time_num > time_now+days_window)[0][0]\n w=w1[windex1:windex2]\n\n sindex1=np.where(s_time_num > 
time_now-days_window)[0][0]\n sindex2=np.where(s_time_num > time_now+days_window)[0][0]\n s=s1[sindex1:sindex2]\n\n #is data available from new missions?\n \n if p_time_num[-1] > time_now+days_window:\n pindex1=np.where(p_time_num > time_now-days_window)[0][0]\n pindex2=np.where(p_time_num > time_now+days_window)[0][0]\n #pindex2=np.size(p1)-1\n p=p1[pindex1:pindex2]\n elif np.logical_and((p_time_num[-1] < time_now+days_window),(p_time_num[-1] > time_now-days_window)):\n pindex1=np.where(p_time_num > time_now-days_window)[0][0]\n pindex2=np.size(p1)-1\n p=p1[pindex1:pindex2]\n else: p=[] \n\n \n if o_time_num[-1] > time_now+days_window:\n oindex1=np.where(o_time_num > time_now-days_window)[0][0]\n oindex2=np.where(o_time_num > time_now+days_window)[0][0]\n #use last index oindex2=np.size(o1)-1\n o=o1[oindex1:oindex2]\n elif np.logical_and((o_time_num[-1] < time_now+days_window),(o_time_num[-1] > time_now-days_window)):\n oindex1=np.where(o_time_num > time_now-days_window)[0][0]\n oindex2=np.size(o1)-1\n o=o1[oindex1:oindex2]\n else: o=[] \n\n\n if b_time_num[-1] > time_now+days_window:\n bindex1=np.where(b_time_num > time_now-days_window)[0][0]\n bindex2=np.where(b_time_num > time_now+days_window)[0][0]\n #bindex2=np.size(b1)-1\n b=b1[bindex1:bindex2]\n else: b=[] \n\n\n\n\n #---------------- Wind mag\n\n ax4 = plt.subplot2grid((7,2), (0, 1))\n #plt.plot_date(w_tm,wbx,'-r',label='BR',linewidth=0.5)\n #plt.plot_date(w_tm,wby,'-g',label='BT',linewidth=0.5)\n #plt.plot_date(w_tm,wbz,'-b',label='BN',linewidth=0.5)\n #plt.plot_date(w_tm,wbt,'-k',label='Btotal',lw=0.5)\n plt.plot_date(w.time,w.bx,'-r',label='BR',linewidth=0.5)\n plt.plot_date(w.time,w.by,'-g',label='BT',linewidth=0.5)\n plt.plot_date(w.time,w.bz,'-b',label='BN',linewidth=0.5)\n plt.plot_date(w.time,w.bt,'-k',label='Btotal',lw=0.5)\n\n ax4.plot_date([time_now,time_now], [-100,100],'-k', lw=0.5, alpha=0.8)\n ax4.set_ylabel('B [nT] HEEQ',fontsize=fsize-1)\n ax4.xaxis.set_major_formatter( 
matplotlib.dates.DateFormatter('%b-%d') )\n ax4.set_xlim(time_now-days_window,time_now+days_window)\n ax4.set_ylim(np.nanmin(-w.bt)-5, np.nanmax(w.bt)+5)\n #plt.ylim((-18, 18))\n plt.yticks(fontsize=fsize-1) \n ax4.set_xticklabels([])\n\n\n #---------------- STEREO-A mag\n\n ax6 = plt.subplot2grid((7,2), (1, 1))\n #plt.plot_date(s_tm,sbx,'-r',label='BR',linewidth=0.5)\n #plt.plot_date(s_tm,sby,'-g',label='BT',linewidth=0.5)\n #plt.plot_date(s_tm,sbz,'-b',label='BN',linewidth=0.5)\n #plt.plot_date(s_tm,sbt,'-k',label='Btotal')\n plt.plot_date(s.time,s.bx,'-r',label='BR',linewidth=0.5)\n plt.plot_date(s.time,s.by,'-g',label='BT',linewidth=0.5)\n plt.plot_date(s.time,s.bz,'-b',label='BN',linewidth=0.5)\n plt.plot_date(s.time,s.bt,'-k',label='Btotal',linewidth=0.5)\n\n ax6.set_ylabel('B [nT] RTN',fontsize=fsize-1)\n ax6.plot_date([time_now,time_now], [-100,100],'-k', lw=0.5, alpha=0.8)\n #ax6.xaxis.set_major_formatter( matplotlib.dates.DateFormatter('%b-%d') )\n ax6.set_xlim(time_now-days_window,time_now+days_window)\n ax6.set_xticklabels([])\n ax6.set_ylim(np.nanmin(-s.bt)-5, np.nanmax(s.bt)+5)\n plt.yticks(fontsize=fsize-1) \n plt.tick_params( axis='x', labelbottom='off')\n #plt.ylim((-18, 18))\n \n #---------------- STEREO, Wind speed \n \n ax5 = plt.subplot2grid((7,2), (2, 1))\n plt.plot_date(w.time,w.vt,'-g',label='Wind',linewidth=0.7)\n plt.plot_date(s.time,s.vt,'-r',label='STEREO-A',linewidth=0.7)\n\n #ax5.legend(loc=1, fontsize=10)\n ax5.plot_date([time_now,time_now], [0,900],'-k', lw=0.5, alpha=0.8)\n ax5.set_xlim(time_now-days_window,time_now+days_window)\n plt.ylabel('V [km/s]',fontsize=fsize-1)\n plt.ylim((240, 750))\n plt.yticks(fontsize=fsize-1) \n ax5.set_xticklabels([])\n \n\n #ax7 = plt.subplot2grid((6,2), (5, 1))\n #plt.plot_date(s.time,s.vt,'-k',label='V',linewidth=0.7)\n #ax7.plot_date([time_now,time_now], [0,800],'-k', lw=0.5, alpha=0.8)\n #ax7.set_xlim(time_now-days_window,time_now+days_window)\n #ax7.xaxis.set_major_formatter( 
matplotlib.dates.DateFormatter('%b-%d') )\n #plt.ylabel('V [km/s]',fontsize=fsize-1)\n #plt.tick_params(axis='x', labelbottom='off') \n #plt.ylim((240, 810))\n #plt.yticks(fontsize=fsize-1)\n #plt.xticks(fontsize=fsize)\n\n #---------------------- PSP speed\n\n ax3 = plt.subplot2grid((7,2), (3, 1))\n \n ax3.plot_date([time_now,time_now], [0,1000],'-k', lw=0.5, alpha=0.8)\n ax3.set_xticklabels([])\n ax3.set_xlim(time_now-days_window,time_now+days_window)\n ax3.set_ylim((240, 810))\n plt.ylabel('V [km/s]',fontsize=fsize-1)\n plt.yticks(fontsize=fsize-1)\n ax3.set_xticklabels([])\n\n \n if np.size(p)>0:\n \n #plt.plot_date(p_tp,pv,'-k',label='V',linewidth=0.5)\n plt.plot_date(p.time,p.vt,'-k',label='V',linewidth=0.7)\n\n ax3.set_xlim(time_now-days_window,time_now+days_window)\n ax3.plot_date([time_now,time_now], [0,800],'-k', lw=0.5, alpha=0.8)\n ax3.xaxis.set_major_formatter( matplotlib.dates.DateFormatter('%b-%d') )\n plt.ylabel('V [km/s]',fontsize=fsize-1)\n plt.ylim((240, 750))\n plt.yticks(fontsize=fsize-1)\n ax3.set_xticklabels([])\n\n\n \n\n #---------------------- PSP mag\n\n ax2 = plt.subplot2grid((7,2), (4, 1))\n ax2.plot_date([time_now,time_now], [-1000,1000],'-k', lw=0.5, alpha=0.8)\n ax2.set_xticklabels([])\n ax2.set_xlim(time_now-days_window,time_now+days_window)\n ax2.set_ylim((-18, 18))\n ax2.set_ylabel('B [nT] RTN',fontsize=fsize-1)\n plt.yticks(fontsize=fsize-1)\n\n #when there is data, plot:\n \n if np.size(p)>0:\n\n plt.plot_date(p.time,p.bx,'-r',label='BR',linewidth=0.5)\n plt.plot_date(p.time,p.by,'-g',label='BT',linewidth=0.5)\n plt.plot_date(p.time,p.bz,'-b',label='BN',linewidth=0.5)\n plt.plot_date(p.time,p.bt,'-k',label='Btotal',lw=0.5)\n\n ax2.plot_date([time_now,time_now], [-1000,1000],'-k', lw=0.5, alpha=0.8)\n ax2.set_ylabel('B [nT] RTN',fontsize=fsize-1)\n ax2.xaxis.set_major_formatter( matplotlib.dates.DateFormatter('%b-%d') )\n ax2.set_xlim(time_now-days_window,time_now+days_window)\n if np.isfinite(np.nanmin(-p.bt)): 
ax2.set_ylim(np.nanmin(-p.bt)-5, np.nanmax(p.bt)+5)\n ax2.set_xticklabels([])\n plt.yticks(fontsize=fsize-1)\n \n\n #---------------------- SolO mag\n\n ax7 = plt.subplot2grid((7,2), (5, 1))\n ax7.plot_date([time_now,time_now], [-100,100],'-k', lw=0.5, alpha=0.8)\n ax7.set_xticklabels([])\n ax7.set_xlim(time_now-days_window,time_now+days_window)\n ax7.set_ylim((-18, 18))\n ax7.set_ylabel('B [nT] RTN',fontsize=fsize-1)\n plt.yticks(fontsize=fsize-1)\n ax7.set_xticklabels([])\n\n #when there is data, plot:\n \n if np.size(o)>0:\n \n plt.plot_date(o.time,o.bx,'-r',label='BR',linewidth=0.5)\n plt.plot_date(o.time,o.by,'-g',label='BT',linewidth=0.5)\n plt.plot_date(o.time,o.bz,'-b',label='BN',linewidth=0.5)\n plt.plot_date(o.time,o.bt,'-k',label='Btotal',lw=0.5)\n \n ax7.plot_date([time_now,time_now], [-100,100],'-k', lw=0.5, alpha=0.8)\n ax7.xaxis.set_major_formatter( matplotlib.dates.DateFormatter('%b-%d') )\n ax7.set_xlim(time_now-days_window,time_now+days_window)\n \n \n if np.isfinite(np.nanmax(o.bt)):\n ax7.set_ylim((np.nanmin(-o.bt)-5, np.nanmax(o.bt)+5)) \n else:\n ax7.set_ylim((-15, 15)) \n\n \n ax7.set_xticklabels([])\n plt.yticks(fontsize=fsize-1)\n \n\n \n #---------------------- Bepi mag\n\n ax8 = plt.subplot2grid((7,2), (6, 1))\n ax8.plot_date([time_now,time_now], [-100,100],'-k', lw=0.5, alpha=0.8)\n ax8.set_xlim(time_now-days_window,time_now+days_window)\n ax8.xaxis.set_major_formatter( matplotlib.dates.DateFormatter('%b-%d') )\n ax8.set_ylim((-18, 18))\n ax8.set_ylabel('B [nT] RTN',fontsize=fsize-1)\n plt.yticks(fontsize=fsize-1)\n\n if np.size(b)>0:\n\n plt.plot_date(b.time,b.bx,'-r',label='BR',linewidth=0.5)\n plt.plot_date(b.time,b.by,'-g',label='BT',linewidth=0.5)\n plt.plot_date(b.time,b.bz,'-b',label='BN',linewidth=0.5)\n plt.plot_date(b.time,b.bt,'-k',label='Btotal',lw=0.5)\n\n ax8.plot_date([time_now,time_now], [-100,100],'-k', lw=0.5, alpha=0.8)\n ax8.set_ylabel('B [nT] RTN',fontsize=fsize-1)\n ax8.xaxis.set_major_formatter( 
matplotlib.dates.DateFormatter('%b-%d') )\n ax8.set_xlim(time_now-days_window,time_now+days_window)\n \n if np.isfinite(np.nanmax(b.bt)):\n ax8.set_ylim((np.nanmin(-b.bt)-5, np.nanmax(b.bt)+5)) \n else:\n ax8.set_ylim((-15, 15)) \n\n #ax8.set_ylim((np.nanmin(-b.bt)-5, np.nanmax(b.bt)+5))\n plt.yticks(fontsize=fsize-1)\n\n plt.figtext(0.95,0.82,'Wind', color='mediumseagreen', ha='center',fontsize=fsize+3)\n plt.figtext(0.95,0.71,'STEREO-A', color='red', ha='center',fontsize=fsize+3)\n plt.figtext(0.95,0.63,'Wind', color='mediumseagreen', ha='center',fontsize=fsize+3)\n plt.figtext(0.95,0.58,'STEREO-A', color='red', ha='center',fontsize=fsize+3)\n plt.figtext(0.95,0.49,'PSP ', color='black', ha='center',fontsize=fsize+3)\n plt.figtext(0.95,0.38,'PSP ', color='black', ha='center',fontsize=fsize+3)\n plt.figtext(0.95,0.28,'Solar Orbiter', color='coral', ha='center',fontsize=fsize+5)\n plt.figtext(0.95,0.16,'BepiColombo', color='blue', ha='center',fontsize=fsize+5)\n\n\n\n ############################\n\n #plot text for date extra so it does not move \n #year\n f1=plt.figtext(0.45,0.93,frame_time_str[0:4], ha='center',color=backcolor,fontsize=fsize+6)\n #month\n f2=plt.figtext(0.45+0.04,0.93,frame_time_str[5:7], ha='center',color=backcolor,fontsize=fsize+6)\n #day\n f3=plt.figtext(0.45+0.08,0.93,frame_time_str[8:10], ha='center',color=backcolor,fontsize=fsize+6)\n #hours\n f4=plt.figtext(0.45+0.12,0.93,frame_time_str[11:13], ha='center',color=backcolor,fontsize=fsize+6)\n\n plt.figtext(0.02, 0.02,'Spacecraft trajectories in '+frame+' coordinates', fontsize=fsize-1, ha='left',color=backcolor)\t\n \n plt.figtext(0.32,0.02,'――― trajectory from - 60 days to + 60 days', color='black', ha='center',fontsize=fsize-1)\n\n #signature\n \n #BC MPO-MAG (IGEP/IWF/ISAS/IC)\n #auch für Solar Orbiter (MAG, IC), Parker (FIELDS, UCB), STA (IMPACT/PLASTIC, UNH, UCLA), Wind (MFI, SWE, NASA??) 
STA-HI (RAL)\n\n plt.figtext(0.85,0.02,'Data sources: BepiColombo: MPO-MAG (IGEP/IWF/ISAS/IC), PSP (FIELDS, UCB), Solar Orbiter (MAG, IC)', fontsize=fsize-2, ha='right',color=backcolor) \n\n\n \n #signature\n plt.figtext(0.99,0.01/2,'Möstl, Weiss, Bailey, Reiss / Helio4Cast', fontsize=fsize-4, ha='right',color=backcolor) \n \n \n \n categories = np.array([0, 2, 1, 1, 1, 2, 0, 0])\n\n colormap = np.array(['r', 'g', 'b'])\n \n \n steps=60\n #parker spiral\n if plot_parker:\n for q in np.arange(0,steps):\n omega=2*np.pi/(sun_rot*60*60*24) #solar rotation in seconds\n v=400/AUkm #km/s\n r0=695000/AUkm\n r=v/omega*theta+r0*7\n windcolor=cmap.hot(w.vt[2315]/5)\n #print(windcolor)\n #print(w.vt[2315+q*10])\n ax.plot(-theta+np.deg2rad(0+(360/24.47)*res_in_days*k+360/steps*q), r, alpha=0.1, lw=5.0,color=windcolor, zorder=2)\n #print(theta) \n \n #save figure\n framestr = '%05i' % (k) \n filename=outputdirectory+'/pos_anim_'+framestr+'.jpg' \n if k==0: print(filename)\n plt.savefig(filename,dpi=200,facecolor=fig.get_facecolor(), edgecolor='none')\n #plt.clf()\n #if close==True: plt.close('all')\n \n filename='lineups/pos_anim_'+framestr+'.png' \n plt.savefig(filename,dpi=200,facecolor=fig.get_facecolor(), edgecolor='none')\n \n filename='lineups/pos_anim_'+framestr+'.jpg' \n plt.savefig(filename,dpi=100,facecolor=fig.get_facecolor(), edgecolor='none')\n\n #plt.close('all')\n\n \n\n\n ########################################### loop end\n \n\n \n#for multipoint lineup paper\n#june event\n#make_frame2(3810)\n#nov event\nmake_frame2(10910)",
"_____no_output_____"
]
],
[
[
"### Get in situ data",
"_____no_output_____"
]
],
[
[
"get_data=1\n\nif get_data > 0:\n\n\n file=data_path+'wind_2018_now_heeq.p'\n [w,wh]=pickle.load(open(file, \"rb\" ) ) \n \n #function for spike removal, see list with times in that function\n w=hd.remove_wind_spikes_gaps(w)\n\n\n #cut with 2018 Oct 1\n wcut=np.where(w.time> parse_time('2018-10-01').datetime)[0][0]\n w=w[wcut:-1]\n\n #file=data_path+'stereoa_2007_2019_sceq.p'\n #[s,sh]=pickle.load(open(file, \"rb\" ) )\n #file=data_path+'stereoa_2019_now_sceq.p'\n \n \n\n ########### STA\n \n print('load and merge STEREO-A data SCEQ') #yearly magplasma files from stereo science center, conversion to SCEQ \n filesta1='stereoa_2007_2020_rtn.p'\n sta1=pickle.load(open(data_path+filesta1, \"rb\" ) ) \n \n #beacon data\n #filesta2=\"stereoa_2019_2020_sceq_beacon.p\"\n #filesta2='stereoa_2019_2020_sept_sceq_beacon.p'\n #filesta2='stereoa_2019_now_sceq_beacon.p'\n #filesta2=\"stereoa_2020_august_november_rtn_beacon.p\" \n filesta2='stereoa_2020_now_sceq_beacon.p'\n [sta2,hsta2]=pickle.load(open(data_path+filesta2, \"rb\" ) ) \n #sta2=sta2[np.where(sta2.time >= parse_time('2020-Aug-01 00:00').datetime)[0]]\n\n #make array\n sta=np.zeros(np.size(sta1.time)+np.size(sta2.time),dtype=[('time',object),('bx', float),('by', float),\\\n ('bz', float),('bt', float),('vt', float),('np', float),('tp', float),\\\n ('x', float),('y', float),('z', float),\\\n ('r', float),('lat', float),('lon', float)]) \n\n #convert to recarray\n sta = sta.view(np.recarray) \n sta.time=np.hstack((sta1.time,sta2.time))\n sta.bx=np.hstack((sta1.bx,sta2.bx))\n sta.by=np.hstack((sta1.by,sta2.by))\n sta.bz=np.hstack((sta1.bz,sta2.bz))\n sta.bt=np.hstack((sta1.bt,sta2.bt))\n sta.vt=np.hstack((sta1.vt,sta2.vt))\n sta.np=np.hstack((sta1.np,sta2.np))\n sta.tp=np.hstack((sta1.tp,sta2.tp))\n sta.x=np.hstack((sta1.x,sta2.x))\n sta.y=np.hstack((sta1.y,sta2.y))\n sta.z=np.hstack((sta1.z,sta2.z))\n sta.r=np.hstack((sta1.r,sta2.r))\n sta.lon=np.hstack((sta1.lon,sta2.lon))\n sta.lat=np.hstack((sta1.lat,sta2.lat))\n 
print('STA Merging done')\n\n\n\n #cut with 2018 Oct 1\n scut=np.where(sta.time> parse_time('2018-10-01').datetime)[0][0]\n\n s=sta[scut:-1]\n \n \n ######### Bepi \n file=data_path+'bepi_2019_2021_rtn.p'\n b1=pickle.load(open(file, \"rb\" ) ) \n \n file=data_path+'bepi_2021_ib_rtn.p'\n b2=pickle.load(open(file, \"rb\" ) ) \n \n \n #make array\n b=np.zeros(np.size(b1.time)+np.size(b2.time),dtype=[('time',object),('bx', float),('by', float),\\\n ('bz', float),('bt', float),\\\n ('x', float),('y', float),('z', float),\\\n ('r', float),('lat', float),('lon', float)]) \n\n #convert to recarray\n b = b.view(np.recarray) \n b.time=np.hstack((b1.time,b2.time))\n b.bx=np.hstack((b1.bx,b2.bx))\n b.by=np.hstack((b1.by,b2.by))\n b.bz=np.hstack((b1.bz,b2.bz))\n b.bt=np.hstack((b1.bt,b2.bt))\n b.x=np.hstack((b1.x,b2.x))\n b.y=np.hstack((b1.y,b2.y))\n b.z=np.hstack((b1.z,b2.z))\n b.r=np.hstack((b1.r,b2.r))\n b.lon=np.hstack((b1.lon,b2.lon))\n b.lat=np.hstack((b1.lat,b2.lat))\n print('Bepi Merging done')\n\n \n\n #################################### PSP, SolO\n file=data_path+'psp_2018_2021_rtn.p'\n [p,ph]=pickle.load(open(file, \"rb\" ) ) \n \n file=data_path+'solo_2020_april_2021_july_rtn.p'\n o=pickle.load(open(file, \"rb\" ) ) \n\n \n #save data for faster use\n file='data/movie_data_aug21.p'\n pickle.dump([p,w,s,o,b], open(file, 'wb'))\n\n \nprint('load data from data/movie_data_aug21.p')\n[p1,w1,s1,o1,b1]=pickle.load(open('data/movie_data_aug21.p', \"rb\" ) ) \n\n\np_time_num=parse_time(p1.time).plot_date\nw_time_num=parse_time(w1.time).plot_date\ns_time_num=parse_time(s1.time).plot_date\no_time_num=parse_time(o1.time).plot_date\nb_time_num=parse_time(b1.time).plot_date\n\n\n\n#median filter psp speed because of spikes\np1.vt=medfilt(p1.vt,31)\n\nprint('done')",
"load and merge STEREO-A data SCEQ\nSTA Merging done\nBepi Merging done\nload data from data/movie_data_aug21.p\ndone\n"
]
],
[
[
"# Make movie",
"_____no_output_____"
],
[
"### Settings",
"_____no_output_____"
]
],
[
[
"plt.close('all')\n\n#Coordinate System\n#frame='HCI'\nframe='HEEQ'\nprint(frame)\n\n#sidereal solar rotation rate\nif frame=='HCI': sun_rot=24.47\n#synodic\nif frame=='HEEQ': sun_rot=26.24\n\nAUkm=149597870.7 \n\n#black background on or off\n#black=True\nblack=False\n\n#animation settings\nplot_orbit=True\n#plot_orbit=False\nplot_parker=True\n#plot_parker=False\n\nhigh_res_mode=False\n\n#orbit 1\n#outputdirectory='results/anim_plots_sc_insitu_final_orbit1'\n#animdirectory='results/anim_movie_sc_insitu_final_orbit1'\n#t_start ='2018-Oct-15'\n#t_end ='2018-Dec-06'\n\n#t_start ='2018-Dec-03'\n#t_end ='2018-Dec-06'\n#orbit all\n\n\n#from Parker start\n#outputdirectory='results/overview_movie_nov_2020_frames_2'\n#animdirectory='results/overview_movie_nov_2020_2'\n#t_start ='2018-Oct-25'\n#t_end ='2020-Apr-15'\n#res_in_days=1/24. #1hour =1/24\n#make time range\n#time_array = [ parse_time(t_start).datetime + timedelta(hours=1*n) \\\n# for n in range(int ((parse_time(t_end).datetime - parse_time(t_start).datetime).days*24))] \n\n\n\n######## from Solar Orbiter Start \noutputdirectory='results/overview_movie_apr21_sep21_frames'\nanimdirectory='results/overview_movie_apr21_sep21'\nt_start ='2021-Apr-1'\nt_end ='2021-Sep-30'\n#t_end ='2021-Jun-20'\nres_in_days=1/48. 
#1hour =1/24\n\n#make time range to see how much frames are needed \nstarttime = parse_time(t_start).datetime\nendtime = parse_time(t_end).datetime\nalltimes = []\nwhile starttime < endtime:\n alltimes.append(starttime)\n starttime += timedelta(days=res_in_days)\nk_all=np.size(alltimes)\n\ndays_window=3 #size of in situ timerange\n\n\nif os.path.isdir(outputdirectory) == False: os.mkdir(outputdirectory)\nif os.path.isdir(animdirectory) == False: os.mkdir(animdirectory)\n\npositions_plot_directory='results/plots_positions/'\nif os.path.isdir(positions_plot_directory) == False: os.mkdir(positions_plot_directory)\n\nprint(k_all)\n\n########## MAKE TRAJECTORIES\n#make_positions()\n\n\nprint('load positions')\n#load positions\n[psp, bepi, solo, sta, stb, messenger, ulysses, earth, venus, mars, mercury,jupiter, saturn, uranus, neptune,frame]=pickle.load( open( 'results/positions_HEEQ_1hr.p', \"rb\" ) )\n\nprint('load HIGEOCAT kinematics')\n[hc_time,hc_r,hc_lat,hc_lon,hc_name]=pickle.load(open('data/higeocat_kinematics.p', \"rb\"))\n\n\nprint('done')",
"HEEQ\n8736\nload positions\nload HIGEOCAT kinematics\ndone\n"
]
],
[
[
"## test animation frames",
"_____no_output_____"
]
],
[
[
"#for server\n#matplotlib.use('Qt5Agg')\n\n%matplotlib inline\n\nstart_time=time.time()\n\nprint()\nprint('make animation')\n\n#animation start time in matplotlib format\n\nframe_time_num=parse_time(t_start).plot_date\n\nsns.set_context('talk')\nif not black: sns.set_style('darkgrid'),#{'grid.linestyle': ':', 'grid.color': '.35'}) \nif black: sns.set_style('white',{'grid.linestyle': ':', 'grid.color': '.35'}) \n\n# animation settings \n\nfsize=13\nfadeind=int(60/res_in_days)\n\nsymsize_planet=110\nsymsize_spacecraft=80\n\n#for parker spiral \ntheta=np.arange(0,np.deg2rad(180),0.01)\n\n######################## make frames\n\n#for debugging\n#don't close plot in make_frame when testing\n\nmake_frame2(5500)\n\n#for i in np.arange(6454,6576,1):\n# make_frame(i)\n\nprint('done') \n",
"\nmake animation\ncurrent frame_time_num 2021-07-24 14:00:00+00:00 5500\ndone\n"
]
],
[
[
"## Make full movie",
"_____no_output_____"
]
],
[
[
"matplotlib.use('Agg')\n\nprint(k_all,' frames in total')\n\nprint()\n\n#number of processes depends on your machines memory; check with command line \"top\"\n#how much memory is used by all your processesii\nnr_of_processes_used=100\nprint('Using multiprocessing, nr of cores',multiprocessing.cpu_count(), \\\n 'with nr of processes used: ',nr_of_processes_used)\n\n#run multiprocessing pool to make all movie frames, depending only on frame number\npool = multiprocessing.Pool(processes=nr_of_processes_used)\ninput=[i for i in range(k_all)]\n#input=[i for i in np.arange(6721,6851,1)]\npool.map(make_frame, input)\npool.close()\n# pool.join()\n\n\nprint('time in min: ',np.round((time.time()-start_time)/60))\nprint('plots done, frames saved in ',outputdirectory)\n \n#os.system(ffmpeg_path+'ffmpeg -r 30 -i '+str(outputdirectory)+'/pos_anim_%05d.jpg -b 5000k \\\n# -r 30 '+str(animdirectory)+'/overview_27nov_2020_from2018.mp4 -y -loglevel quiet')\n\n#os.system(ffmpeg_path+'ffmpeg -r 30 -i '+str(outputdirectory)+'/pos_anim_%05d.jpg -b 5000k \\\n# -r 30 '+str(animdirectory)+'/overview_apr2020_jul2021.mp4 -y -loglevel quiet')\n\n\nos.system(ffmpeg_path+'ffmpeg -r 30 -i '+str(outputdirectory)+'/pos_anim_%05d.jpg -b 5000k \\\n -r 30 '+str(animdirectory)+'/overview_apr2021_sep2021.mp4 -y -loglevel quiet')\n\n\n\nprint('movie done, saved in ',animdirectory)",
"8736 frames in total\n\nUsing multiprocessing, nr of cores 128 with nr of processes used: 100\nresults/overview_movie_apr21_sep21_frames/pos_anim_00000.jpg\ntime in min: 5.0\nplots done, frames saved in results/overview_movie_apr21_sep21_frames\nmovie done, saved in results/overview_movie_apr21_sep21\n"
]
],
[
[
"## Lineup event images",
"_____no_output_____"
]
],
[
[
"#load lineup catalog\nurl='lineups/HELIO4CAST_multipoint_v10.csv'\nlineups=pd.read_csv(url)\n\n\n#alltimes are the movie frame times\n\n#time of event 1\netime1=parse_time(lineups['event_start_time'][1]).datetime\neframe1=np.where(np.array(alltimes)> etime1)[0][0]\nmake_frame2(eframe1)\n\nplt.close('all')\n\netime2=parse_time(lineups['event_start_time'][6]).datetime\neframe2=np.where(np.array(alltimes)> etime2)[0][0]\nmake_frame2(eframe2)\nplt.close('all')\n\netime4=parse_time(lineups['event_start_time'][12]).datetime\neframe4=np.where(np.array(alltimes)> etime4)[0][0]\nmake_frame2(eframe4)\nplt.close('all')\n\netime4_2=parse_time(lineups['event_start_time'][11]).datetime\neframe4_2=np.where(np.array(alltimes)> etime4_2)[0][0]\nmake_frame2(eframe4_2)\nplt.close('all')\n\netime12=parse_time(lineups['event_start_time'][29]).datetime\neframe12=np.where(np.array(alltimes)> etime12)[0][0]\nmake_frame2(eframe12)\nplt.close('all')\n\n\n\n\n\n\n\n\n",
"current frame_time_num 2020-04-19 05:30:00+00:00 443\ncurrent frame_time_num 2020-05-29 21:30:00+00:00 2395\ncurrent frame_time_num 2020-06-29 10:00:00+00:00 3860\ncurrent frame_time_num 2020-06-25 16:00:00+00:00 3680\ncurrent frame_time_num 2020-12-01 02:30:00+00:00 11285\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a22b763c8ba5f67d1303b73e12246b8942abf8c
| 14,441 |
ipynb
|
Jupyter Notebook
|
gui.ipynb
|
beepscore/browsy
|
f4dbf476371fa5caad7aeb34a0f409ac3062523c
|
[
"MIT"
] | null | null | null |
gui.ipynb
|
beepscore/browsy
|
f4dbf476371fa5caad7aeb34a0f409ac3062523c
|
[
"MIT"
] | null | null | null |
gui.ipynb
|
beepscore/browsy
|
f4dbf476371fa5caad7aeb34a0f409ac3062523c
|
[
"MIT"
] | null | null | null | 31.190065 | 162 | 0.277682 |
[
[
[
"# TODO: consider experiment with ipywidgets\n# https://ipywidgets.readthedocs.io/en/latest/examples/Widget%20List.html\n# import ipywidgets as widgets\n\nimport scraper",
"_____no_output_____"
],
[
"search_string = 'segmentLink=17&instrument=OPTIDX&symbol=BANKNIFTY'\ndate_string = '31JAN2019'\nurl = scraper.url(search_string, date_string)\nprint(url)\n# https://www.nseindia.com/live_market/dynaContent/live_watch/option_chain/optionKeys.jsp?segmentLink=17&instrument=OPTIDX&symbol=BANKNIFTY&date=29NOV2018",
"https://www.nseindia.com/live_market/dynaContent/live_watch/option_chain/optionKeys.jsp?segmentLink=17&instrument=OPTIDX&symbol=BANKNIFTY&date=31JAN2019\n"
],
[
"css_id = \"octable\"\n\n# avoid duplicate column names\ncolumn_names = ['c_oi', 'c_chng_in_oi', 'c_volume', 'c_iv', 'c_ltp',\n 'c_net_chng', 'c_bid_qty', 'c_bid_price', 'c_ask_price', 'c_ask_qty',\n 'strike price',\n 'p_bid_qty', 'p_bid_price', 'p_ask_price', 'p_ask_qty',\n 'p_net chng', 'p_ltp', 'p_iv', 'p_volume', 'p_chng_in_oi', 'p_oi']\n\ndf = scraper.get_dataframe(url, css_id, column_names)\ndf.head(10)",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code"
]
] |
4a22bce79397a794e8ed24167c7d81eea7d65637
| 567,153 |
ipynb
|
Jupyter Notebook
|
examples/cevae_example.ipynb
|
ibraaaa/causalml
|
340ff52488af5efd01d4a633107e231c88b8307d
|
[
"Apache-2.0"
] | 2,919 |
2019-08-12T23:02:10.000Z
|
2022-03-31T21:59:34.000Z
|
examples/cevae_example.ipynb
|
ibraaaa/causalml
|
340ff52488af5efd01d4a633107e231c88b8307d
|
[
"Apache-2.0"
] | 317 |
2019-08-13T14:16:22.000Z
|
2022-03-26T08:44:06.000Z
|
examples/cevae_example.ipynb
|
ibraaaa/causalml
|
340ff52488af5efd01d4a633107e231c88b8307d
|
[
"Apache-2.0"
] | 466 |
2019-08-18T01:45:14.000Z
|
2022-03-31T08:11:53.000Z
| 102.63355 | 92,308 | 0.783055 |
[
[
[
"import pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\nimport torch\n\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split\nfrom xgboost import XGBRegressor\nfrom lightgbm import LGBMRegressor\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.metrics import mean_squared_error as mse\nfrom scipy.stats import entropy\nimport warnings\nimport logging\n\nfrom causalml.inference.meta import BaseXRegressor, BaseRRegressor, BaseSRegressor, BaseTRegressor\nfrom causalml.inference.nn import CEVAE\nfrom causalml.propensity import ElasticNetPropensityModel\nfrom causalml.metrics import *\nfrom causalml.dataset import simulate_hidden_confounder\n\n%matplotlib inline\n\nwarnings.filterwarnings('ignore')\nlogger = logging.getLogger('causalml')\nlogger.setLevel(logging.DEBUG)\n\nplt.style.use('fivethirtyeight')\nsns.set_palette('Paired')\nplt.rcParams['figure.figsize'] = (12,8)",
"_____no_output_____"
]
],
[
[
"# IHDP semi-synthetic dataset\n\nHill introduced a semi-synthetic dataset constructed from the Infant Health\nand Development Program (IHDP). This dataset is based on a randomized experiment\ninvestigating the effect of home visits by specialists on future cognitive scores. The IHDP simulation is considered the de-facto standard benchmark for neural network treatment effect\nestimation methods.",
"_____no_output_____"
]
],
[
[
"# load all ihadp data\ndf = pd.DataFrame()\nfor i in range(1, 10):\n data = pd.read_csv('./data/ihdp_npci_' + str(i) + '.csv', header=None)\n df = pd.concat([data, df])\ncols = [\"treatment\", \"y_factual\", \"y_cfactual\", \"mu0\", \"mu1\"] + [i for i in range(25)]\ndf.columns = cols\nprint(df.shape)\n\n# replicate the data 100 times\nreplications = 100\ndf = pd.concat([df]*replications, ignore_index=True)\nprint(df.shape)",
"(6723, 30)\n(672300, 30)\n"
],
[
"# set which features are binary\nbinfeats = [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]\n# set which features are continuous\ncontfeats = [i for i in range(25) if i not in binfeats]\n\n# reorder features with binary first and continuous after\nperm = binfeats + contfeats",
"_____no_output_____"
],
[
"df = df.reset_index(drop=True)\ndf.head()",
"_____no_output_____"
],
[
"X = df[perm].values\ntreatment = df['treatment'].values\ny = df['y_factual'].values\ny_cf = df['y_cfactual'].values\ntau = df.apply(lambda d: d['y_factual'] - d['y_cfactual'] if d['treatment']==1 \n else d['y_cfactual'] - d['y_factual'], \n axis=1)\nmu_0 = df['mu0'].values\nmu_1 = df['mu1'].values",
"_____no_output_____"
],
[
"# seperate for train and test\nitr, ite = train_test_split(np.arange(X.shape[0]), test_size=0.2, random_state=1)\nX_train, treatment_train, y_train, y_cf_train, tau_train, mu_0_train, mu_1_train = X[itr], treatment[itr], y[itr], y_cf[itr], tau[itr], mu_0[itr], mu_1[itr]\nX_val, treatment_val, y_val, y_cf_val, tau_val, mu_0_val, mu_1_val = X[ite], treatment[ite], y[ite], y_cf[ite], tau[ite], mu_0[ite], mu_1[ite]",
"_____no_output_____"
]
],
[
[
"## CEVAE Model",
"_____no_output_____"
]
],
[
[
"# cevae model settings\noutcome_dist = \"normal\"\nlatent_dim = 20\nhidden_dim = 200\nnum_epochs = 5\nbatch_size = 1000\nlearning_rate = 0.001\nlearning_rate_decay = 0.01\nnum_layers = 2",
"_____no_output_____"
],
[
"cevae = CEVAE(outcome_dist=outcome_dist,\n latent_dim=latent_dim,\n hidden_dim=hidden_dim,\n num_epochs=num_epochs,\n batch_size=batch_size,\n learning_rate=learning_rate,\n learning_rate_decay=learning_rate_decay,\n num_layers=num_layers)",
"_____no_output_____"
],
[
"# fit\nlosses = cevae.fit(X=torch.tensor(X_train, dtype=torch.float),\n treatment=torch.tensor(treatment_train, dtype=torch.float),\n y=torch.tensor(y_train, dtype=torch.float))",
"INFO \t Training with 538 minibatches per epoch\nDEBUG \t step 0 loss = 1021.35\nDEBUG \t step 1 loss = 421.484\nDEBUG \t step 2 loss = 338.296\nDEBUG \t step 3 loss = 319.514\nDEBUG \t step 4 loss = 217.484\nDEBUG \t step 5 loss = 237.474\nDEBUG \t step 6 loss = 242.367\nDEBUG \t step 7 loss = 236.713\nDEBUG \t step 8 loss = 200.399\nDEBUG \t step 9 loss = 201.788\nDEBUG \t step 10 loss = 220.049\nDEBUG \t step 11 loss = 213.79\nDEBUG \t step 12 loss = 190.921\nDEBUG \t step 13 loss = 196.359\nDEBUG \t step 14 loss = 189.747\nDEBUG \t step 15 loss = 167.321\nDEBUG \t step 16 loss = 159.207\nDEBUG \t step 17 loss = 154.599\nDEBUG \t step 18 loss = 150.961\nDEBUG \t step 19 loss = 149.938\nDEBUG \t step 20 loss = 134.768\nDEBUG \t step 21 loss = 140.833\nDEBUG \t step 22 loss = 146.769\nDEBUG \t step 23 loss = 132.524\nDEBUG \t step 24 loss = 134.194\nDEBUG \t step 25 loss = 130.618\nDEBUG \t step 26 loss = 136.787\nDEBUG \t step 27 loss = 126.727\nDEBUG \t step 28 loss = 120.942\nDEBUG \t step 29 loss = 118.619\nDEBUG \t step 30 loss = 120.946\nDEBUG \t step 31 loss = 110.782\nDEBUG \t step 32 loss = 120.907\nDEBUG \t step 33 loss = 106.87\nDEBUG \t step 34 loss = 95.3908\nDEBUG \t step 35 loss = 104.229\nDEBUG \t step 36 loss = 100.688\nDEBUG \t step 37 loss = 102.31\nDEBUG \t step 38 loss = 96.3181\nDEBUG \t step 39 loss = 92.0119\nDEBUG \t step 40 loss = 101.374\nDEBUG \t step 41 loss = 95.1874\nDEBUG \t step 42 loss = 91.693\nDEBUG \t step 43 loss = 83.7838\nDEBUG \t step 44 loss = 76.9446\nDEBUG \t step 45 loss = 77.8403\nDEBUG \t step 46 loss = 81.372\nDEBUG \t step 47 loss = 82.7198\nDEBUG \t step 48 loss = 72.8519\nDEBUG \t step 49 loss = 76.6569\nDEBUG \t step 50 loss = 75.7397\nDEBUG \t step 51 loss = 79.6319\nDEBUG \t step 52 loss = 79.2719\nDEBUG \t step 53 loss = 74.6354\nDEBUG \t step 54 loss = 68.5501\nDEBUG \t step 55 loss = 72.5121\nDEBUG \t step 56 loss = 65.3819\nDEBUG \t step 57 loss = 68.0494\nDEBUG \t step 58 loss = 69.0703\nDEBUG \t step 59 
loss = 67.7917\nDEBUG \t step 60 loss = 66.9287\nDEBUG \t step 61 loss = 58.5794\nDEBUG \t step 62 loss = 59.4718\nDEBUG \t step 63 loss = 62.9541\nDEBUG \t step 64 loss = 60.0412\nDEBUG \t step 65 loss = 57.8926\nDEBUG \t step 66 loss = 57.5324\nDEBUG \t step 67 loss = 56.5494\nDEBUG \t step 68 loss = 52.2587\nDEBUG \t step 69 loss = 55.7073\nDEBUG \t step 70 loss = 54.979\nDEBUG \t step 71 loss = 55.4208\nDEBUG \t step 72 loss = 54.7927\nDEBUG \t step 73 loss = 49.0343\nDEBUG \t step 74 loss = 53.8712\nDEBUG \t step 75 loss = 50.4505\nDEBUG \t step 76 loss = 49.2015\nDEBUG \t step 77 loss = 49.1161\nDEBUG \t step 78 loss = 51.0351\nDEBUG \t step 79 loss = 47.8925\nDEBUG \t step 80 loss = 48.4682\nDEBUG \t step 81 loss = 47.0941\nDEBUG \t step 82 loss = 44.807\nDEBUG \t step 83 loss = 43.6143\nDEBUG \t step 84 loss = 48.9903\nDEBUG \t step 85 loss = 46.6454\nDEBUG \t step 86 loss = 46.2746\nDEBUG \t step 87 loss = 47.5599\nDEBUG \t step 88 loss = 45.7764\nDEBUG \t step 89 loss = 42.9916\nDEBUG \t step 90 loss = 43.2444\nDEBUG \t step 91 loss = 43.616\nDEBUG \t step 92 loss = 41.0364\nDEBUG \t step 93 loss = 40.7751\nDEBUG \t step 94 loss = 39.693\nDEBUG \t step 95 loss = 41.2092\nDEBUG \t step 96 loss = 41.3535\nDEBUG \t step 97 loss = 39.0969\nDEBUG \t step 98 loss = 39.176\nDEBUG \t step 99 loss = 41.4575\nDEBUG \t step 100 loss = 40.5371\nDEBUG \t step 101 loss = 39.4805\nDEBUG \t step 102 loss = 37.7776\nDEBUG \t step 103 loss = 36.5425\nDEBUG \t step 104 loss = 37.3177\nDEBUG \t step 105 loss = 37.9773\nDEBUG \t step 106 loss = 36.8961\nDEBUG \t step 107 loss = 36.6936\nDEBUG \t step 108 loss = 35.1503\nDEBUG \t step 109 loss = 37.8622\nDEBUG \t step 110 loss = 36.6135\nDEBUG \t step 111 loss = 34.6556\nDEBUG \t step 112 loss = 32.9034\nDEBUG \t step 113 loss = 35.928\nDEBUG \t step 114 loss = 35.6375\nDEBUG \t step 115 loss = 34.8875\nDEBUG \t step 116 loss = 32.4369\nDEBUG \t step 117 loss = 35.5889\nDEBUG \t step 118 loss = 33.3445\nDEBUG \t step 119 loss 
= 35.3891\nDEBUG \t step 120 loss = 32.7132\nDEBUG \t step 121 loss = 32.4759\nDEBUG \t step 122 loss = 33.143\nDEBUG \t step 123 loss = 31.3498\nDEBUG \t step 124 loss = 31.6331\nDEBUG \t step 125 loss = 33.2434\nDEBUG \t step 126 loss = 31.1028\nDEBUG \t step 127 loss = 32.8674\nDEBUG \t step 128 loss = 32.8578\nDEBUG \t step 129 loss = 32.625\nDEBUG \t step 130 loss = 31.8448\nDEBUG \t step 131 loss = 30.8554\nDEBUG \t step 132 loss = 31.9763\nDEBUG \t step 133 loss = 29.6616\nDEBUG \t step 134 loss = 30.0425\nDEBUG \t step 135 loss = 30.836\nDEBUG \t step 136 loss = 31.0736\nDEBUG \t step 137 loss = 30.8878\nDEBUG \t step 138 loss = 30.43\nDEBUG \t step 139 loss = 30.6093\nDEBUG \t step 140 loss = 30.7339\nDEBUG \t step 141 loss = 30.0207\nDEBUG \t step 142 loss = 29.3626\nDEBUG \t step 143 loss = 29.7463\nDEBUG \t step 144 loss = 29.4184\nDEBUG \t step 145 loss = 29.2421\nDEBUG \t step 146 loss = 29.7529\nDEBUG \t step 147 loss = 29.3111\nDEBUG \t step 148 loss = 28.7811\nDEBUG \t step 149 loss = 29.3185\nDEBUG \t step 150 loss = 28.3709\nDEBUG \t step 151 loss = 30.2563\nDEBUG \t step 152 loss = 29.5989\nDEBUG \t step 153 loss = 28.8563\nDEBUG \t step 154 loss = 27.3948\nDEBUG \t step 155 loss = 28.3484\nDEBUG \t step 156 loss = 29.0616\nDEBUG \t step 157 loss = 28.8883\nDEBUG \t step 158 loss = 27.0463\nDEBUG \t step 159 loss = 27.3796\nDEBUG \t step 160 loss = 29.0732\nDEBUG \t step 161 loss = 26.8263\nDEBUG \t step 162 loss = 27.2883\nDEBUG \t step 163 loss = 28.6272\nDEBUG \t step 164 loss = 26.7478\nDEBUG \t step 165 loss = 27.6244\nDEBUG \t step 166 loss = 26.3508\nDEBUG \t step 167 loss = 26.1734\nDEBUG \t step 168 loss = 26.4877\nDEBUG \t step 169 loss = 26.9542\nDEBUG \t step 170 loss = 27.5395\nDEBUG \t step 171 loss = 26.4924\nDEBUG \t step 172 loss = 26.2203\nDEBUG \t step 173 loss = 26.039\nDEBUG \t step 174 loss = 25.7883\nDEBUG \t step 175 loss = 25.7104\nDEBUG \t step 176 loss = 25.9135\nDEBUG \t step 177 loss = 25.8419\nDEBUG \t step 178 loss 
= 26.897\nDEBUG \t step 179 loss = 24.8235\nDEBUG \t step 180 loss = 25.8669\nDEBUG \t step 181 loss = 26.442\nDEBUG \t step 182 loss = 24.7512\nDEBUG \t step 183 loss = 25.4444\nDEBUG \t step 184 loss = 25.7225\nDEBUG \t step 185 loss = 24.9703\nDEBUG \t step 186 loss = 25.5197\nDEBUG \t step 187 loss = 25.3311\nDEBUG \t step 188 loss = 25.0711\nDEBUG \t step 189 loss = 25.5542\nDEBUG \t step 190 loss = 25.2289\nDEBUG \t step 191 loss = 24.9589\nDEBUG \t step 192 loss = 24.5436\nDEBUG \t step 193 loss = 24.4451\nDEBUG \t step 194 loss = 23.3428\nDEBUG \t step 195 loss = 24.6046\nDEBUG \t step 196 loss = 25.1871\nDEBUG \t step 197 loss = 24.1005\nDEBUG \t step 198 loss = 24.287\nDEBUG \t step 199 loss = 24.4165\nDEBUG \t step 200 loss = 24.5855\nDEBUG \t step 201 loss = 23.2874\nDEBUG \t step 202 loss = 23.8787\nDEBUG \t step 203 loss = 24.5806\nDEBUG \t step 204 loss = 24.0906\nDEBUG \t step 205 loss = 25.0818\nDEBUG \t step 206 loss = 23.9177\nDEBUG \t step 207 loss = 25.0566\nDEBUG \t step 208 loss = 23.0722\nDEBUG \t step 209 loss = 23.8822\nDEBUG \t step 210 loss = 24.3339\nDEBUG \t step 211 loss = 24.7321\nDEBUG \t step 212 loss = 22.9672\nDEBUG \t step 213 loss = 23.6966\nDEBUG \t step 214 loss = 23.0869\nDEBUG \t step 215 loss = 23.5599\nDEBUG \t step 216 loss = 23.6307\nDEBUG \t step 217 loss = 23.1928\nDEBUG \t step 218 loss = 23.9375\nDEBUG \t step 219 loss = 23.65\nDEBUG \t step 220 loss = 22.5324\nDEBUG \t step 221 loss = 23.7082\nDEBUG \t step 222 loss = 22.854\nDEBUG \t step 223 loss = 21.8886\nDEBUG \t step 224 loss = 23.4573\nDEBUG \t step 225 loss = 22.4752\nDEBUG \t step 226 loss = 22.2281\nDEBUG \t step 227 loss = 22.6597\nDEBUG \t step 228 loss = 22.8313\nDEBUG \t step 229 loss = 22.8756\nDEBUG \t step 230 loss = 22.1289\nDEBUG \t step 231 loss = 22.6235\nDEBUG \t step 232 loss = 22.0739\nDEBUG \t step 233 loss = 22.7643\nDEBUG \t step 234 loss = 21.5396\nDEBUG \t step 235 loss = 21.5537\nDEBUG \t step 236 loss = 21.8743\nDEBUG \t step 237 loss 
= 22.6117\nDEBUG \t step 238 loss = 22.8206\nDEBUG \t step 239 loss = 22.8641\nDEBUG \t step 240 loss = 22.5666\n"
],
[
"# predict\nite_train = cevae.predict(X_train)\nite_val = cevae.predict(X_val)",
"INFO \t Evaluating 538 minibatches\nDEBUG \t batch ate = 0.62191\nDEBUG \t batch ate = 0.613137\nDEBUG \t batch ate = 0.688279\nDEBUG \t batch ate = 0.530233\nDEBUG \t batch ate = 0.814089\nDEBUG \t batch ate = 0.623182\nDEBUG \t batch ate = 0.657884\nDEBUG \t batch ate = 0.594205\nDEBUG \t batch ate = 0.319953\nDEBUG \t batch ate = 0.557599\nDEBUG \t batch ate = 0.718177\nDEBUG \t batch ate = 0.441256\nDEBUG \t batch ate = 0.654653\nDEBUG \t batch ate = 0.70725\nDEBUG \t batch ate = 0.715862\nDEBUG \t batch ate = 0.193786\nDEBUG \t batch ate = 0.557451\nDEBUG \t batch ate = 0.788378\nDEBUG \t batch ate = 0.605489\nDEBUG \t batch ate = 0.669786\nDEBUG \t batch ate = 0.852794\nDEBUG \t batch ate = 0.755987\nDEBUG \t batch ate = 0.510262\nDEBUG \t batch ate = 0.502153\nDEBUG \t batch ate = 0.254691\nDEBUG \t batch ate = 0.369999\nDEBUG \t batch ate = 0.59401\nDEBUG \t batch ate = 0.608015\nDEBUG \t batch ate = 0.661765\nDEBUG \t batch ate = 0.25462\nDEBUG \t batch ate = 0.771231\nDEBUG \t batch ate = 0.530303\nDEBUG \t batch ate = 0.566246\nDEBUG \t batch ate = 0.683882\nDEBUG \t batch ate = 0.616635\nDEBUG \t batch ate = 0.324804\nDEBUG \t batch ate = 0.383451\nDEBUG \t batch ate = 0.690402\nDEBUG \t batch ate = 0.558513\nDEBUG \t batch ate = 0.618007\nDEBUG \t batch ate = 0.551096\nDEBUG \t batch ate = 0.462644\nDEBUG \t batch ate = 0.615761\nDEBUG \t batch ate = 0.543891\nDEBUG \t batch ate = 0.432806\nDEBUG \t batch ate = 0.562174\nDEBUG \t batch ate = 0.654926\nDEBUG \t batch ate = 0.421796\nDEBUG \t batch ate = 0.719893\nDEBUG \t batch ate = 0.454017\nDEBUG \t batch ate = 0.699385\nDEBUG \t batch ate = 0.54048\nDEBUG \t batch ate = 0.333772\nDEBUG \t batch ate = 0.737522\nDEBUG \t batch ate = 0.5696\nDEBUG \t batch ate = 0.467629\nDEBUG \t batch ate = 0.601579\nDEBUG \t batch ate = 0.509313\nDEBUG \t batch ate = 0.385523\nDEBUG \t batch ate = 0.510085\nDEBUG \t batch ate = 0.661952\nDEBUG \t batch ate = 0.600664\nDEBUG \t batch ate = 0.066584\nDEBUG \t batch 
ate = 0.552528\nDEBUG \t batch ate = 0.467475\nDEBUG \t batch ate = 0.539326\nDEBUG \t batch ate = 0.694311\nDEBUG \t batch ate = 0.198014\nDEBUG \t batch ate = 0.61709\nDEBUG \t batch ate = 0.408558\nDEBUG \t batch ate = 0.684187\nDEBUG \t batch ate = 0.447501\nDEBUG \t batch ate = 0.347885\nDEBUG \t batch ate = 0.561035\nDEBUG \t batch ate = 0.617192\nDEBUG \t batch ate = 0.81278\nDEBUG \t batch ate = 0.61961\nDEBUG \t batch ate = 1.01213\nDEBUG \t batch ate = 0.345585\nDEBUG \t batch ate = 0.51818\nDEBUG \t batch ate = 0.436719\nDEBUG \t batch ate = 0.604546\nDEBUG \t batch ate = 0.706353\nDEBUG \t batch ate = 0.661419\nDEBUG \t batch ate = 0.787418\nDEBUG \t batch ate = 0.61231\nDEBUG \t batch ate = 0.629355\nDEBUG \t batch ate = 0.550861\nDEBUG \t batch ate = 0.472948\nDEBUG \t batch ate = 0.594738\nDEBUG \t batch ate = 0.844747\nDEBUG \t batch ate = 0.682486\nDEBUG \t batch ate = 0.607738\nDEBUG \t batch ate = 0.49322\nDEBUG \t batch ate = 0.547857\nDEBUG \t batch ate = 0.255665\nDEBUG \t batch ate = 0.564768\nDEBUG \t batch ate = 0.34345\nDEBUG \t batch ate = 0.40075\nDEBUG \t batch ate = 0.72982\nDEBUG \t batch ate = 0.878728\nDEBUG \t batch ate = 0.860621\nDEBUG \t batch ate = 0.544359\nDEBUG \t batch ate = 0.777127\nDEBUG \t batch ate = 0.590297\nDEBUG \t batch ate = 0.880415\nDEBUG \t batch ate = 0.67375\nDEBUG \t batch ate = 0.784914\nDEBUG \t batch ate = 0.511374\nDEBUG \t batch ate = 0.327954\nDEBUG \t batch ate = 0.628989\nDEBUG \t batch ate = 0.529468\nDEBUG \t batch ate = 0.688235\nDEBUG \t batch ate = 0.872871\nDEBUG \t batch ate = 0.3485\nDEBUG \t batch ate = 0.572016\nDEBUG \t batch ate = 0.565154\nDEBUG \t batch ate = 0.588927\nDEBUG \t batch ate = 0.520636\nDEBUG \t batch ate = 0.345301\nDEBUG \t batch ate = 0.611386\nDEBUG \t batch ate = 0.702772\nDEBUG \t batch ate = 0.764302\nDEBUG \t batch ate = 0.638517\nDEBUG \t batch ate = 0.498749\nDEBUG \t batch ate = 0.922372\nDEBUG \t batch ate = 0.648347\nDEBUG \t batch ate = 0.930839\nDEBUG \t 
batch ate = 0.841956\nDEBUG \t batch ate = 0.687886\nDEBUG \t batch ate = 0.804776\nDEBUG \t batch ate = 0.550305\nDEBUG \t batch ate = 0.625526\nDEBUG \t batch ate = 0.856957\nDEBUG \t batch ate = 0.470616\nDEBUG \t batch ate = 0.507122\nDEBUG \t batch ate = 0.358198\nDEBUG \t batch ate = 0.6335\nDEBUG \t batch ate = 0.473881\nDEBUG \t batch ate = 0.415356\nDEBUG \t batch ate = 0.309733\nDEBUG \t batch ate = 0.290068\nDEBUG \t batch ate = 0.470317\nDEBUG \t batch ate = 0.668486\nDEBUG \t batch ate = 0.580281\nDEBUG \t batch ate = 0.772137\nDEBUG \t batch ate = 0.490976\nDEBUG \t batch ate = 0.511012\nDEBUG \t batch ate = 0.441551\nDEBUG \t batch ate = 0.575225\nDEBUG \t batch ate = 0.591247\nDEBUG \t batch ate = 0.368313\nDEBUG \t batch ate = 0.350138\nDEBUG \t batch ate = 0.603038\nDEBUG \t batch ate = 0.241947\nDEBUG \t batch ate = 0.599275\nDEBUG \t batch ate = 0.41003\nDEBUG \t batch ate = 0.447525\nDEBUG \t batch ate = 0.79099\nDEBUG \t batch ate = 0.506499\nDEBUG \t batch ate = 0.61826\nDEBUG \t batch ate = 0.651964\nDEBUG \t batch ate = 0.52761\nDEBUG \t batch ate = 0.888067\nDEBUG \t batch ate = 0.367077\nDEBUG \t batch ate = 0.524761\nDEBUG \t batch ate = 0.6165\nDEBUG \t batch ate = 0.72863\nDEBUG \t batch ate = 0.516559\nDEBUG \t batch ate = 0.385291\nDEBUG \t batch ate = 0.660073\nDEBUG \t batch ate = 0.465947\nDEBUG \t batch ate = 0.586065\nDEBUG \t batch ate = 0.533599\nDEBUG \t batch ate = 0.916433\nDEBUG \t batch ate = 0.658235\nDEBUG \t batch ate = 0.770213\nDEBUG \t batch ate = 0.634768\nDEBUG \t batch ate = 0.887955\nDEBUG \t batch ate = 0.374664\nDEBUG \t batch ate = 0.649699\nDEBUG \t batch ate = 0.550386\nDEBUG \t batch ate = 0.516355\nDEBUG \t batch ate = 0.425265\nDEBUG \t batch ate = 0.264789\nDEBUG \t batch ate = 0.775339\nDEBUG \t batch ate = 0.636203\nDEBUG \t batch ate = 0.507562\nDEBUG \t batch ate = 0.885973\nDEBUG \t batch ate = 0.951861\nDEBUG \t batch ate = 0.370282\nDEBUG \t batch ate = 0.69922\nDEBUG \t batch ate = 
0.956577\nDEBUG \t batch ate = 0.789856\nDEBUG \t batch ate = 0.726278\nDEBUG \t batch ate = 0.165073\nDEBUG \t batch ate = 0.530907\nDEBUG \t batch ate = 0.602567\nDEBUG \t batch ate = 0.682041\nDEBUG \t batch ate = 0.54427\nDEBUG \t batch ate = 0.787318\nDEBUG \t batch ate = 0.491623\nDEBUG \t batch ate = 0.794449\nDEBUG \t batch ate = 0.928849\nDEBUG \t batch ate = 0.771662\nDEBUG \t batch ate = 0.722534\nDEBUG \t batch ate = 0.611424\nDEBUG \t batch ate = 0.754558\nDEBUG \t batch ate = 0.466829\nDEBUG \t batch ate = 0.623566\nDEBUG \t batch ate = 0.595247\nDEBUG \t batch ate = 0.790067\nDEBUG \t batch ate = 0.218814\nDEBUG \t batch ate = 0.551078\nDEBUG \t batch ate = 0.561368\nDEBUG \t batch ate = 0.823733\nDEBUG \t batch ate = 0.725582\nDEBUG \t batch ate = 0.685417\nDEBUG \t batch ate = 0.573616\nDEBUG \t batch ate = 0.408314\nDEBUG \t batch ate = 0.420605\nDEBUG \t batch ate = 0.699393\nDEBUG \t batch ate = 0.485361\nDEBUG \t batch ate = 0.470607\nDEBUG \t batch ate = 0.672379\nDEBUG \t batch ate = 0.515571\nDEBUG \t batch ate = 0.837184\nDEBUG \t batch ate = 0.383294\nDEBUG \t batch ate = 0.631237\nDEBUG \t batch ate = 0.660588\nDEBUG \t batch ate = 0.454409\nDEBUG \t batch ate = 0.277474\nDEBUG \t batch ate = 1.08705\nDEBUG \t batch ate = 0.542072\nDEBUG \t batch ate = 0.667987\nDEBUG \t batch ate = 0.474515\nDEBUG \t batch ate = 0.462981\nDEBUG \t batch ate = 0.581607\nDEBUG \t batch ate = 0.539565\nDEBUG \t batch ate = 0.740687\nDEBUG \t batch ate = 0.672987\nDEBUG \t batch ate = 0.725537\nDEBUG \t batch ate = 0.683099\nDEBUG \t batch ate = 0.695347\nDEBUG \t batch ate = 0.533302\nDEBUG \t batch ate = 0.625668\nDEBUG \t batch ate = 0.744886\nDEBUG \t batch ate = 0.686994\nDEBUG \t batch ate = 0.572683\nDEBUG \t batch ate = 0.431316\nDEBUG \t batch ate = 0.521101\nDEBUG \t batch ate = 0.651604\nDEBUG \t batch ate = 0.514384\nDEBUG \t batch ate = 0.471155\nDEBUG \t batch ate = 0.759972\nDEBUG \t batch ate = 0.633456\nDEBUG \t batch ate = 0.52144\nDEBUG \t 
batch ate = 0.675739\nDEBUG \t batch ate = 0.713319\nDEBUG \t batch ate = 0.749301\nDEBUG \t batch ate = 0.637229\nDEBUG \t batch ate = 0.690767\nDEBUG \t batch ate = 0.638464\nDEBUG \t batch ate = 0.804409\nDEBUG \t batch ate = 0.379763\nDEBUG \t batch ate = 0.939645\nDEBUG \t batch ate = 0.566416\nDEBUG \t batch ate = 0.722778\nDEBUG \t batch ate = 0.875249\nDEBUG \t batch ate = 0.585553\nDEBUG \t batch ate = 0.452997\nDEBUG \t batch ate = 0.660046\nDEBUG \t batch ate = 0.523958\nDEBUG \t batch ate = 0.743689\nDEBUG \t batch ate = 0.281901\nDEBUG \t batch ate = 0.79823\nDEBUG \t batch ate = 0.501476\nDEBUG \t batch ate = 0.27024\nDEBUG \t batch ate = 0.661638\nDEBUG \t batch ate = 0.530568\nDEBUG \t batch ate = 0.276738\nDEBUG \t batch ate = 0.734873\nDEBUG \t batch ate = 0.547245\n"
],
[
"ate_train = ite_train.mean()\nate_val = ite_val.mean()\nprint(ate_train, ate_val)",
"0.58953923 0.5956359\n"
]
],
[
[
"## Meta Learners",
"_____no_output_____"
]
],
[
[
"# fit propensity model\np_model = ElasticNetPropensityModel()\np_train = p_model.fit_predict(X_train, treatment_train)\np_val = p_model.fit_predict(X_val, treatment_val)",
"_____no_output_____"
],
[
"s_learner = BaseSRegressor(LGBMRegressor())\ns_ate = s_learner.estimate_ate(X_train, treatment_train, y_train)[0]\ns_ite_train = s_learner.fit_predict(X_train, treatment_train, y_train)\ns_ite_val = s_learner.predict(X_val)\n\nt_learner = BaseTRegressor(LGBMRegressor())\nt_ate = t_learner.estimate_ate(X_train, treatment_train, y_train)[0][0]\nt_ite_train = t_learner.fit_predict(X_train, treatment_train, y_train)\nt_ite_val = t_learner.predict(X_val, treatment_val, y_val)\n\nx_learner = BaseXRegressor(LGBMRegressor())\nx_ate = x_learner.estimate_ate(X_train, treatment_train, y_train, p_train)[0][0]\nx_ite_train = x_learner.fit_predict(X_train, treatment_train, y_train, p_train)\nx_ite_val = x_learner.predict(X_val, treatment_val, y_val, p_val)\n\nr_learner = BaseRRegressor(LGBMRegressor())\nr_ate = r_learner.estimate_ate(X_train, treatment_train, y_train, p_train)[0][0]\nr_ite_train = r_learner.fit_predict(X_train, treatment_train, y_train, p_train)\nr_ite_val = r_learner.predict(X_val)",
"_____no_output_____"
]
],
[
[
"## Model Results Comparsion",
"_____no_output_____"
],
[
"### Training",
"_____no_output_____"
]
],
[
[
"df_preds_train = pd.DataFrame([s_ite_train.ravel(),\n t_ite_train.ravel(),\n x_ite_train.ravel(),\n r_ite_train.ravel(),\n ite_train.ravel(),\n tau_train.ravel(),\n treatment_train.ravel(),\n y_train.ravel()],\n index=['S','T','X','R','CEVAE','tau','w','y']).T\n\ndf_cumgain_train = get_cumgain(df_preds_train)",
"_____no_output_____"
],
[
"df_result_train = pd.DataFrame([s_ate, t_ate, x_ate, r_ate, ate_train, tau_train.mean()],\n index=['S','T','X','R','CEVAE','actual'], columns=['ATE'])\ndf_result_train['MAE'] = [mean_absolute_error(t,p) for t,p in zip([s_ite_train, t_ite_train, x_ite_train, r_ite_train, ite_train],\n [tau_train.values.reshape(-1,1)]*5 )\n ] + [None]\ndf_result_train['AUUC'] = auuc_score(df_preds_train)",
"_____no_output_____"
],
[
"df_result_train",
"_____no_output_____"
],
[
"plot_gain(df_preds_train)",
"_____no_output_____"
]
],
[
[
"### Validation",
"_____no_output_____"
]
],
[
[
"df_preds_val = pd.DataFrame([s_ite_val.ravel(),\n t_ite_val.ravel(),\n x_ite_val.ravel(),\n r_ite_val.ravel(),\n ite_val.ravel(),\n tau_val.ravel(),\n treatment_val.ravel(),\n y_val.ravel()],\n index=['S','T','X','R','CEVAE','tau','w','y']).T\n\ndf_cumgain_val = get_cumgain(df_preds_val)",
"_____no_output_____"
],
[
"df_result_val = pd.DataFrame([s_ite_val.mean(), t_ite_val.mean(), x_ite_val.mean(), r_ite_val.mean(), ate_val, tau_val.mean()],\n index=['S','T','X','R','CEVAE','actual'], columns=['ATE'])\ndf_result_val['MAE'] = [mean_absolute_error(t,p) for t,p in zip([s_ite_val, t_ite_val, x_ite_val, r_ite_val, ite_val],\n [tau_val.values.reshape(-1,1)]*5 )\n ] + [None]\ndf_result_val['AUUC'] = auuc_score(df_preds_val)",
"_____no_output_____"
],
[
"df_result_val",
"_____no_output_____"
],
[
"plot_gain(df_preds_val)",
"_____no_output_____"
]
],
[
[
"# Synthetic Data",
"_____no_output_____"
]
],
[
[
"y, X, w, tau, b, e = simulate_hidden_confounder(n=100000, p=5, sigma=1.0, adj=0.)\n\nX_train, X_val, y_train, y_val, w_train, w_val, tau_train, tau_val, b_train, b_val, e_train, e_val = \\\n train_test_split(X, y, w, tau, b, e, test_size=0.2, random_state=123, shuffle=True)\n\npreds_dict_train = {}\npreds_dict_valid = {}\n\npreds_dict_train['Actuals'] = tau_train\npreds_dict_valid['Actuals'] = tau_val\n\npreds_dict_train['generated_data'] = {\n 'y': y_train,\n 'X': X_train,\n 'w': w_train,\n 'tau': tau_train,\n 'b': b_train,\n 'e': e_train}\npreds_dict_valid['generated_data'] = {\n 'y': y_val,\n 'X': X_val,\n 'w': w_val,\n 'tau': tau_val,\n 'b': b_val,\n 'e': e_val}\n\n# Predict p_hat because e would not be directly observed in real-life\np_model = ElasticNetPropensityModel()\np_hat_train = p_model.fit_predict(X_train, w_train)\np_hat_val = p_model.fit_predict(X_val, w_val)\n\nfor base_learner, label_l in zip([BaseSRegressor, BaseTRegressor, BaseXRegressor, BaseRRegressor],\n ['S', 'T', 'X', 'R']):\n for model, label_m in zip([LinearRegression, XGBRegressor], ['LR', 'XGB']):\n # RLearner will need to fit on the p_hat\n if label_l != 'R':\n learner = base_learner(model())\n # fit the model on training data only\n learner.fit(X=X_train, treatment=w_train, y=y_train)\n try:\n preds_dict_train['{} Learner ({})'.format(\n label_l, label_m)] = learner.predict(X=X_train, p=p_hat_train).flatten()\n preds_dict_valid['{} Learner ({})'.format(\n label_l, label_m)] = learner.predict(X=X_val, p=p_hat_val).flatten()\n except TypeError:\n preds_dict_train['{} Learner ({})'.format(\n label_l, label_m)] = learner.predict(X=X_train, treatment=w_train, y=y_train).flatten()\n preds_dict_valid['{} Learner ({})'.format(\n label_l, label_m)] = learner.predict(X=X_val, treatment=w_val, y=y_val).flatten()\n else:\n learner = base_learner(model())\n learner.fit(X=X_train, p=p_hat_train, treatment=w_train, y=y_train)\n preds_dict_train['{} Learner ({})'.format(\n label_l, label_m)] = 
learner.predict(X=X_train).flatten()\n preds_dict_valid['{} Learner ({})'.format(\n label_l, label_m)] = learner.predict(X=X_val).flatten()\n\n# cevae model settings\noutcome_dist = \"normal\"\nlatent_dim = 20\nhidden_dim = 200\nnum_epochs = 5\nbatch_size = 1000\nlearning_rate = 1e-3\nlearning_rate_decay = 0.1\nnum_layers = 3\nnum_samples = 10\n\ncevae = CEVAE(outcome_dist=outcome_dist,\n latent_dim=latent_dim,\n hidden_dim=hidden_dim,\n num_epochs=num_epochs,\n batch_size=batch_size,\n learning_rate=learning_rate,\n learning_rate_decay=learning_rate_decay,\n num_layers=num_layers,\n num_samples=num_samples)\n\n# fit\nlosses = cevae.fit(X=torch.tensor(X_train, dtype=torch.float),\n treatment=torch.tensor(w_train, dtype=torch.float),\n y=torch.tensor(y_train, dtype=torch.float))\n\npreds_dict_train['CEVAE'] = cevae.predict(X_train).flatten()\npreds_dict_valid['CEVAE'] = cevae.predict(X_val).flatten()",
"INFO \t Training with 80 minibatches per epoch\nDEBUG \t step 0 loss = 14.0534\nDEBUG \t step 1 loss = 13.2864\nDEBUG \t step 2 loss = 13.0712\nDEBUG \t step 3 loss = 12.4646\nDEBUG \t step 4 loss = 12.0247\nDEBUG \t step 5 loss = 11.5239\nDEBUG \t step 6 loss = 11.2934\nDEBUG \t step 7 loss = 11.3141\nDEBUG \t step 8 loss = 10.8347\nDEBUG \t step 9 loss = 10.7364\nDEBUG \t step 10 loss = 10.5978\nDEBUG \t step 11 loss = 10.2533\nDEBUG \t step 12 loss = 10.131\nDEBUG \t step 13 loss = 10.0307\nDEBUG \t step 14 loss = 9.57977\nDEBUG \t step 15 loss = 9.79295\nDEBUG \t step 16 loss = 9.46927\nDEBUG \t step 17 loss = 9.57581\nDEBUG \t step 18 loss = 9.24119\nDEBUG \t step 19 loss = 9.34084\nDEBUG \t step 20 loss = 9.32529\nDEBUG \t step 21 loss = 9.40313\nDEBUG \t step 22 loss = 9.27057\nDEBUG \t step 23 loss = 9.05239\nDEBUG \t step 24 loss = 9.17952\nDEBUG \t step 25 loss = 8.93083\nDEBUG \t step 26 loss = 8.88059\nDEBUG \t step 27 loss = 9.06328\nDEBUG \t step 28 loss = 8.97881\nDEBUG \t step 29 loss = 8.7639\nDEBUG \t step 30 loss = 8.80499\nDEBUG \t step 31 loss = 8.87173\nDEBUG \t step 32 loss = 8.56747\nDEBUG \t step 33 loss = 8.61066\nDEBUG \t step 34 loss = 8.79932\nDEBUG \t step 35 loss = 8.62871\nDEBUG \t step 36 loss = 8.54852\nDEBUG \t step 37 loss = 8.38022\nDEBUG \t step 38 loss = 8.31573\nDEBUG \t step 39 loss = 8.53857\nDEBUG \t step 40 loss = 8.57149\nDEBUG \t step 41 loss = 8.25793\nDEBUG \t step 42 loss = 8.54684\nDEBUG \t step 43 loss = 8.47699\nDEBUG \t step 44 loss = 8.3233\nDEBUG \t step 45 loss = 8.40228\nDEBUG \t step 46 loss = 8.14949\nDEBUG \t step 47 loss = 8.2015\nDEBUG \t step 48 loss = 8.07472\nDEBUG \t step 49 loss = 8.16795\nDEBUG \t step 50 loss = 8.34108\nDEBUG \t step 51 loss = 8.57682\nDEBUG \t step 52 loss = 8.24426\nDEBUG \t step 53 loss = 8.33251\nDEBUG \t step 54 loss = 8.10115\nDEBUG \t step 55 loss = 8.67902\nDEBUG \t step 56 loss = 8.14677\nDEBUG \t step 57 loss = 8.1041\nDEBUG \t step 58 loss = 8.15102\nDEBUG \t step 59 
loss = 8.00679\nDEBUG \t step 60 loss = 8.0271\nDEBUG \t step 61 loss = 7.96041\nDEBUG \t step 62 loss = 7.82294\nDEBUG \t step 63 loss = 8.13456\nDEBUG \t step 64 loss = 8.23367\nDEBUG \t step 65 loss = 8.1886\nDEBUG \t step 66 loss = 8.11654\nDEBUG \t step 67 loss = 8.22645\nDEBUG \t step 68 loss = 8.29743\nDEBUG \t step 69 loss = 8.24127\nDEBUG \t step 70 loss = 7.86166\nDEBUG \t step 71 loss = 8.22115\nDEBUG \t step 72 loss = 7.8913\nDEBUG \t step 73 loss = 7.96265\nDEBUG \t step 74 loss = 7.96243\nDEBUG \t step 75 loss = 7.99336\nDEBUG \t step 76 loss = 7.97742\nDEBUG \t step 77 loss = 7.90728\nDEBUG \t step 78 loss = 7.79539\nDEBUG \t step 79 loss = 8.1732\nDEBUG \t step 80 loss = 8.05217\nDEBUG \t step 81 loss = 8.34642\nDEBUG \t step 82 loss = 8.03199\nDEBUG \t step 83 loss = 7.64226\nDEBUG \t step 84 loss = 7.60438\nDEBUG \t step 85 loss = 7.5962\nDEBUG \t step 86 loss = 7.85927\nDEBUG \t step 87 loss = 7.98567\nDEBUG \t step 88 loss = 7.82793\nDEBUG \t step 89 loss = 7.90716\nDEBUG \t step 90 loss = 7.71277\nDEBUG \t step 91 loss = 7.97724\nDEBUG \t step 92 loss = 7.84886\nDEBUG \t step 93 loss = 7.88323\nDEBUG \t step 94 loss = 7.58179\nDEBUG \t step 95 loss = 7.89912\nDEBUG \t step 96 loss = 7.67735\nDEBUG \t step 97 loss = 7.84808\nDEBUG \t step 98 loss = 7.66705\nDEBUG \t step 99 loss = 7.65615\nDEBUG \t step 100 loss = 7.73811\nDEBUG \t step 101 loss = 7.64997\nDEBUG \t step 102 loss = 8.36613\nDEBUG \t step 103 loss = 7.72687\nDEBUG \t step 104 loss = 7.68498\nDEBUG \t step 105 loss = 7.50849\nDEBUG \t step 106 loss = 7.63987\nDEBUG \t step 107 loss = 7.75501\nDEBUG \t step 108 loss = 7.62423\nDEBUG \t step 109 loss = 7.66921\nDEBUG \t step 110 loss = 7.50166\nDEBUG \t step 111 loss = 7.62314\nDEBUG \t step 112 loss = 7.80907\nDEBUG \t step 113 loss = 7.65659\nDEBUG \t step 114 loss = 7.55159\nDEBUG \t step 115 loss = 7.60577\nDEBUG \t step 116 loss = 7.36759\nDEBUG \t step 117 loss = 7.43037\nDEBUG \t step 118 loss = 7.41372\nDEBUG \t step 119 loss 
= 7.58245\nDEBUG \t step 120 loss = 7.75382\nDEBUG \t step 121 loss = 7.75345\nDEBUG \t step 122 loss = 7.71091\nDEBUG \t step 123 loss = 7.61762\nDEBUG \t step 124 loss = 7.5415\nDEBUG \t step 125 loss = 7.70995\nDEBUG \t step 126 loss = 7.43083\nDEBUG \t step 127 loss = 7.62284\nDEBUG \t step 128 loss = 7.57494\nDEBUG \t step 129 loss = 7.43229\nDEBUG \t step 130 loss = 7.417\nDEBUG \t step 131 loss = 7.36716\nDEBUG \t step 132 loss = 7.58527\nDEBUG \t step 133 loss = 7.61684\nDEBUG \t step 134 loss = 7.55247\nDEBUG \t step 135 loss = 7.54181\nDEBUG \t step 136 loss = 7.47493\nDEBUG \t step 137 loss = 7.65583\nDEBUG \t step 138 loss = 7.33769\nDEBUG \t step 139 loss = 7.36649\nDEBUG \t step 140 loss = 7.3634\nDEBUG \t step 141 loss = 7.50731\nDEBUG \t step 142 loss = 7.60657\nDEBUG \t step 143 loss = 7.38694\nDEBUG \t step 144 loss = 7.3596\nDEBUG \t step 145 loss = 7.42744\nDEBUG \t step 146 loss = 7.46609\nDEBUG \t step 147 loss = 7.44444\nDEBUG \t step 148 loss = 7.44656\nDEBUG \t step 149 loss = 7.32834\nDEBUG \t step 150 loss = 7.63049\nDEBUG \t step 151 loss = 7.43903\nDEBUG \t step 152 loss = 7.28372\nDEBUG \t step 153 loss = 7.28897\nDEBUG \t step 154 loss = 7.3515\nDEBUG \t step 155 loss = 7.29871\nDEBUG \t step 156 loss = 7.47948\nDEBUG \t step 157 loss = 7.56888\nDEBUG \t step 158 loss = 7.50302\nDEBUG \t step 159 loss = 7.14918\nDEBUG \t step 160 loss = 7.34611\nDEBUG \t step 161 loss = 7.04855\nDEBUG \t step 162 loss = 7.38615\nDEBUG \t step 163 loss = 7.39172\nDEBUG \t step 164 loss = 7.35778\nDEBUG \t step 165 loss = 7.39445\nDEBUG \t step 166 loss = 7.41489\nDEBUG \t step 167 loss = 7.36096\nDEBUG \t step 168 loss = 7.49107\nDEBUG \t step 169 loss = 7.31799\nDEBUG \t step 170 loss = 7.34851\nDEBUG \t step 171 loss = 7.17355\nDEBUG \t step 172 loss = 7.38851\nDEBUG \t step 173 loss = 7.35425\nDEBUG \t step 174 loss = 7.39068\nDEBUG \t step 175 loss = 7.08015\nDEBUG \t step 176 loss = 7.05245\nDEBUG \t step 177 loss = 7.43696\nDEBUG \t step 178 loss 
= 7.32325\nDEBUG \t step 179 loss = 7.31021\nDEBUG \t step 180 loss = 7.32132\nDEBUG \t step 181 loss = 7.34862\nDEBUG \t step 182 loss = 7.2863\nDEBUG \t step 183 loss = 7.04851\nDEBUG \t step 184 loss = 7.09608\nDEBUG \t step 185 loss = 7.30419\nDEBUG \t step 186 loss = 7.57377\nDEBUG \t step 187 loss = 7.17361\nDEBUG \t step 188 loss = 7.14099\nDEBUG \t step 189 loss = 7.0449\nDEBUG \t step 190 loss = 7.33529\nDEBUG \t step 191 loss = 8.26479\nDEBUG \t step 192 loss = 7.07407\nDEBUG \t step 193 loss = 7.17149\nDEBUG \t step 194 loss = 7.18364\nDEBUG \t step 195 loss = 7.27539\nDEBUG \t step 196 loss = 7.32838\nDEBUG \t step 197 loss = 7.26303\nDEBUG \t step 198 loss = 7.17846\nDEBUG \t step 199 loss = 7.43274\nDEBUG \t step 200 loss = 7.05834\nDEBUG \t step 201 loss = 7.06987\nDEBUG \t step 202 loss = 7.23815\nDEBUG \t step 203 loss = 7.2454\nDEBUG \t step 204 loss = 7.29509\nDEBUG \t step 205 loss = 7.13663\nDEBUG \t step 206 loss = 6.96725\nDEBUG \t step 207 loss = 7.11374\nDEBUG \t step 208 loss = 6.93604\nDEBUG \t step 209 loss = 7.14596\nDEBUG \t step 210 loss = 7.12832\nDEBUG \t step 211 loss = 7.16911\nDEBUG \t step 212 loss = 6.9426\nDEBUG \t step 213 loss = 7.18095\nDEBUG \t step 214 loss = 7.06178\nDEBUG \t step 215 loss = 7.10941\nDEBUG \t step 216 loss = 7.11186\nDEBUG \t step 217 loss = 7.20186\nDEBUG \t step 218 loss = 7.27586\nDEBUG \t step 219 loss = 7.1021\nDEBUG \t step 220 loss = 6.94478\nDEBUG \t step 221 loss = 7.09795\nDEBUG \t step 222 loss = 6.88571\nDEBUG \t step 223 loss = 7.03089\nDEBUG \t step 224 loss = 7.23866\nDEBUG \t step 225 loss = 7.10442\nDEBUG \t step 226 loss = 6.95982\nDEBUG \t step 227 loss = 8.71509\nDEBUG \t step 228 loss = 6.93005\nDEBUG \t step 229 loss = 7.2101\nDEBUG \t step 230 loss = 7.23326\nDEBUG \t step 231 loss = 6.94798\nDEBUG \t step 232 loss = 6.83511\nDEBUG \t step 233 loss = 6.99621\nDEBUG \t step 234 loss = 6.79696\nDEBUG \t step 235 loss = 7.21458\nDEBUG \t step 236 loss = 6.97841\nDEBUG \t step 237 loss 
= 7.12467\nDEBUG \t step 238 loss = 6.98927\nDEBUG \t step 239 loss = 7.13294\nDEBUG \t step 240 loss = 7.17033\n"
],
[
"actuals_train = preds_dict_train['Actuals']\nactuals_validation = preds_dict_valid['Actuals']\n\nsynthetic_summary_train = pd.DataFrame({label: [preds.mean(), mse(preds, actuals_train)] for label, preds\n in preds_dict_train.items() if 'generated' not in label.lower()},\n index=['ATE', 'MSE']).T\nsynthetic_summary_train['Abs % Error of ATE'] = np.abs(\n (synthetic_summary_train['ATE']/synthetic_summary_train.loc['Actuals', 'ATE']) - 1)\n\nsynthetic_summary_validation = pd.DataFrame({label: [preds.mean(), mse(preds, actuals_validation)]\n for label, preds in preds_dict_valid.items()\n if 'generated' not in label.lower()},\n index=['ATE', 'MSE']).T\nsynthetic_summary_validation['Abs % Error of ATE'] = np.abs(\n (synthetic_summary_validation['ATE']/synthetic_summary_validation.loc['Actuals', 'ATE']) - 1)\n\n# calculate kl divergence for training\nfor label in synthetic_summary_train.index:\n stacked_values = np.hstack((preds_dict_train[label], actuals_train))\n stacked_low = np.percentile(stacked_values, 0.1)\n stacked_high = np.percentile(stacked_values, 99.9)\n bins = np.linspace(stacked_low, stacked_high, 100)\n\n distr = np.histogram(preds_dict_train[label], bins=bins)[0]\n distr = np.clip(distr/distr.sum(), 0.001, 0.999)\n true_distr = np.histogram(actuals_train, bins=bins)[0]\n true_distr = np.clip(true_distr/true_distr.sum(), 0.001, 0.999)\n\n kl = entropy(distr, true_distr)\n synthetic_summary_train.loc[label, 'KL Divergence'] = kl\n\n# calculate kl divergence for validation\nfor label in synthetic_summary_validation.index:\n stacked_values = np.hstack((preds_dict_valid[label], actuals_validation))\n stacked_low = np.percentile(stacked_values, 0.1)\n stacked_high = np.percentile(stacked_values, 99.9)\n bins = np.linspace(stacked_low, stacked_high, 100)\n\n distr = np.histogram(preds_dict_valid[label], bins=bins)[0]\n distr = np.clip(distr/distr.sum(), 0.001, 0.999)\n true_distr = np.histogram(actuals_validation, bins=bins)[0]\n true_distr = 
np.clip(true_distr/true_distr.sum(), 0.001, 0.999)\n\n kl = entropy(distr, true_distr)\n synthetic_summary_validation.loc[label, 'KL Divergence'] = kl",
"_____no_output_____"
],
[
"df_preds_train = pd.DataFrame([preds_dict_train['S Learner (LR)'].ravel(),\n preds_dict_train['S Learner (XGB)'].ravel(),\n preds_dict_train['T Learner (LR)'].ravel(),\n preds_dict_train['T Learner (XGB)'].ravel(),\n preds_dict_train['X Learner (LR)'].ravel(),\n preds_dict_train['X Learner (XGB)'].ravel(),\n preds_dict_train['R Learner (LR)'].ravel(),\n preds_dict_train['R Learner (XGB)'].ravel(), \n preds_dict_train['CEVAE'].ravel(),\n preds_dict_train['generated_data']['tau'].ravel(),\n preds_dict_train['generated_data']['w'].ravel(),\n preds_dict_train['generated_data']['y'].ravel()],\n index=['S Learner (LR)','S Learner (XGB)',\n 'T Learner (LR)','T Learner (XGB)',\n 'X Learner (LR)','X Learner (XGB)',\n 'R Learner (LR)','R Learner (XGB)',\n 'CEVAE','tau','w','y']).T\n\nsynthetic_summary_train['AUUC'] = auuc_score(df_preds_train).iloc[:-1]",
"_____no_output_____"
],
[
"df_preds_validation = pd.DataFrame([preds_dict_valid['S Learner (LR)'].ravel(),\n preds_dict_valid['S Learner (XGB)'].ravel(),\n preds_dict_valid['T Learner (LR)'].ravel(),\n preds_dict_valid['T Learner (XGB)'].ravel(),\n preds_dict_valid['X Learner (LR)'].ravel(),\n preds_dict_valid['X Learner (XGB)'].ravel(),\n preds_dict_valid['R Learner (LR)'].ravel(),\n preds_dict_valid['R Learner (XGB)'].ravel(), \n preds_dict_valid['CEVAE'].ravel(),\n preds_dict_valid['generated_data']['tau'].ravel(),\n preds_dict_valid['generated_data']['w'].ravel(),\n preds_dict_valid['generated_data']['y'].ravel()],\n index=['S Learner (LR)','S Learner (XGB)',\n 'T Learner (LR)','T Learner (XGB)',\n 'X Learner (LR)','X Learner (XGB)',\n 'R Learner (LR)','R Learner (XGB)',\n 'CEVAE','tau','w','y']).T\n\nsynthetic_summary_validation['AUUC'] = auuc_score(df_preds_validation).iloc[:-1]",
"_____no_output_____"
],
[
"synthetic_summary_train",
"_____no_output_____"
],
[
"synthetic_summary_validation",
"_____no_output_____"
],
[
"plot_gain(df_preds_train)",
"_____no_output_____"
],
[
"plot_gain(df_preds_validation)",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a22c0b6106834f6a3106077d061f40628dcde1f
| 73,681 |
ipynb
|
Jupyter Notebook
|
results/ThirdBreakoutRewardLog.ipynb
|
ProFrenchToast/comp300
|
7a5a53fa8d21ad22e3db3b723e1bac15d46d329a
|
[
"MIT"
] | null | null | null |
results/ThirdBreakoutRewardLog.ipynb
|
ProFrenchToast/comp300
|
7a5a53fa8d21ad22e3db3b723e1bac15d46d329a
|
[
"MIT"
] | null | null | null |
results/ThirdBreakoutRewardLog.ipynb
|
ProFrenchToast/comp300
|
7a5a53fa8d21ad22e3db3b723e1bac15d46d329a
|
[
"MIT"
] | null | null | null | 127.696707 | 44,876 | 0.834829 |
[
[
[
"this notebook will be used to show the performance of the first attempt at learning reward.\n\nfirst load the trained reward network anbd setup methods.",
"_____no_output_____"
]
],
[
[
"from baselines.common.vec_env import VecFrameStack\nfrom LearningModel.AgentClasses import *\nfrom baselines.common.cmd_util import make_vec_env\nimport tensorflow as tf\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom os import listdir\nfrom os.path import isfile, join\nimport re",
"/home/patrick/PycharmProjects/comp300/venv/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:516: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n/home/patrick/PycharmProjects/comp300/venv/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:517: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n/home/patrick/PycharmProjects/comp300/venv/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:518: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n/home/patrick/PycharmProjects/comp300/venv/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:519: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n/home/patrick/PycharmProjects/comp300/venv/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:520: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n/home/patrick/PycharmProjects/comp300/venv/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n np_resource = np.dtype([(\"resource\", np.ubyte, 
1)])\n/home/patrick/PycharmProjects/comp300/venv/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:541: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n/home/patrick/PycharmProjects/comp300/venv/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:542: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n/home/patrick/PycharmProjects/comp300/venv/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:543: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n/home/patrick/PycharmProjects/comp300/venv/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:544: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n/home/patrick/PycharmProjects/comp300/venv/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:545: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n/home/patrick/PycharmProjects/comp300/venv/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:550: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n np_resource = 
np.dtype([(\"resource\", np.ubyte, 1)])\n"
],
[
"\n#load the reward network\ntrainedNetwork = RewardNetwork(\"\")\n\ntrainedNetwork.load_state_dict(torch.load(\"/home/patrick/models/breakout-reward3/fullTest.params\", map_location=torch.device('cpu')))\n\n#setup the env\nmodel_path = \"/home/patrick/models/BreakoutNoFrameskip-v4-demonstrator3\"\nenv_id = 'BreakoutNoFrameskip-v4'\nenv_type = 'atari'\n\nenv = make_vec_env(env_id, env_type, 1, 0,\n wrapper_kwargs={\n 'clip_rewards': False,\n 'episode_life': False,\n })\nenv = VecFrameStack(env, 4)\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nagent = PPO2Agent(env, 'atari', True)\ntrainedNetwork.to(device)\n\n#run the agent in the env once and save the ground truth reward and observations\ndef GetDemoFromAgent(agent, network, env):\n trueReward = 0\n learnedReward = 0\n\n currentReward = 0\n currentObservation = env.reset()\n timeSteps = 0\n done = False\n\n #run the demo\n while True:\n trueReward += currentReward\n shapedObservation = torch.from_numpy(currentObservation).float().to(device)\n reward, abs_reward = network.predict_reward(shapedObservation)\n learnedReward += reward.tolist()\n\n action = agent.act(currentObservation, currentReward, done)\n currentObservation, currentReward, done, info = env.step(action)\n shapedObservations = currentObservation\n timeSteps += 1\n\n if done:\n trueReward += currentReward\n reward, abs_reward = network.predict_reward(shapedObservation)\n learnedReward += reward.tolist()\n break\n print(\"{}, {}\".format(trueReward, learnedReward))\n return trueReward, learnedReward\n\n#a method to find all the models in a given dir that are just numbers\ndef Find_all_Models(model_dir):\n\n checkpoints = []\n filesandDirs = listdir(model_dir)\n allFiles = []\n for i in filesandDirs:\n if isfile(join(model_dir, i)):\n allFiles.append(i)\n\n for file in allFiles:\n if re.match('^[0-9]+$',file.title()):\n checkpoints.append(file.title())\n\n return checkpoints\n\n\n",
"Logging to /tmp/openai-2020-03-02-21-05-23-191059\nWARNING:tensorflow:From /home/patrick/PycharmProjects/comp300/baselines-master/baselines/common/misc_util.py:58: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.\n\nWARNING:tensorflow:From /home/patrick/PycharmProjects/comp300/baselines-master/baselines/common/tf_util.py:53: The name tf.get_default_session is deprecated. Please use tf.compat.v1.get_default_session instead.\n\nWARNING:tensorflow:From /home/patrick/PycharmProjects/comp300/baselines-master/baselines/common/tf_util.py:63: The name tf.ConfigProto is deprecated. Please use tf.compat.v1.ConfigProto instead.\n\nWARNING:tensorflow:From /home/patrick/PycharmProjects/comp300/baselines-master/baselines/common/tf_util.py:70: The name tf.InteractiveSession is deprecated. Please use tf.compat.v1.InteractiveSession instead.\n\nWARNING:tensorflow:From /home/patrick/PycharmProjects/comp300/baselines-master/baselines/ppo2/model.py:34: The name tf.variable_scope is deprecated. Please use tf.compat.v1.variable_scope instead.\n\nWARNING:tensorflow:From /home/patrick/PycharmProjects/comp300/baselines-master/baselines/common/input.py:57: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse `tf.cast` instead.\nWARNING:tensorflow:From /home/patrick/PycharmProjects/comp300/baselines-master/baselines/common/policies.py:43: flatten (from tensorflow.python.layers.core) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse keras.layers.flatten instead.\nWARNING:tensorflow:Entity <bound method Flatten.call of <tensorflow.python.layers.core.Flatten object at 0x7f86fa951ef0>> could not be transformed and will be executed as-is. Please report this to the AutgoGraph team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output. 
Cause: converting <bound method Flatten.call of <tensorflow.python.layers.core.Flatten object at 0x7f86fa951ef0>>: AttributeError: module 'gast' has no attribute 'Num'\nWARNING: Entity <bound method Flatten.call of <tensorflow.python.layers.core.Flatten object at 0x7f86fa951ef0>> could not be transformed and will be executed as-is. Please report this to the AutgoGraph team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output. Cause: converting <bound method Flatten.call of <tensorflow.python.layers.core.Flatten object at 0x7f86fa951ef0>>: AttributeError: module 'gast' has no attribute 'Num'\nWARNING:tensorflow:Entity <bound method Flatten.call of <tensorflow.python.layers.core.Flatten object at 0x7f86fa951ef0>> could not be transformed and will be executed as-is. Please report this to the AutgoGraph team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output. Cause: converting <bound method Flatten.call of <tensorflow.python.layers.core.Flatten object at 0x7f86fa951ef0>>: AttributeError: module 'gast' has no attribute 'Num'\nWARNING: Entity <bound method Flatten.call of <tensorflow.python.layers.core.Flatten object at 0x7f86fa951ef0>> could not be transformed and will be executed as-is. Please report this to the AutgoGraph team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output. Cause: converting <bound method Flatten.call of <tensorflow.python.layers.core.Flatten object at 0x7f86fa951ef0>>: AttributeError: module 'gast' has no attribute 'Num'\nWARNING:tensorflow:Entity <bound method Flatten.call of <tensorflow.python.layers.core.Flatten object at 0x7f86d8564908>> could not be transformed and will be executed as-is. Please report this to the AutgoGraph team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output. 
Cause: converting <bound method Flatten.call of <tensorflow.python.layers.core.Flatten object at 0x7f86d8564908>>: AttributeError: module 'gast' has no attribute 'Num'\nWARNING: Entity <bound method Flatten.call of <tensorflow.python.layers.core.Flatten object at 0x7f86d8564908>> could not be transformed and will be executed as-is. Please report this to the AutgoGraph team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output. Cause: converting <bound method Flatten.call of <tensorflow.python.layers.core.Flatten object at 0x7f86d8564908>>: AttributeError: module 'gast' has no attribute 'Num'\nWARNING:tensorflow:Entity <bound method Flatten.call of <tensorflow.python.layers.core.Flatten object at 0x7f86d8564908>> could not be transformed and will be executed as-is. Please report this to the AutgoGraph team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output. Cause: converting <bound method Flatten.call of <tensorflow.python.layers.core.Flatten object at 0x7f86d8564908>>: AttributeError: module 'gast' has no attribute 'Num'\nWARNING: Entity <bound method Flatten.call of <tensorflow.python.layers.core.Flatten object at 0x7f86d8564908>> could not be transformed and will be executed as-is. Please report this to the AutgoGraph team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output. Cause: converting <bound method Flatten.call of <tensorflow.python.layers.core.Flatten object at 0x7f86d8564908>>: AttributeError: module 'gast' has no attribute 'Num'\nWARNING:tensorflow:From /home/patrick/PycharmProjects/comp300/baselines-master/baselines/ppo2/model.py:100: The name tf.train.AdamOptimizer is deprecated. 
Please use tf.compat.v1.train.AdamOptimizer instead.\n\nWARNING:tensorflow:From /home/patrick/PycharmProjects/comp300/venv/lib/python3.6/site-packages/tensorflow/python/ops/math_grad.py:1250: add_dispatch_support.<locals>.wrapper (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse tf.where in 2.0, which has the same broadcast rule as np.where\n"
]
],
[
[
"now load all models and run each to get demos to run the network on",
"_____no_output_____"
]
],
[
[
"trueRewards = []\nlearnedRewards = []\n\nmodels = Find_all_Models(model_path)\n\nfor model in models:\n agent.load(model_path + \"/\" + model)\n trueReward, learnedReward = GetDemoFromAgent(agent, trainedNetwork, env)\n tf.keras.backend.clear_session()\n trueRewards.append(trueReward[0])\n learnedRewards.append(learnedReward)\n \n\n",
"[57.], 52.65373594593257\n[308.], 191.76234501600266\n[124.], 83.56235053390265\n[141.], 73.47029684856534\n[93.], 142.89483132306486\n[93.], 79.50916735362262\n[94.], 79.76580621488392\n[65.], 122.45286505669355\n[316.], 102.71511636581272\n[267.], 36.02437886130065\n[283.], 112.93279913347214\n[152.], 136.68532236572355\n[59.], 19.870022861286998\n[122.], 146.03969802428037\n[78.], 109.94045857619494\n[130.], 132.63688319455832\n[82.], 124.68824950419366\n[98.], 68.68249187152833\n[126.], 151.28730838745832\n[88.], 107.32665666565299\n[323.], 82.49900289811194\n[62.], 75.0363013362512\n[281.], 161.55833268165588\n[289.], 112.83115648292005\n[125.], 57.39856562204659\n[129.], 107.71072018146515\n[305.], 106.15777249354869\n[340.], 147.6365279853344\n[70.], 83.5810166420415\n[173.], 96.60258932691067\n[139.], 51.487426582723856\n[113.], 98.27144344709814\n[93.], 104.98326349351555\n[267.], 102.69184742402285\n[320.], 99.6245203120634\n[340.], 141.7067567864433\n[343.], 94.33877188432962\n[349.], 65.47162680048496\n[248.], 77.89093473274261\n[73.], 133.1529985340312\n[269.], 135.9128506137058\n[286.], 105.09613138902932\n[108.], 80.6545086633414\n[250.], 133.46000437997282\n[224.], 264.76804697327316\n[70.], 67.80750926211476\n[95.], 52.20986775122583\n[108.], 73.11667365022004\n[117.], 95.83276813384145\n[322.], 141.35372887179255\n[283.], 100.30567076429725\n[78.], 98.23023703787476\n[175.], 77.57010953687131\n[355.], 150.59071560110897\n[93.], 86.76145241688937\n[313.], 146.93669094704092\n[127.], 6.701068799942732\n[150.], 67.02904352825135\n[336.], 118.9798309057951\n[186.], 153.8492277143523\n[384.], 129.34115943592042\n[117.], 83.30188045464456\n[368.], 106.87870887760073\n[104.], 124.77998706046492\n[257.], 89.60902629699558\n[73.], 85.42804668284953\n[109.], 98.01205138023943\n[103.], 87.56727641820908\n[141.], 143.0932534225285\n[353.], 106.74860554095358\n[79.], 59.52388375997543\n[49.], 71.62184754945338\n[100.], 83.85413444414735\n[85.], 
77.26270123384893\n[360.], 199.50096812658012\n[112.], 77.08946455456316\n[145.], 117.13880152627826\n[271.], 117.39379448909312\n[395.], 195.3167693540454\n[288.], 109.68061538878828\n[289.], 92.83005775604397\n[321.], 63.23741071391851\n[55.], 64.48723636195064\n[249.], 1.7382835242897272\n[282.], 125.71506372280419\n[340.], 96.17853480298072\n[113.], 72.17341868393123\n[350.], 176.27817711047828\n[180.], 152.38801443576813\n[93.], 106.04666901752353\n[87.], 148.3739481633529\n[112.], 93.31697918567806\n[291.], 129.88463889807463\n[71.], 38.789601046592\n[66.], 38.39256653469056\n[146.], 102.26708268560469\n[197.], 52.71566751599312\n[336.], 113.7253508688882\n[168.], 115.27991340495646\n[374.], 148.7274799225852\n[114.], 56.83437640406191\n[77.], 86.20308840740472\n[116.], 145.90816802624613\n[94.], 103.55583883170038\n[97.], 70.12062647845596\n[349.], 86.52316235750914\n[295.], 110.63867236301303\n[80.], 76.26499244663864\n[283.], 124.52499457634985\n[131.], 154.8919510487467\n[320.], 54.70806602947414\n[269.], 134.4457847448066\n[358.], 177.28552507515997\n[157.], 121.7494330778718\n[80.], 121.59056012518704\n[112.], 116.68595107831061\n[91.], 101.41709168069065\n[62.], 71.27097797300667\n[89.], 60.62751367036253\n[335.], 143.09341783169657\n[168.], 123.58663384057581\n[59.], 82.24334625061601\n[145.], 122.53048359043896\n[179.], 88.78973637148738\n[266.], 170.6861750939861\n[52.], 56.685034992173314\n[306.], 163.85194522328675\n[315.], 115.27947876509279\n[73.], 14.612768628634512\n[114.], 100.45254064071923\n[351.], 125.48294920474291\n[166.], 125.97563017718494\n[107.], 70.69065168872476\n[342.], 75.6729501420632\n[200.], 160.42483417410403\n[337.], 142.47951181605458\n[287.], 128.16453834064305\n[40.], 30.455199384130538\n[293.], 150.28059664648026\n[73.], 97.38854240439832\n[220.], 119.29799693636596\n[104.], 104.59192889370024\n[63.], 56.30680039152503\n[125.], 90.80746328737587\n[246.], 118.77956010494381\n[69.], 82.51955921016634\n[274.], 
92.48051985073835\n[297.], 103.49611903727055\n[313.], 117.74338198732585\n[333.], 138.6719002937898\n[317.], 81.25987342279404\n[287.], 99.92114053294063\n[255.], 1339.1136268386617\n[315.], 121.36254670843482\n[262.], 79.40907689090818\n[77.], 79.63217249326408\n[288.], 115.24412882328033\n[71.], 52.693233570083976\n[348.], 69.29205689020455\n[52.], 43.76776155177504\n[281.], 64.78335389494896\n[43.], 50.87224349100143\n[149.], 141.19322386849672\n[101.], 102.9409810276702\n[62.], 72.77659108676016\n[318.], 116.00869151949883\n[57.], 84.67975354101509\n[94.], 108.29948772955686\n[83.], 60.20293819066137\n[282.], 91.83837767131627\n[64.], 102.28272223472595\n[88.], 84.6447250796482\n[276.], 111.15425467956811\n[354.], 198.1087228944525\n[59.], 106.74657609499991\n[91.], 66.81675578467548\n[207.], 79.41751826088876\n[118.], 154.36900007165968\n[182.], 111.41848908923566\n[61.], 55.89375979080796\n[364.], 105.50917889829725\n[271.], 56.95634676143527\n[218.], 75.18044184148312\n[174.], 102.50699022132903\n[259.], 49.215318916365504\n[254.], 33.68908238969743\n[67.], 61.989940469153225\n[301.], 106.75326817948371\n[37.], 59.750898835249245\n[33.], 41.65689632110298\n[70.], 50.902413008734584\n[52.], 59.51762952376157\n[244.], 169.8078524256125\n[360.], 153.487445066683\n[84.], 97.14832558017224\n[85.], 97.49442064762115\n[74.], 73.19115662109107\n[284.], 50.733473759144545\n[294.], 105.73020786419511\n[354.], 156.84080788027495\n"
],
[
"maxTrue = max(trueRewards)\nminLearned = min(learnedRewards)\nnormalisedRewards = [x-minLearned for x in learnedRewards]\ncopyLearned = []\ncopyTrue = []\nfor i in range(len(normalisedRewards)):\n if normalisedRewards[i] > 1000:\n pass\n else:\n copyLearned.append(normalisedRewards[i])\n copyTrue.append(trueRewards[i])\nmaxLearned = max(copyLearned)\ncopyLearned = (copyLearned) / (maxLearned / maxTrue)",
"_____no_output_____"
],
[
"from matplotlib.pyplot import figure\n\nprint(\"{},{}\".format(maxTrue, max(normalisedRewards)))\n\nfigure(num=None, figsize=(10, 7), dpi=80, facecolor='w', edgecolor='k')\nplt.scatter(np.array(copyTrue), np.array(copyLearned), c='b')\nplt.plot(np.arange(500), np.arange(500))\nplt.ylabel(\"Learned reward\")\nplt.xlabel(\"Ground truth reward\")\nplt.title(\"graph of learned reward against ground truth\")\nplt.show()",
"395.0,1337.375343314372\n"
],
[
"minReward = [min(trueRewards)]\nmaxReward = [max(trueRewards)]\naverage = [sum(trueRewards) / len(trueRewards)]\n\nfrom LearningModel.getAverageReward import *\nagent.load(\"~/models/breakout-reward-RL3/breakout_50M_ppo2\")\nmeanR, minR,maxR, std = getAvgReward(agent, env, 200)\n\nminReward.append(minR)\nmaxReward.append(maxR)\naverage.append(meanR)\n",
"_____no_output_____"
],
[
"minReward[0] = min(trueRewards)\nmaxReward[0] = max(trueRewards)\naverage[0] = sum(trueRewards) /len(trueRewards)\nprint(\"mins: {}, maxs: {}, means: {}\".format(minReward, maxReward, average))\n# create plot\nfigure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k')\n#fig, ax = plt.subplots()\nindex = np.arange(2)\nbar_width = 0.3\nopacity = 0.8\n\nrects1 = plt.bar(index, minReward, bar_width,\nalpha=opacity,\ncolor='b',\nlabel='Minimum Reward')\n\nrects2 = plt.bar(index + bar_width, average, bar_width,\nalpha=opacity,\ncolor='g',\nlabel='Average Reward')\n\nrects3 = plt.bar(index + bar_width +bar_width, maxReward, bar_width,\nalpha=opacity,\ncolor='r',\nlabel='Max Reward')\n\nplt.xlabel('Agent')\nplt.ylabel('Reward')\nplt.title('The min, max and mean reward of the demonstrator and trained agent')\nplt.xticks(index + bar_width, ('Demonstrations', 'Trained agent'))\nplt.legend()\n\nplt.tight_layout()\nplt.show()",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
4a22dd6b6f13b527da5b802bb35b87861dfa3449
| 9,980 |
ipynb
|
Jupyter Notebook
|
PyTorch Exercises FASHION-MNIST/Part 1 - Tensors in PyTorch (Exercises)-chinese.ipynb
|
jyrj/deeplearning-using-pytorch
|
235acd67d9b53981a44b8860e3eea5ec2a1da333
|
[
"MIT"
] | 1 |
2019-06-01T15:03:50.000Z
|
2019-06-01T15:03:50.000Z
|
PyTorch Exercises FASHION-MNIST/Part 1 - Tensors in PyTorch (Exercises)-chinese.ipynb
|
jyrj/deeplearning-using-pytorch
|
235acd67d9b53981a44b8860e3eea5ec2a1da333
|
[
"MIT"
] | 5 |
2020-09-26T00:59:18.000Z
|
2022-02-10T01:40:44.000Z
|
PyTorch Exercises FASHION-MNIST/Part 1 - Tensors in PyTorch (Exercises)-chinese.ipynb
|
jyrj/deeplearning-using-pytorch
|
235acd67d9b53981a44b8860e3eea5ec2a1da333
|
[
"MIT"
] | 1 |
2022-02-10T03:23:47.000Z
|
2022-02-10T03:23:47.000Z
| 29.181287 | 284 | 0.552605 |
[
[
[
"# 深度学习工具 PyTorch 简介\n\n在此 notebook 中,你将了解 [PyTorch](http://pytorch.org/),一款用于构建和训练神经网络的框架。PyTorch 在很多方面都和 Numpy 数组很像。毕竟,这些 Numpy 数组也是张量。PyTorch 会将这些张量当做输入并使我们能够轻松地将张量移到 GPU 中,以便在训练神经网络时加快处理速度。它还提供了一个自动计算梯度的模块(用于反向传播),以及另一个专门用于构建神经网络的模块。总之,与 TensorFlow 和其他框架相比,PyTorch 与 Python 和 Numpy/Scipy 堆栈更协调。\n\n\n\n## 神经网络\n\n深度学习以人工神经网络为基础。人工神经网络大致产生于上世纪 50 年代末。神经网络由多个像神经元一样的单个部分组成,这些部分通常称为单元或直接叫做“神经元”。每个单元都具有一定数量的加权输入。我们对这些加权输入求和,然后将结果传递给激活函数,以获得单元的输出。\n\n<img src=\"assets/simple_neuron.png\" width=400px>\n\n数学公式如下所示: \n\n$$\n\\begin{align}\ny &= f(w_1 x_1 + w_2 x_2 + b) \\\\\ny &= f\\left(\\sum_i w_i x_i +b \\right)\n\\end{align}\n$$\n\n对于向量来说,为两个向量的点积/内积:\n\n$$\nh = \\begin{bmatrix}\nx_1 \\, x_2 \\cdots x_n\n\\end{bmatrix}\n\\cdot \n\\begin{bmatrix}\n w_1 \\\\\n w_2 \\\\\n \\vdots \\\\\n w_n\n\\end{bmatrix}\n$$\n\n## 张量\n\n实际上神经网络计算只是对*张量*进行一系列线性代数运算,张量是矩阵的泛化形式。向量是一维张量,矩阵是二维张量,包含 3 个索引的数组是三维张量(例如 RGB 彩色图像)。神经网络的基本数据结构是张量,PyTorch(以及几乎所有其他深度学习框架)都是以张量为基础。\n\n<img src=\"assets/tensor_examples.svg\" width=600px>\n\n这些是基本知识,我们现在来看 PyTorch 如何构建简单的神经网络。",
"_____no_output_____"
]
],
[
[
"# First, import PyTorch\nimport torch",
"_____no_output_____"
],
[
"def activation(x):\n \"\"\" Sigmoid activation function \n \n Arguments\n ---------\n x: torch.Tensor\n \"\"\"\n return 1/(1+torch.exp(-x))",
"_____no_output_____"
],
[
"### Generate some data\ntorch.manual_seed(7) # Set the random seed so things are predictable\n\n# Features are 3 random normal variables\nfeatures = torch.randn((1, 5))\n# True weights for our data, random normal variables again\nweights = torch.randn_like(features)\n# and a true bias term\nbias = torch.randn((1, 1))",
"_____no_output_____"
]
],
[
[
"我在上面生成了一些数据,我们可以使用该数据获取这个简单网络的输出。这些暂时只是随机数据,之后我们将使用正常数据。我们来看看:\n\n`features = torch.randn((1, 5))` 创建一个形状为 `(1, 5)` 的张量,其中有 1 行和 5 列,包含根据正态分布(均值为 0,标准偏差为 1)随机分布的值。 \n\n`weights = torch.randn_like(features)` 创建另一个形状和 `features` 一样的张量,同样包含来自正态分布的值。\n\n最后,`bias = torch.randn((1, 1))` 根据正态分布创建一个值。\n\n和 Numpy 数组一样,PyTorch 张量可以相加、相乘、相减。行为都很类似。但是 PyTorch 张量具有一些优势,例如 GPU 加速,稍后我们会讲解。请计算这个简单单层网络的输出。 \n> **练习**:计算网络的输出:输入特征为 `features`,权重为 `weights`,偏差为 `bias`。和 Numpy 类似,PyTorch 也有一个对张量求和的 [`torch.sum()`](https://pytorch.org/docs/stable/torch.html#torch.sum) 函数和 `.sum()` 方法。请使用上面定义的函数 `activation` 作为激活函数。",
"_____no_output_____"
]
],
[
[
"## Calculate the output of this network using the weights and bias tensors",
"_____no_output_____"
]
],
[
[
"你可以在同一运算里使用矩阵乘法进行乘法和加法运算。推荐使用矩阵乘法,因为在 GPU 上使用现代库和高效计算资源使矩阵乘法更高效。\n\n如何对特征和权重进行矩阵乘法运算?我们可以使用 [`torch.mm()`](https://pytorch.org/docs/stable/torch.html#torch.mm) 或 [`torch.matmul()`](https://pytorch.org/docs/stable/torch.html#torch.matmul),后者更复杂,并支持广播。如果不对`features` 和 `weights` 进行处理,就会报错:",
"_____no_output_____"
]
],
[
[
">> torch.mm(features, weights)\n\n---------------------------------------------------------------------------\nRuntimeError Traceback (most recent call last)\n<ipython-input-13-15d592eb5279> in <module>()\n----> 1 torch.mm(features, weights)\n\nRuntimeError: size mismatch, m1: [1 x 5], m2: [1 x 5] at /Users/soumith/minicondabuild3/conda-bld/pytorch_1524590658547/work/aten/src/TH/generic/THTensorMath.c:2033",
"_____no_output_____"
]
],
[
[
"在任何框架中构建神经网络时,我们都会频繁遇到这种情况。原因是我们的张量不是进行矩阵乘法的正确形状。注意,对于矩阵乘法,第一个张量里的列数必须等于第二个张量里的行数。`features` 和 `weights` 具有相同的形状,即 `(1, 5)`。意味着我们需要更改 `weights` 的形状,以便进行矩阵乘法运算。\n\n**注意:**要查看张量 `tensor` 的形状,请使用 `tensor.shape`。以后也会经常用到。\n\n现在我们有以下几个选择:[`weights.reshape()`](https://pytorch.org/docs/stable/tensors.html#torch.Tensor.reshape)、[`weights.resize_()`](https://pytorch.org/docs/stable/tensors.html#torch.Tensor.resize_) 和 [`weights.view()`](https://pytorch.org/docs/stable/tensors.html#torch.Tensor.view)。\n\n* `weights.reshape(a, b)` 有时候将返回一个新的张量,数据和 `weights` 的一样,大小为 `(a, b)`;有时候返回克隆版,将数据复制到内存的另一个部分。\n* `weights.resize_(a, b)` 返回形状不同的相同张量。但是,如果新形状的元素数量比原始张量的少,则会从张量里删除某些元素(但是不会从内存中删除)。如果新形状的元素比原始张量的多,则新元素在内存里未初始化。注意,方法末尾的下划线表示这个方法是**原地**运算。要详细了解如何在 PyTorch 中进行原地运算,请参阅[此论坛话题](https://discuss.pytorch.org/t/what-is-in-place-operation/16244)。\n* `weights.view(a, b)` 将返回一个张量,数据和 `weights` 的一样,大小为 `(a, b)`。\n\n我通常使用 `.view()`,但这三个方法对此示例来说都可行。现在,我们可以通过 `weights.view(5, 1)` 变形 `weights`,使其具有 5 行和 1 列。\n\n> **练习**:请使用矩阵乘法计算网络的输出",
"_____no_output_____"
]
],
[
[
"## Calculate the output of this network using matrix multiplication",
"_____no_output_____"
]
],
[
[
"### 堆叠\n\n这就是计算单个神经元的输出的方式。当你将单个单元堆叠为层,并将层堆叠为神经元网络后,你就会发现这个算法的强大之处。一个神经元层的输出变成下一层的输入。对于多个输入单元和输出单元,我们现在需要将权重表示为矩阵。\n\n<img src='assets/multilayer_diagram_weights.png' width=450px>\n\n底部显示的第一个层级是输入,称为**输入层**。中间层称为**隐藏层**,最后一层(右侧)是**输出层**。我们可以再次使用矩阵从数学角度来描述这个网络,然后使用矩阵乘法将每个单元线性组合到一起。例如,可以这样计算隐藏层($h_1$ 和 $h_2$): \n\n$$\n\\vec{h} = [h_1 \\, h_2] = \n\\begin{bmatrix}\nx_1 \\, x_2 \\cdots \\, x_n\n\\end{bmatrix}\n\\cdot \n\\begin{bmatrix}\n w_{11} & w_{12} \\\\\n w_{21} &w_{22} \\\\\n \\vdots &\\vdots \\\\\n w_{n1} &w_{n2}\n\\end{bmatrix}\n$$\n\n我们可以将隐藏层当做输出单元的输入,从而得出这个小网络的输出,简单表示为:\n\n$$\ny = f_2 \\! \\left(\\, f_1 \\! \\left(\\vec{x} \\, \\mathbf{W_1}\\right) \\mathbf{W_2} \\right)\n$$",
"_____no_output_____"
]
],
[
[
"### Generate some data\ntorch.manual_seed(7) # Set the random seed so things are predictable\n\n# Features are 3 random normal variables\nfeatures = torch.randn((1, 3))\n\n# Define the size of each layer in our network\nn_input = features.shape[1] # Number of input units, must match number of input features\nn_hidden = 2 # Number of hidden units \nn_output = 1 # Number of output units\n\n# Weights for inputs to hidden layer\nW1 = torch.randn(n_input, n_hidden)\n# Weights for hidden layer to output layer\nW2 = torch.randn(n_hidden, n_output)\n\n# and bias terms for hidden and output layers\nB1 = torch.randn((1, n_hidden))\nB2 = torch.randn((1, n_output))",
"_____no_output_____"
]
],
[
[
"> **练习:**使用权重 `W1` 和 `W2` 以及偏差 `B1` 和 `B2` 计算此多层网络的输出。",
"_____no_output_____"
]
],
[
[
"## Your solution here",
"_____no_output_____"
]
],
[
[
"如果计算正确,输出应该为 `tensor([[ 0.3171]])`。\n\n隐藏层数量是网络的参数,通常称为**超参数**,以便与权重和偏差参数区分开。稍后当我们讨论如何训练网络时会提到,层级越多,网络越能够从数据中学习规律并作出准确的预测。\n\n## Numpy 和 Torch 相互转换\n\n加分题!PyTorch 可以实现 Numpy 数组和 Torch 张量之间的转换。Numpy 数组转换为张量数据,可以用 `torch.from_numpy()`。张量数据转换为 Numpy 数组,可以用 `.numpy()` 。",
"_____no_output_____"
]
],
[
[
"import numpy as np\na = np.random.rand(4,3)\na",
"_____no_output_____"
],
[
"b = torch.from_numpy(a)\nb",
"_____no_output_____"
],
[
"b.numpy()",
"_____no_output_____"
]
],
[
[
"Numpy 数组与 Torch 张量之间共享内存,因此如果你原地更改一个对象的值,另一个对象的值也会更改。",
"_____no_output_____"
]
],
[
[
"# Multiply PyTorch Tensor by 2, in place\nb.mul_(2)",
"_____no_output_____"
]
],
[
[
"```python\n# Numpy array matches new values from Tensor\na\n```",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4a22e97e4907a1fa0d284f245f144d1555f21515
| 96,268 |
ipynb
|
Jupyter Notebook
|
3. Landmark Detection and Tracking.ipynb
|
kkumazaki/Computer-Vision_Project3_Implement_SLAM
|
c7684871bd60eb6f01cebb8d433859e2b3ae9d16
|
[
"MIT"
] | null | null | null |
3. Landmark Detection and Tracking.ipynb
|
kkumazaki/Computer-Vision_Project3_Implement_SLAM
|
c7684871bd60eb6f01cebb8d433859e2b3ae9d16
|
[
"MIT"
] | null | null | null |
3. Landmark Detection and Tracking.ipynb
|
kkumazaki/Computer-Vision_Project3_Implement_SLAM
|
c7684871bd60eb6f01cebb8d433859e2b3ae9d16
|
[
"MIT"
] | null | null | null | 108.532131 | 30,996 | 0.807454 |
[
[
[
"# Project 3: Implement SLAM \n\n---\n\n## Project Overview\n\nIn this project, you'll implement SLAM for robot that moves and senses in a 2 dimensional, grid world!\n\nSLAM gives us a way to both localize a robot and build up a map of its environment as a robot moves and senses in real-time. This is an active area of research in the fields of robotics and autonomous systems. Since this localization and map-building relies on the visual sensing of landmarks, this is a computer vision problem. \n\nUsing what you've learned about robot motion, representations of uncertainty in motion and sensing, and localization techniques, you will be tasked with defining a function, `slam`, which takes in six parameters as input and returns the vector `mu`. \n> `mu` contains the (x,y) coordinate locations of the robot as it moves, and the positions of landmarks that it senses in the world\n\nYou can implement helper functions as you see fit, but your function must return `mu`. The vector, `mu`, should have (x, y) coordinates interlaced, for example, if there were 2 poses and 2 landmarks, `mu` will look like the following, where `P` is the robot position and `L` the landmark position:\n```\nmu = matrix([[Px0],\n [Py0],\n [Px1],\n [Py1],\n [Lx0],\n [Ly0],\n [Lx1],\n [Ly1]])\n```\n\nYou can see that `mu` holds the poses first `(x0, y0), (x1, y1), ...,` then the landmark locations at the end of the matrix; we consider a `nx1` matrix to be a vector.\n\n## Generating an environment\n\nIn a real SLAM problem, you may be given a map that contains information about landmark locations, and in this example, we will make our own data using the `make_data` function, which generates a world grid with landmarks in it and then generates data by placing a robot in that world and moving and sensing over some numer of time steps. The `make_data` function relies on a correct implementation of robot move/sense functions, which, at this point, should be complete and in the `robot_class.py` file. 
The data is collected as an instantiated robot moves and senses in a world. Your SLAM function will take in this data as input. So, let's first create this data and explore how it represents the movement and sensor measurements that our robot takes.\n\n---",
"_____no_output_____"
],
[
"## Create the world\n\nUse the code below to generate a world of a specified size with randomly generated landmark locations. You can change these parameters and see how your implementation of SLAM responds! \n\n`data` holds the sensors measurements and motion of your robot over time. It stores the measurements as `data[i][0]` and the motion as `data[i][1]`.\n\n#### Helper functions\n\nYou will be working with the `robot` class that may look familiar from the first notebook, \n\nIn fact, in the `helpers.py` file, you can read the details of how data is made with the `make_data` function. It should look very similar to the robot move/sense cycle you've seen in the first notebook.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nfrom helpers import make_data\n\n# your implementation of slam should work with the following inputs\n# feel free to change these input values and see how it responds!\n\n# world parameters\nnum_landmarks = 5 # number of landmarks\nN = 20 # time steps\nworld_size = 100.0 # size of world (square)\n\n# robot parameters\nmeasurement_range = 50.0 # range at which we can sense landmarks\nmotion_noise = 2.0 # noise in robot motion\nmeasurement_noise = 2.0 # noise in the measurements\ndistance = 20.0 # distance by which robot (intends to) move each iteratation \n\n\n# make_data instantiates a robot, AND generates random landmarks for a given world size and number of landmarks\ndata = make_data(N, num_landmarks, world_size, measurement_range, motion_noise, measurement_noise, distance)",
" \nLandmarks: [[58, 42], [52, 28], [63, 30], [20, 14], [97, 53]]\nRobot: [x=79.66487 y=77.17812]\n"
]
],
[
[
"### A note on `make_data`\n\nThe function above, `make_data`, takes in so many world and robot motion/sensor parameters because it is responsible for:\n1. Instantiating a robot (using the robot class)\n2. Creating a grid world with landmarks in it\n\n**This function also prints out the true location of landmarks and the *final* robot location, which you should refer back to when you test your implementation of SLAM.**\n\nThe `data` this returns is an array that holds information about **robot sensor measurements** and **robot motion** `(dx, dy)` that is collected over a number of time steps, `N`. You will have to use *only* these readings about motion and measurements to track a robot over time and find the determine the location of the landmarks using SLAM. We only print out the true landmark locations for comparison, later.\n\n\nIn `data` the measurement and motion data can be accessed from the first and second index in the columns of the data array. See the following code for an example, where `i` is the time step:\n```\nmeasurement = data[i][0]\nmotion = data[i][1]\n```\n",
"_____no_output_____"
]
],
[
[
"# print out some stats about the data\ntime_step = 0\n\nprint('Example measurements: \\n', data[time_step][0])\nprint('\\n')\nprint('Example motion: \\n', data[time_step][1])",
"Example measurements: \n [[0, 9.792249547059008, -8.457846271490965], [1, 2.4285521183959418, -22.014279578716124], [2, 13.800732124061383, -18.390960913677905], [3, -31.232496073600046, -37.82686845440225], [4, 48.181623653569474, 1.8115961305305603]]\n\n\nExample motion: \n [13.475031702975194, 14.779158318517787]\n"
]
],
[
[
"Try changing the value of `time_step`, you should see that the list of measurements varies based on what in the world the robot sees after it moves. As you know from the first notebook, the robot can only sense so far and with a certain amount of accuracy in the measure of distance between its location and the location of landmarks. The motion of the robot always is a vector with two values: one for x and one for y displacement. This structure will be useful to keep in mind as you traverse this data in your implementation of slam.",
"_____no_output_____"
],
[
"## Initialize Constraints\n\nOne of the most challenging tasks here will be to create and modify the constraint matrix and vector: omega and xi. In the second notebook, you saw an example of how omega and xi could hold all the values the define the relationships between robot poses `xi` and landmark positions `Li` in a 1D world, as seen below, where omega is the blue matrix and xi is the pink vector.\n\n<img src='images/motion_constraint.png' width=50% height=50% />\n\n\nIn *this* project, you are tasked with implementing constraints for a 2D world. We are referring to robot poses as `Px, Py` and landmark positions as `Lx, Ly`, and one way to approach this challenge is to add *both* x and y locations in the constraint matrices.\n\n<img src='images/constraints2D.png' width=50% height=50% />\n\nYou may also choose to create two of each omega and xi (one for x and one for y positions).",
"_____no_output_____"
],
[
"### TODO: Write a function that initializes omega and xi\n\nComplete the function `initialize_constraints` so that it returns `omega` and `xi` constraints for the starting position of the robot. Any values that we do not yet know should be initialized with the value `0`. You may assume that our robot starts out in exactly the middle of the world with 100% confidence (no motion or measurement noise at this point). The inputs `N` time steps, `num_landmarks`, and `world_size` should give you all the information you need to construct intial constraints of the correct size and starting values.\n\n*Depending on your approach you may choose to return one omega and one xi that hold all (x,y) positions *or* two of each (one for x values and one for y); choose whichever makes most sense to you!*",
"_____no_output_____"
]
],
[
[
"def initialize_constraints(N, num_landmarks, world_size):\n ''' This function takes in a number of time steps N, number of landmarks, and a world_size,\n and returns initialized constraint matrices, omega and xi.'''\n \n ## Recommended: Define and store the size (rows/cols) of the constraint matrix in a variable\n \n ## TODO: Define the constraint matrix, Omega, with two initial \"strength\" values\n ## for the initial x, y location of our robot\n size = 2*N + 2*num_landmarks\n center = world_size/2.0\n \n omega = np.zeros((size,size))\n omega[0][0] = 1\n omega[1][1] = 1\n \n ## TODO: Define the constraint *vector*, xi\n ## you can assume that the robot starts out in the middle of the world with 100% confidence\n xi = np.zeros((size,1))\n xi[0] = center\n xi[1] = center\n \n return omega, xi\n ",
"_____no_output_____"
]
],
[
[
"### Test as you go\n\nIt's good practice to test out your code, as you go. Since `slam` relies on creating and updating constraint matrices, `omega` and `xi` to account for robot sensor measurements and motion, let's check that they initialize as expected for any given parameters.\n\nBelow, you'll find some test code that allows you to visualize the results of your function `initialize_constraints`. We are using the [seaborn](https://seaborn.pydata.org/) library for visualization.\n\n**Please change the test values of N, landmarks, and world_size and see the results**. Be careful not to use these values as input into your final smal function.\n\nThis code assumes that you have created one of each constraint: `omega` and `xi`, but you can change and add to this code, accordingly. The constraints should vary in size with the number of time steps and landmarks as these values affect the number of poses a robot will take `(Px0,Py0,...Pxn,Pyn)` and landmark locations `(Lx0,Ly0,...Lxn,Lyn)` whose relationships should be tracked in the constraint matrices. Recall that `omega` holds the weights of each variable and `xi` holds the value of the sum of these variables, as seen in Notebook 2. You'll need the `world_size` to determine the starting pose of the robot in the world and fill in the initial values for `xi`.",
"_____no_output_____"
]
],
[
[
"# import data viz resources\nimport matplotlib.pyplot as plt\nfrom pandas import DataFrame\nimport seaborn as sns\n%matplotlib inline",
"_____no_output_____"
],
[
"# define a small N and world_size (small for ease of visualization)\nN_test = 5\nnum_landmarks_test = 2\nsmall_world = 10\n\n# initialize the constraints\ninitial_omega, initial_xi = initialize_constraints(N_test, num_landmarks_test, small_world)",
"_____no_output_____"
],
[
"# define figure size\nplt.rcParams[\"figure.figsize\"] = (10,7)\n\n# display omega\nsns.heatmap(DataFrame(initial_omega), cmap='Blues', annot=True, linewidths=.5)",
"_____no_output_____"
],
[
"# define figure size\nplt.rcParams[\"figure.figsize\"] = (1,7)\n\n# display xi\nsns.heatmap(DataFrame(initial_xi), cmap='Oranges', annot=True, linewidths=.5)",
"_____no_output_____"
]
],
[
[
"---\n## SLAM inputs \n\nIn addition to `data`, your slam function will also take in:\n* N - The number of time steps that a robot will be moving and sensing\n* num_landmarks - The number of landmarks in the world\n* world_size - The size (w/h) of your world\n* motion_noise - The noise associated with motion; the update confidence for motion should be `1.0/motion_noise`\n* measurement_noise - The noise associated with measurement/sensing; the update weight for measurement should be `1.0/measurement_noise`\n\n#### A note on noise\n\nRecall that `omega` holds the relative \"strengths\" or weights for each position variable, and you can update these weights by accessing the correct index in omega `omega[row][col]` and *adding/subtracting* `1.0/noise` where `noise` is measurement or motion noise. `Xi` holds actual position values, and so to update `xi` you'll do a similar addition process only using the actual value of a motion or measurement. So for a vector index `xi[row][0]` you will end up adding/subtracting one measurement or motion divided by their respective `noise`.\n\n### TODO: Implement Graph SLAM\n\nFollow the TODO's below to help you complete this slam implementation (these TODO's are in the recommended order), then test out your implementation! \n\n#### Updating with motion and measurements\n\nWith a 2D omega and xi structure as shown above (in earlier cells), you'll have to be mindful about how you update the values in these constraint matrices to account for motion and measurement constraints in the x and y directions. Recall that the solution to these matrices (which holds all values for robot poses `P` and landmark locations `L`) is the vector, `mu`, which can be computed at the end of the construction of omega and xi as the inverse of omega times xi: $\\mu = \\Omega^{-1}\\xi$\n\n**You may also choose to return the values of `omega` and `xi` if you want to visualize their final state!**",
"_____no_output_____"
]
],
[
[
"## TODO: Complete the code to implement SLAM\n\n## slam takes in 6 arguments and returns mu, \n## mu is the entire path traversed by a robot (all x,y poses) *and* all landmarks locations\ndef slam(data, N, num_landmarks, world_size, motion_noise, measurement_noise):\n \n ## TODO: Use your initilization to create constraint matrices, omega and xi\n omega, xi = initialize_constraints(N, num_landmarks, world_size)\n \n print(\"N:\" + str(N))\n print(\"num_landmarks: \" + str(num_landmarks))\n \n ## TODO: Iterate through each time step in the data\n ## get all the motion and measurement data as you iterate\n measurements = []\n motions = []\n \n for i in range(len(data)):\n measurements.append(data[i][0])\n motions.append(data[i][1])\n \n ## TODO: update the constraint matrix/vector to account for all *measurements*\n ## this should be a series of additions that take into account the measurement noise\n k = [] # length of the measurements at each t (0 <= t < N)\n \n #print(\"measurements: \" + str(len(measurements)))\n #print(\"motions: \" + str(len(motions)))\n \n #for t in range(N): # it will overshoot the length of measurements\n for t in range(len(measurements)):\n # length of the measurements\n k_i = len(measurements[t])\n k.append(k_i)\n \n #print(\"t: \" + str(t) + \", k_i: \" + str(k_i)) \n \n for i in range(k_i):\n index = measurements[t][i][0]\n #index = measurements\n \n # add value to Pxi, Pyi in Omega Matrix\n omega[2*t][2*t] += 1/measurement_noise\n omega[2*t+1][2*t+1] += 1/measurement_noise\n \n # add value to Lx_index, Ly_index in Omega Matrix\n omega[2*t ][2*N + 2*index ] -= 1/measurement_noise\n omega[2*t+1][2*N + 2*index+1] -= 1/measurement_noise\n omega[2*N + 2*index ][2*t ] -= 1/measurement_noise\n omega[2*N + 2*index+1][2*t+1] -= 1/measurement_noise\n omega[2*N + 2*index ][2*N + 2*index ] += 1/measurement_noise\n omega[2*N + 2*index+1][2*N + 2*index+1] += 1/measurement_noise\n \n # add value to Pxi, Pyi in Xi Vector\n xi[2*t ] -= 
measurements[t][i][1]/measurement_noise\n xi[2*t+1] -= measurements[t][i][2]/measurement_noise\n \n # add value to Lx_index, Ly_index in Xi Vector\n xi[2*N + 2*index ] += measurements[t][i][1]/measurement_noise\n xi[2*N + 2*index+1] += measurements[t][i][2]/measurement_noise\n \n ## TODO: update the constraint matrix/vector to account for all *motion* and motion noise\n #for t in range(N): # it will overshoot the length of measurements\n for t in range(len(motions)):\n # add value to Pxi, Pyi in Omega Matrix\n omega[2*t ][2*t ] += 1/motion_noise\n omega[2*t+1][2*t+1] += 1/motion_noise\n omega[2*t+2][2*t+2] += 1/motion_noise\n omega[2*t+3][2*t+3] += 1/motion_noise\n omega[2*t ][2*t+2] -= 1/motion_noise\n omega[2*t+1][2*t+3] -= 1/motion_noise\n omega[2*t+2][2*t ] -= 1/motion_noise\n omega[2*t+3][2*t+1] -= 1/motion_noise\n \n # add value to Pxi, Pyi in Xi Vector\n xi[2*t ] -= motions[t][0]\n xi[2*t+1] -= motions[t][1]\n xi[2*t+2] += motions[t][0]\n xi[2*t+3] += motions[t][1]\n \n ## TODO: After iterating through all the data\n ## Compute the best estimate of poses and landmark positions\n ## using the formula, omega_inverse * Xi\n omega_inv = np.linalg.inv(np.matrix(omega))\n mu = np.dot(omega_inv, xi)\n \n return mu # return `mu`\n",
"_____no_output_____"
]
],
[
[
"## Helper functions\n\nTo check that your implementation of SLAM works for various inputs, we have provided two helper functions that will help display the estimated pose and landmark locations that your function has produced. First, given a result `mu` and number of time steps, `N`, we define a function that extracts the poses and landmarks locations and returns those as their own, separate lists. \n\nThen, we define a function that nicely print out these lists; both of these we will call, in the next step.\n",
"_____no_output_____"
]
],
[
[
"# a helper function that creates a list of poses and of landmarks for ease of printing\n# this only works for the suggested constraint architecture of interlaced x,y poses\ndef get_poses_landmarks(mu, N):\n # create a list of poses\n poses = []\n for i in range(N):\n poses.append((mu[2*i].item(), mu[2*i+1].item()))\n\n # create a list of landmarks\n landmarks = []\n for i in range(num_landmarks):\n landmarks.append((mu[2*(N+i)].item(), mu[2*(N+i)+1].item()))\n\n # return completed lists\n return poses, landmarks\n",
"_____no_output_____"
],
[
"def print_all(poses, landmarks):\n print('\\n')\n print('Estimated Poses:')\n for i in range(len(poses)):\n print('['+', '.join('%.3f'%p for p in poses[i])+']')\n print('\\n')\n print('Estimated Landmarks:')\n for i in range(len(landmarks)):\n print('['+', '.join('%.3f'%l for l in landmarks[i])+']')\n",
"_____no_output_____"
]
],
[
[
"## Run SLAM\n\nOnce you've completed your implementation of `slam`, see what `mu` it returns for different world sizes and different landmarks!\n\n### What to Expect\n\nThe `data` that is generated is random, but you did specify the number, `N`, or time steps that the robot was expected to move and the `num_landmarks` in the world (which your implementation of `slam` should see and estimate a position for. Your robot should also start with an estimated pose in the very center of your square world, whose size is defined by `world_size`.\n\nWith these values in mind, you should expect to see a result that displays two lists:\n1. **Estimated poses**, a list of (x, y) pairs that is exactly `N` in length since this is how many motions your robot has taken. The very first pose should be the center of your world, i.e. `[50.000, 50.000]` for a world that is 100.0 in square size.\n2. **Estimated landmarks**, a list of landmark positions (x, y) that is exactly `num_landmarks` in length. \n\n#### Landmark Locations\n\nIf you refer back to the printout of *exact* landmark locations when this data was created, you should see values that are very similar to those coordinates, but not quite (since `slam` must account for noise in motion and measurement).",
"_____no_output_____"
]
],
[
[
"# call your implementation of slam, passing in the necessary parameters\nmu = slam(data, N, num_landmarks, world_size, 1, 1)\n#mu = slam(data, N, num_landmarks, world_size, motion_noise, measurement_noise)\n\n# print out the resulting landmarks and poses\nif(mu is not None):\n # get the lists of poses and landmarks\n # and print them out\n poses, landmarks = get_poses_landmarks(mu, N)\n print_all(poses, landmarks)",
"N:20\nnum_landmarks: 5\n\n\nEstimated Poses:\n[50.000, 50.000]\n[63.571, 65.072]\n[76.108, 78.143]\n[89.790, 91.866]\n[99.792, 73.155]\n[81.194, 81.277]\n[62.333, 87.114]\n[43.680, 93.670]\n[28.334, 80.026]\n[12.987, 66.382]\n[23.758, 82.438]\n[34.528, 98.495]\n[22.334, 81.841]\n[10.140, 65.187]\n[25.530, 77.141]\n[40.763, 90.858]\n[59.949, 86.935]\n[78.508, 83.714]\n[98.023, 80.099]\n[78.238, 77.175]\n\n\nEstimated Landmarks:\n[58.782, 42.321]\n[53.187, 27.714]\n[63.788, 30.125]\n[18.768, 12.173]\n[98.351, 52.495]\n"
]
],
[
[
"## Visualize the constructed world\n\nFinally, using the `display_world` code from the `helpers.py` file (which was also used in the first notebook), we can actually visualize what you have coded with `slam`: the final position of the robot and the positon of landmarks, created from only motion and measurement data!\n\n**Note that these should be very similar to the printed *true* landmark locations and final pose from our call to `make_data` early in this notebook.**",
"_____no_output_____"
]
],
[
[
"# import the helper function\nfrom helpers import display_world\n\n# Display the final world!\n\n# define figure size\nplt.rcParams[\"figure.figsize\"] = (20,20)\n\n# check if poses has been created\nif 'poses' in locals():\n # print out the last pose\n print('Last pose: ', poses[-1])\n # display the last position of the robot *and* the landmark positions\n display_world(int(world_size), poses[-1], landmarks)",
"Last pose: (83.46796706269217, 41.09486088960657)\n"
]
],
[
[
"### Question: How far away is your final pose (as estimated by `slam`) compared to the *true* final pose? Why do you think these poses are different?\n\nYou can find the true value of the final pose in one of the first cells where `make_data` was called. You may also want to look at the true landmark locations and compare them to those that were estimated by `slam`. Ask yourself: what do you think would happen if we moved and sensed more (increased N)? Or if we had lower/higher noise parameters.",
"_____no_output_____"
],
[
"**Answer**:\nThe true values are following:\nRobot: [x=79.66487 y=77.17812]\nLandmarks: [[58, 42], [52, 28], [63, 30], [20, 14], [97, 53]]\n\nThe result of calculated mu are following:\n[74.730, 76.280]\n[59.521, 46.661], [54.547, 29.230], [65.149, 31.642], [18.768, 12.173], [105.645, 56.867]\n\nI think I had a good solution.\nWhen I set measurement_noise = motion_noise = 1, the results became following:\n[78.238, 77.175]\n[58.782, 42.321], [53.187, 27.714], [63.788, 30.125], [18.768, 12.173], [98.351, 52.495]\n\nI found that the results gets closer to the true values if I lower the noise parameters.",
"_____no_output_____"
],
[
"## Testing\n\nTo confirm that your slam code works before submitting your project, it is suggested that you run it on some test data and cases. A few such cases have been provided for you, in the cells below. When you are ready, uncomment the test cases in the next cells (there are two test cases, total); your output should be **close-to or exactly** identical to the given results. If there are minor discrepancies it could be a matter of floating point accuracy or in the calculation of the inverse matrix.\n\n### Submit your project\n\nIf you pass these tests, it is a good indication that your project will pass all the specifications in the project rubric. Follow the submission instructions to officially submit!",
"_____no_output_____"
]
],
[
[
"# Here is the data and estimated outputs for test case 1\n\ntest_data1 = [[[[1, 19.457599255548065, 23.8387362100849], [2, -13.195807561967236, 11.708840328458608], [3, -30.0954905279171, 15.387879242505843]], [-12.2607279422326, -15.801093326936487]], [[[2, -0.4659930049620491, 28.088559771215664], [4, -17.866382374890936, -16.384904503932]], [-12.2607279422326, -15.801093326936487]], [[[4, -6.202512900833806, -1.823403210274639]], [-12.2607279422326, -15.801093326936487]], [[[4, 7.412136480918645, 15.388585962142429]], [14.008259661173426, 14.274756084260822]], [[[4, -7.526138813444998, -0.4563942429717849]], [14.008259661173426, 14.274756084260822]], [[[2, -6.299793150150058, 29.047830407717623], [4, -21.93551130411791, -13.21956810989039]], [14.008259661173426, 14.274756084260822]], [[[1, 15.796300959032276, 30.65769689694247], [2, -18.64370821983482, 17.380022987031367]], [14.008259661173426, 14.274756084260822]], [[[1, 0.40311325410337906, 14.169429532679855], [2, -35.069349468466235, 2.4945558982439957]], [14.008259661173426, 14.274756084260822]], [[[1, -16.71340983241936, -2.777000269543834]], [-11.006096015782283, 16.699276945166858]], [[[1, -3.611096830835776, -17.954019226763958]], [-19.693482634035977, 3.488085684573048]], [[[1, 18.398273354362416, -22.705102332550947]], [-19.693482634035977, 3.488085684573048]], [[[2, 2.789312482883833, -39.73720193121324]], [12.849049222879723, -15.326510824972983]], [[[1, 21.26897046581808, -10.121029799040915], [2, -11.917698965880655, -23.17711662602097], [3, -31.81167947898398, -16.7985673023331]], [12.849049222879723, -15.326510824972983]], [[[1, 10.48157743234859, 5.692957082575485], [2, -22.31488473554935, -5.389184118551409], [3, -40.81803984305378, -2.4703329790238118]], [12.849049222879723, -15.326510824972983]], [[[0, 10.591050242096598, -39.2051798967113], [1, -3.5675572049297553, 22.849456408289125], [2, -38.39251065320351, 7.288990306029511]], [12.849049222879723, -15.326510824972983]], [[[0, 
-3.6225556479370766, -25.58006865235512]], [-7.8874682868419965, -18.379005523261092]], [[[0, 1.9784503557879374, -6.5025974151499]], [-7.8874682868419965, -18.379005523261092]], [[[0, 10.050665232782423, 11.026385307998742]], [-17.82919359778298, 9.062000642947142]], [[[0, 26.526838150174818, -0.22563393232425621], [4, -33.70303936886652, 2.880339841013677]], [-17.82919359778298, 9.062000642947142]]]\n\n## Test Case 1\n##\n# Estimated Pose(s):\n# [50.000, 50.000]\n# [37.858, 33.921]\n# [25.905, 18.268]\n# [13.524, 2.224]\n# [27.912, 16.886]\n# [42.250, 30.994]\n# [55.992, 44.886]\n# [70.749, 59.867]\n# [85.371, 75.230]\n# [73.831, 92.354]\n# [53.406, 96.465]\n# [34.370, 100.134]\n# [48.346, 83.952]\n# [60.494, 68.338]\n# [73.648, 53.082]\n# [86.733, 38.197]\n# [79.983, 20.324]\n# [72.515, 2.837]\n# [54.993, 13.221]\n# [37.164, 22.283]\n\n\n# Estimated Landmarks:\n# [82.679, 13.435]\n# [70.417, 74.203]\n# [36.688, 61.431]\n# [18.705, 66.136]\n# [20.437, 16.983]\n\n\n### Uncomment the following three lines for test case 1 and compare the output to the values above ###\n\nmu_1 = slam(test_data1, 20, 5, 100.0, 1.0, 1.0)\n#mu_1 = slam(test_data1, 20, 5, 100.0, 2.0, 2.0)\nposes, landmarks = get_poses_landmarks(mu_1, 20)\nprint_all(poses, landmarks)",
"N:20\nnum_landmarks: 5\n\n\nEstimated Poses:\n[50.000, 50.000]\n[37.973, 33.652]\n[26.185, 18.155]\n[13.745, 2.116]\n[28.097, 16.783]\n[42.384, 30.902]\n[55.831, 44.497]\n[70.857, 59.699]\n[85.697, 75.543]\n[74.011, 92.434]\n[53.544, 96.454]\n[34.525, 100.080]\n[48.623, 83.953]\n[60.197, 68.107]\n[73.778, 52.935]\n[87.132, 38.538]\n[80.303, 20.508]\n[72.798, 2.945]\n[55.245, 13.255]\n[37.416, 22.317]\n\n\nEstimated Landmarks:\n[82.956, 13.539]\n[70.495, 74.141]\n[36.740, 61.281]\n[18.698, 66.060]\n[20.635, 16.875]\n"
],
[
"# Here is the data and estimated outputs for test case 2\n\ntest_data2 = [[[[0, 26.543274387283322, -6.262538160312672], [3, 9.937396825799755, -9.128540360867689]], [18.92765331253674, -6.460955043986683]], [[[0, 7.706544739722961, -3.758467215445748], [1, 17.03954411948937, 31.705489938553438], [3, -11.61731288777497, -6.64964096716416]], [18.92765331253674, -6.460955043986683]], [[[0, -12.35130507136378, 2.585119104239249], [1, -2.563534536165313, 38.22159657838369], [3, -26.961236804740935, -0.4802312626141525]], [-11.167066095509824, 16.592065417497455]], [[[0, 1.4138633151721272, -13.912454837810632], [1, 8.087721200818589, 20.51845934354381], [3, -17.091723454402302, -16.521500551709707], [4, -7.414211721400232, 38.09191602674439]], [-11.167066095509824, 16.592065417497455]], [[[0, 12.886743222179561, -28.703968411636318], [1, 21.660953298391387, 3.4912891084614914], [3, -6.401401414569506, -32.321583037341625], [4, 5.034079343639034, 23.102207946092893]], [-11.167066095509824, 16.592065417497455]], [[[1, 31.126317672358578, -10.036784369535214], [2, -38.70878528420893, 7.4987265861424595], [4, 17.977218575473767, 6.150889254289742]], [-6.595520680493778, -18.88118393939265]], [[[1, 41.82460922922086, 7.847527392202475], [3, 15.711709540417502, -30.34633659912818]], [-6.595520680493778, -18.88118393939265]], [[[0, 40.18454208294434, -6.710999804403755], [3, 23.019508919299156, -10.12110867290604]], [-6.595520680493778, -18.88118393939265]], [[[3, 27.18579315312821, 8.067219022708391]], [-6.595520680493778, -18.88118393939265]], [[], [11.492663265706092, 16.36822198838621]], [[[3, 24.57154567653098, 13.461499960708197]], [11.492663265706092, 16.36822198838621]], [[[0, 31.61945290413707, 0.4272295085799329], [3, 16.97392299158991, -5.274596836133088]], [11.492663265706092, 16.36822198838621]], [[[0, 22.407381798735177, -18.03500068379259], [1, 29.642444125196995, 17.3794951934614], [3, 4.7969752441371645, -21.07505361639969], [4, 14.726069092569372, 
32.75999422300078]], [11.492663265706092, 16.36822198838621]], [[[0, 10.705527984670137, -34.589764174299596], [1, 18.58772336795603, -0.20109708164787765], [3, -4.839806195049413, -39.92208742305105], [4, 4.18824810165454, 14.146847823548889]], [11.492663265706092, 16.36822198838621]], [[[1, 5.878492140223764, -19.955352450942357], [4, -7.059505455306587, -0.9740849280550585]], [19.628527845173146, 3.83678180657467]], [[[1, -11.150789592446378, -22.736641053247872], [4, -28.832815721158255, -3.9462962046291388]], [-19.841703647091965, 2.5113335861604362]], [[[1, 8.64427397916182, -20.286336970889053], [4, -5.036917727942285, -6.311739993868336]], [-5.946642674882207, -19.09548221169787]], [[[0, 7.151866679283043, -39.56103232616369], [1, 16.01535401373368, -3.780995345194027], [4, -3.04801331832137, 13.697362774960865]], [-5.946642674882207, -19.09548221169787]], [[[0, 12.872879480504395, -19.707592098123207], [1, 22.236710716903136, 16.331770792606406], [3, -4.841206109583004, -21.24604435851242], [4, 4.27111163223552, 32.25309748614184]], [-5.946642674882207, -19.09548221169787]]] \n\n\n## Test Case 2\n##\n# Estimated Pose(s):\n# [50.000, 50.000]\n# [69.035, 45.061]\n# [87.655, 38.971]\n# [76.084, 55.541]\n# [64.283, 71.684]\n# [52.396, 87.887]\n# [44.674, 68.948]\n# [37.532, 49.680]\n# [31.392, 30.893]\n# [24.796, 12.012]\n# [33.641, 26.440]\n# [43.858, 43.560]\n# [54.735, 60.659]\n# [65.884, 77.791]\n# [77.413, 94.554]\n# [96.740, 98.020]\n# [76.149, 99.586]\n# [70.211, 80.580]\n# [64.130, 61.270]\n# [58.183, 42.175]\n\n\n# Estimated Landmarks:\n# [76.777, 42.415]\n# [85.109, 76.850]\n# [13.687, 95.386]\n# [59.488, 39.149]\n# [69.283, 93.654]\n\n\n### Uncomment the following three lines for test case 2 and compare to the values above ###\n\nmu_2 = slam(test_data2, 20, 5, 100.0, 1.0, 1.0)\n#mu_2 = slam(test_data2, 20, 5, 100.0, 2.0, 2.0)\nposes, landmarks = get_poses_landmarks(mu_2, 20)\nprint_all(poses, landmarks)\n",
"N:20\nnum_landmarks: 5\n\n\nEstimated Poses:\n[50.000, 50.000]\n[69.181, 45.665]\n[87.743, 39.703]\n[76.270, 56.311]\n[64.317, 72.176]\n[52.257, 88.154]\n[44.059, 69.401]\n[37.002, 49.918]\n[30.924, 30.955]\n[23.508, 11.419]\n[34.180, 27.133]\n[44.155, 43.846]\n[54.806, 60.920]\n[65.698, 78.546]\n[77.468, 95.626]\n[96.802, 98.821]\n[75.957, 99.971]\n[70.200, 81.181]\n[64.054, 61.723]\n[58.107, 42.628]\n\n\nEstimated Landmarks:\n[76.779, 42.887]\n[85.065, 77.438]\n[13.548, 95.652]\n[59.449, 39.595]\n[69.263, 94.240]\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
]
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.