column | dtype | range
hexsha | stringlengths | 40 to 40
size | int64 | 6 to 14.9M
ext | stringclasses | 1 value
lang | stringclasses | 1 value
max_stars_repo_path | stringlengths | 6 to 260
max_stars_repo_name | stringlengths | 6 to 119
max_stars_repo_head_hexsha | stringlengths | 40 to 41
max_stars_repo_licenses | list
max_stars_count | int64 | 1 to 191k
max_stars_repo_stars_event_min_datetime | stringlengths | 24 to 24
max_stars_repo_stars_event_max_datetime | stringlengths | 24 to 24
max_issues_repo_path | stringlengths | 6 to 260
max_issues_repo_name | stringlengths | 6 to 119
max_issues_repo_head_hexsha | stringlengths | 40 to 41
max_issues_repo_licenses | list
max_issues_count | int64 | 1 to 67k
max_issues_repo_issues_event_min_datetime | stringlengths | 24 to 24
max_issues_repo_issues_event_max_datetime | stringlengths | 24 to 24
max_forks_repo_path | stringlengths | 6 to 260
max_forks_repo_name | stringlengths | 6 to 119
max_forks_repo_head_hexsha | stringlengths | 40 to 41
max_forks_repo_licenses | list
max_forks_count | int64 | 1 to 105k
max_forks_repo_forks_event_min_datetime | stringlengths | 24 to 24
max_forks_repo_forks_event_max_datetime | stringlengths | 24 to 24
avg_line_length | float64 | 2 to 1.04M
max_line_length | int64 | 2 to 11.2M
alphanum_fraction | float64 | 0 to 1
cells | list
cell_types | list
cell_type_groups | list
4a6688c6460d0f0a43b040388269d89203592d0b
56,336
ipynb
Jupyter Notebook
Graph_ADT.ipynb
lucabianco78/sciproAlgo2020
d4653489e2305bd7eb45d3dcb98fc1a3fed17e0e
[ "Apache-2.0" ]
null
null
null
Graph_ADT.ipynb
lucabianco78/sciproAlgo2020
d4653489e2305bd7eb45d3dcb98fc1a3fed17e0e
[ "Apache-2.0" ]
null
null
null
Graph_ADT.ipynb
lucabianco78/sciproAlgo2020
d4653489e2305bd7eb45d3dcb98fc1a3fed17e0e
[ "Apache-2.0" ]
1
2020-10-17T15:31:32.000Z
2020-10-17T15:31:32.000Z
32.526559
600
0.395928
[ [ [ "### Code to implement Graphs\n\n\n", "_____no_output_____" ] ], [ [ "class DiGraphAsAdjacencyMatrix:\n def __init__(self):\n #would be better a set, but I need an index\n self.__nodes = list()\n self.__matrix = list()\n \n def __len__(self):\n \"\"\"gets the number of nodes\"\"\"\n return len(self.__nodes)\n \n def nodes(self):\n return self.__nodes\n \n def matrix(self):\n return self.__matrix\n \n def __str__(self):\n header = \"\\t\".join([n for n in self.__nodes])\n data = \"\"\n for i in range(0,len(self.__matrix)):\n data += str(self.__nodes[i]) + \"\\t\" \n data += \"\\t\".join([str(x) for x in self.__matrix[i]]) + \"\\n\"\n\n return \"\\t\"+ header +\"\\n\" + data\n \n def insertNode(self, node):\n #add the node if not there.\n if node not in self.__nodes:\n self.__nodes.append(node)\n #add a row and a column of zeros in the matrix\n if len(self.__matrix) == 0:\n #first node\n self.__matrix = [[0]]\n else:\n N = len(self.__nodes)\n for row in self.__matrix:\n row.append(0)\n self.__matrix.append([0 for x in range(N)])\n \n def insertEdge(self, node1, node2, weight):\n i = -1\n j = -1\n if node1 in self.__nodes:\n i = self.__nodes.index(node1)\n if node2 in self.__nodes:\n j = self.__nodes.index(node2)\n if i != -1 and j != -1:\n self.__matrix[i][j] = weight\n \n def deleteEdge(self, node1,node2):\n \"\"\"removing an edge means to set its\n corresponding place in the matrix to 0\"\"\"\n i = -1\n j = -1\n if node1 in self.__nodes:\n i = self.__nodes.index(node1)\n if node2 in self.__nodes:\n j = self.__nodes.index(node2)\n if i != -1 and j != -1:\n self.__matrix[i][j] = 0\n \n def deleteNode(self, node):\n \"\"\"removing a node means removing\n its corresponding row and column in the matrix\"\"\"\n i = -1\n\n if node in self.__nodes:\n i = self.__nodes.index(node)\n #print(\"Removing {} at index {}\".format(node, i))\n if node != -1:\n self.__matrix.pop(i)\n for row in self.__matrix:\n row.pop(i)\n self.__nodes.pop(i)\n \n def adjacent(self, node, incoming = True):\n \"\"\"Your treat! (see exercise 1)\"\"\"\n \n def edges(self):\n \"\"\"Your treat! (see exercise1). 
Returns all the edges\"\"\"\n \nif __name__ == \"__main__\":\n G = DiGraphAsAdjacencyMatrix()\n \n for i in range(6):\n n = \"Node_{}\".format(i+1)\n G.insertNode(n)\n\n for i in range(0,4):\n n = \"Node_\" + str(i+1)\n six = \"Node_6\"\n n_plus = \"Node_\" + str((i+2) % 6)\n G.insertEdge(n, n_plus,0.5)\n G.insertEdge(n, six,1)\n G.insertEdge(\"Node_5\", \"Node_1\", 0.5)\n G.insertEdge(\"Node_5\", \"Node_6\", 1)\n G.insertEdge(\"Node_6\", \"Node_6\", 1)\n print(G)\n \n print(\"Nodes:\")\n print(G.nodes())\n print(\"Matrix:\")\n print(G.matrix())\n \n G.insertNode(\"Node_7\")\n G.insertEdge(\"Node_1\", \"Node_7\", -1)\n G.insertEdge(\"Node_2\", \"Node_7\", -2)\n G.insertEdge(\"Node_5\", \"Node_7\", -5)\n G.insertEdge(\"Node_7\", \"Node_2\", -2)\n G.insertEdge(\"Node_7\", \"Node_3\", -3)\n \n print(\"Size is: {}\".format(len(G)))\n print(\"Nodes: {}\".format(G.nodes()))\n print(\"\\nMatrix:\")\n print(G)\n G.deleteNode(\"Node_7\")\n G.deleteEdge(\"Node_6\", \"Node_2\")\n #no effect, nodes do not exist!\n G.insertEdge(\"72\", \"25\",3)\n print(G)", "\tNode_1\tNode_2\tNode_3\tNode_4\tNode_5\tNode_6\nNode_1\t0\t0.5\t0\t0\t0\t1\nNode_2\t0\t0\t0.5\t0\t0\t1\nNode_3\t0\t0\t0\t0.5\t0\t1\nNode_4\t0\t0\t0\t0\t0.5\t1\nNode_5\t0.5\t0\t0\t0\t0\t1\nNode_6\t0\t0\t0\t0\t0\t1\n\nNodes:\n['Node_1', 'Node_2', 'Node_3', 'Node_4', 'Node_5', 'Node_6']\nMatrix:\n[[0, 0.5, 0, 0, 0, 1], [0, 0, 0.5, 0, 0, 1], [0, 0, 0, 0.5, 0, 1], [0, 0, 0, 0, 0.5, 1], [0.5, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 1]]\nSize is: 7\nNodes: ['Node_1', 'Node_2', 'Node_3', 'Node_4', 'Node_5', 'Node_6', 'Node_7']\n\nMatrix:\n\tNode_1\tNode_2\tNode_3\tNode_4\tNode_5\tNode_6\tNode_7\nNode_1\t0\t0.5\t0\t0\t0\t1\t-1\nNode_2\t0\t0\t0.5\t0\t0\t1\t-2\nNode_3\t0\t0\t0\t0.5\t0\t1\t0\nNode_4\t0\t0\t0\t0\t0.5\t1\t0\nNode_5\t0.5\t0\t0\t0\t0\t1\t-5\nNode_6\t0\t0\t0\t0\t0\t1\t0\nNode_7\t0\t-2\t-3\t0\t0\t0\t0\n\n\tNode_1\tNode_2\tNode_3\tNode_4\tNode_5\tNode_6\nNode_1\t0\t0.5\t0\t0\t0\t1\nNode_2\t0\t0\t0.5\t0\t0\t1\nNode_3\t0\t0\t0\t0.5\t0\t1\nNode_4\t0\t0\t0\t0\t0.5\t1\nNode_5\t0.5\t0\t0\t0\t0\t1\nNode_6\t0\t0\t0\t0\t0\t1\n\n" ], [ "class DiGraphAsAdjacencyMatrix:\n def __init__(self):\n #would be better a set, but I need an index\n self.__nodes = list()\n self.__matrix = list()\n \n def __len__(self):\n \"\"\"gets the number of nodes\"\"\"\n return len(self.__nodes)\n \n def nodes(self):\n return self.__nodes\n \n def matrix(self):\n return self.__matrix\n \n def __str__(self):\n #TODO\n pass\n \n def insertNode(self, node):\n #TODO\n pass\n \n def insertEdge(self, node1, node2, weight):\n #TODO \n pass\n \n def deleteEdge(self, node1,node2):\n \"\"\"removing an edge means to set its\n corresponding place in the matrix to 0\"\"\"\n #TODO\n pass\n \n def deleteNode(self, node):\n \"\"\"removing a node means removing\n its corresponding row and column in the matrix\"\"\"\n #TODO\n pass\n \n def adjacent(self, node, incoming = True):\n #TODO\n pass\n \n def edges(self):\n #TODO\n pass\n \n ", "_____no_output_____" ] ], [ [ "In this implementation of a directed weighted graph, we use a dictionary to store the data.", "_____no_output_____" ] ], [ [ "class Graph:\n \n # initializer, nodes are private!\n def __init__(self):\n self.__nodes = dict()\n \n #returns the size of the Graph\n #accessible through len(Graph)\n def __len__(self):\n return len(self.__nodes)\n \n #returns the nodes\n def V(self):\n return self.__nodes.keys()\n \n #a generator of nodes to access all of them \n #once (not a very useful example!)\n def node_iterator(self):\n for n in 
self.__nodes.keys():\n yield n\n \n #a generator of edges (as triplets (u,v,w)) to access all of them\n def edge_iterator(self):\n for u in self.__nodes:\n for v in self.__nodes[u]:\n yield (u,v,self.__nodes[u][v])\n \n #returns all the adjacent nodes of node\n #as a dictionary with key as the other node\n #and value the weight\n def adj(self,node):\n if node in self.__nodes.keys():\n return self.__nodes[node]\n \n #adds the node to the graph\n def insert_node(self, node):\n if node not in self.__nodes:\n self.__nodes[node] = dict()\n \n #adds the edge startN --> endN with weight w\n #that has 0 as default\n def insert_edge(self, startN, endN, w = 0):\n #does nothing if already in\n self.insert_node(startN)\n self.insert_node(endN)\n self.__nodes[startN][endN] = w\n \n #converts the graph into a string\n def __str__(self):\n out_str = \"Nodes:\\n\" + \",\".join(self.__nodes)\n out_str +=\"\\nEdges:\\n\"\n for u in self.__nodes:\n for v in self.__nodes[u]:\n out_str +=\"{} --{}--> {}\\n\".format(u,self.__nodes[u][v],v )\n if len(self.__nodes[u]) == 0:\n out_str +=\"{}\\n\".format(u)\n return out_str\n \nif __name__ == \"__main__\":\n G = Graph()\n for u,v in [ ('a', 'b'), ('a', 'd'), ('b', 'c'),\n ('d', 'a'), ('d', 'c'), ('d', 'e'), ('e', 'c') ]:\n G.insert_edge(u,v)\n for edge in G.edge_iterator():\n print(\"{} --{}--> {}\".format(edge[0],\n edge[1],\n edge[2]))\n G.insert_node('f')\n print(\"\\nG has {} nodes:\".format(len(G)))\n for node in G.node_iterator():\n print(\"{}\".format(node), end= \" \")\n print(\"\")\n print(G)\n print(\"Nodes adjacent to 'd': {}\".format(G.adj('d')))\n print(\"\\nNodes adjacent to 'c': {}\".format(G.adj('c')))\n", "a --b--> 0\na --d--> 0\nb --c--> 0\nd --a--> 0\nd --c--> 0\nd --e--> 0\ne --c--> 0\n\nG has 6 nodes:\na b d c e f \nNodes:\na,b,d,c,e,f\nEdges:\na --0--> b\na --0--> d\nb --0--> c\nd --0--> a\nd --0--> c\nd --0--> e\nc\ne --0--> c\nf\n\nNodes adjacent to 'd': {'a': 0, 'c': 0, 'e': 0}\n\nNodes adjacent to 'c': {}\n" ], [ "\n\nfor node in G.V():\n #do something with the node\n \n \nfor u in G.V():\n #for all starting nodes u\n for v in G.adj(u): \n #for all ending nodes v\n #do something with (u,v)\n \n \nfor node in G.node_iterator():\n #do something with the node\n \nfor edge in G.edge_iterator():\n #do something with the edge\n \n ", "_____no_output_____" ], [ "#######\n## WARNING WRONG CODE!!!!\n#######\nfrom collections import deque()\n\ndef BFS(node):\n Q = deque()\n if node != None:\n Q.append(node)\n \n while len(Q) > 0:\n curNode = Q.popleft()\n if curNode != None:\n print(\"{}\".format(curNode))\n for v in G.adj(curNode):\n Q.append(v)\n ", "_____no_output_____" ] ], [ [ "## BFS search", "_____no_output_____" ] ], [ [ "\"CODE NOT SHOWN\"\n\n#Drawing a graph in pygraphviz\nimport pygraphviz as pgv\n\nG=pgv.AGraph(directed=True)\n#for u in 'abcdefghkj':\n# G.add_node(u, color = 'black')\n \n#for u,v,c in [('a', 'c', 'black'), ('a', 'f','red'), ('a', 'e','black'), ('c', 'b','black'), ('c', 'd','black'),\n# ('b', 'f','black'), ('d','f','black'),('d','g','black'), ('f','g','red'),('g','j', 'red'), \n# ('e','h','black'), ('h','j','black'), ('k','l','black'), ('d', 'b','black'), ('j','a','blue'), \n# ('g','b','black'), ('j','d','black')]:\n#for u, v,c in [('a', 'b','black'), ('b', 'a','black'), ('b', 'c','black'), ('c', 'b','black'), ('c', 'd','black'),\n# ('d', 'c','black'), ('d','b','black'),('b','d','black'), ('d','a','black'),('a','d','black'), ('e','g','black'),\n# ('g','e','black'), ('e','f','black'), ('f', 'e','black'), ('f','h','black'), 
('h','f','black'), ('h','g','black'),\n# ('g','h','black'),('h','i','black'),('i','h','black'),('f','i','black'),('i','f','black'), ('j','k','black'),('k','j','black')]:\n\nfor u,v,c in [('a','b', 'black'), ('a','c','black'), ('a','e', 'black'),\n ('c','e','black'), ('b','d','black')]:#, ('e','b', 'black')]:\n G.add_edge(u, v, color=c)\n \n print(u,v,c)\n G.add_edge(u, v, color=c)\n\n\n\n\n# write to a dot file\n#G.write('test.dot')\n\n#create a png file\nG.layout(prog='fdp') # use dot\nG.draw('test_top_sort.png')", "a b black\na c black\na e black\nc e black\nb d black\n" ] ], [ [ "### DFS iterative in post-order ", "_____no_output_____" ] ], [ [ "%reset -s -f \n\nfrom collections import deque\nimport math\n\nclass Graph:\n \n # initializer, nodes are private!\n def __init__(self):\n self.__nodes = dict()\n \n #returns the size of the Graph\n #accessible through len(Graph)\n def __len__(self):\n return len(self.__nodes)\n \n #returns the nodes\n def V(self):\n return self.__nodes.keys()\n \n #a generator of nodes to access all of them \n #once (not a very useful example!)\n def node_iterator(self):\n for n in self.__nodes.keys():\n yield n\n \n #a generator of edges (as triplets (u,v,w)) to access all of them\n def edge_iterator(self):\n for u in self.__nodes:\n for v in self.__nodes[u]:\n yield (u,v,self.__nodes[u][v])\n \n #returns all the adjacent nodes of node\n #as a dictionary with key as the other node\n #and value the weight\n def adj(self,node):\n if node in self.__nodes.keys():\n return self.__nodes[node]\n \n #adds the node to the graph\n def insert_node(self, node):\n if node not in self.__nodes:\n self.__nodes[node] = dict()\n \n #adds the edge startN --> endN with weight w\n #that has 0 as default\n def insert_edge(self, startN, endN, w = 0):\n #does nothing if already in\n self.insert_node(startN)\n self.insert_node(endN)\n self.__nodes[startN][endN] = w\n \n #converts the graph into a string\n def __str__(self):\n out_str = \"Nodes:\\n\" + \",\".join(self.__nodes)\n out_str +=\"\\nEdges:\\n\"\n for u in self.__nodes:\n for v in self.__nodes[u]:\n out_str +=\"{} --{}--> {}\\n\".format(u,self.__nodes[u][v],v )\n if len(self.__nodes[u]) == 0:\n out_str +=\"{}\\n\".format(u)\n return out_str\n \n \n\n \n def DFS_rec(self, node, visited):\n visited.add(node)\n ## visit node (preorder)\n print(\"visiting: {}\".format(node))\n for u in self.adj(node):\n if u not in visited:\n self.DFS_rec(u, visited) \n ##visit node (post-order)\n \n \n def DFS(self, root):\n #stack implemented as deque\n S = deque()\n S.append(root)\n visited = set()\n while len(S) > 0:\n node = S.pop()\n if not node in visited:\n #visit node in preorder\n print(\"visiting {}\".format(node))\n visited.add(node)\n for n in self.adj(node):\n #visit edge (node,n)\n S.append(n)\n \n # Idea:\n # when we find a node we add it to the stack with tag \"discovery\"\n # if extracted with tag discovery, it is pushed back with tag \"finish\" and all its neighbors \n # are added \n # When it is extracted with tag finish the post visit is done\n def DFS_postorder(self, root):\n #stack implemented as deque\n S = deque()\n S.append((root, \"discovery\"))\n visited = set()\n discovered = set()\n discovered.add(root)\n cnt = 0\n while len(S) > 0:\n node,tag = S.pop()\n #print(\"{} {}\".format(node, tag))\n if not node in visited:\n if tag == \"discovery\":\n S.append((node, \"finished\"))\n for n in self.adj(node):\n if n not in discovered:\n S.append((n, \"discovery\"))\n discovered.add(n)\n else:\n #visit the node in 
postorder:\n visited.add(node)\n print(\"visiting {}\".format(node))\n \n\n\nif __name__ == \"__main__\": \n G2 = Graph()\n for u, v in [('a', 'c'), ('a', 'f'), ('a', 'e'), ('c', 'b'), ('c', 'd'),\n ('b', 'f'), ('d','f'),('d','g'), ('f','g'),('g','j'), ('e','h'),\n ('h','j'), ('k','l'), ('d', 'b'), ('j','a'), ('g','b'), ('j','d')]:\n G2.insert_edge(u,v)\n G2.DFS('a')\n print(\"DFS from {}\")\n visited = set()\n G2.DFS_rec('a', visited)\n \n print(\"\\nPostorder:\")\n G2.DFS_postorder('a')", "visiting a\nvisiting e\nvisiting h\nvisiting j\nvisiting d\nvisiting b\nvisiting f\nvisiting g\nvisiting c\nDFS from a:\nvisiting: a\nvisiting: c\nvisiting: b\nvisiting: f\nvisiting: g\nvisiting: j\nvisiting: d\nvisiting: e\nvisiting: h\n\nPostorder:\nvisiting b\nvisiting g\nvisiting d\nvisiting j\nvisiting h\nvisiting e\nvisiting f\nvisiting c\nvisiting a\n" ] ], [ [ "![](test_graph_dist.png)\n\n![](test_CC.png)\n\n![](test_top_sort.png)\n\n![](test_top_sort2.png)", "_____no_output_____" ] ], [ [ "%reset -s -f \n\nfrom collections import deque\nimport math\n\nclass Graph:\n \n # initializer, nodes are private!\n def __init__(self):\n self.__nodes = dict()\n \n #returns the size of the Graph\n #accessible through len(Graph)\n def __len__(self):\n return len(self.__nodes)\n \n #returns the nodes\n def V(self):\n return self.__nodes.keys()\n \n #a generator of nodes to access all of them \n #once (not a very useful example!)\n def node_iterator(self):\n for n in self.__nodes.keys():\n yield n\n \n #a generator of edges (as triplets (u,v,w)) to access all of them\n def edge_iterator(self):\n for u in self.__nodes:\n for v in self.__nodes[u]:\n yield (u,v,self.__nodes[u][v])\n \n #returns all the adjacent nodes of node\n #as a dictionary with key as the other node\n #and value the weight\n def adj(self,node):\n if node in self.__nodes.keys():\n return self.__nodes[node]\n \n #adds the node to the graph\n def insert_node(self, node):\n if node not in self.__nodes:\n self.__nodes[node] = dict()\n \n #adds the edge startN --> endN with weight w\n #that has 0 as default\n def insert_edge(self, startN, endN, w = 0):\n #does nothing if already in\n self.insert_node(startN)\n self.insert_node(endN)\n self.__nodes[startN][endN] = w\n \n #converts the graph into a string\n def __str__(self):\n out_str = \"Nodes:\\n\" + \",\".join(self.__nodes)\n out_str +=\"\\nEdges:\\n\"\n for u in self.__nodes:\n for v in self.__nodes[u]:\n out_str +=\"{} --{}--> {}\\n\".format(u,self.__nodes[u][v],v )\n if len(self.__nodes[u]) == 0:\n out_str +=\"{}\\n\".format(u)\n return out_str\n \n \n def BFS(self, node):\n Q = deque()\n Q.append(node)\n visited = set()\n visited.add(node)\n print(\"visiting: {}\".format(node)) \n \n while len(Q) > 0:\n curNode = Q.popleft()\n \n for n in self.adj(curNode):\n if n not in visited:\n Q.append(n)\n visited.add(n)\n print(\"visiting: {}\".format(n))\n \n #print(\"visited: {}\".format(visited)) \n #print(\"Q: {}\".format(list(Q)))\n \n #computes the distance from root of all nodes \n def get_distance(self, root):\n distances = dict()\n parents = dict()\n for node in self.node_iterator():\n distances[node] = math.inf\n parents[node] = -1\n Q = deque()\n Q.append(root)\n distances[root] = 0\n parents[root] = root\n while len(Q) > 0:\n curNode = Q.popleft()\n for n in self.adj(curNode):\n if distances[n] == math.inf:\n distances[n] = distances[curNode] + 1\n parents[n] = curNode\n Q.append(n)\n return (distances,parents)\n \n \n def get_shortest_path(self, start, end):\n #your courtesy\n #returns 
[start, node,.., end] \n #if shortest path is start --> node --> ... --> end\n D_s_e,P_s_e = self.get_distance(start)\n D_e_s, P_e_s = self.get_distance(end)\n P = []\n s = None\n e = None\n if D_s_e[end] > D_e_s[start]:\n P = P_e_s\n s = end\n e = start\n else:\n P = P_s_e\n s = start\n e = end\n outPath = str(e)\n #this assumes all the nodes are in the\n #parents structure\n curN = e\n while curN != s and curN != -1:\n curN = P[curN]\n outPath = str(curN) + \" --> \" + outPath\n if str(curN) != s:\n return \"Not available\"\n\n return outPath \n \n def DFS_rec(self, node, visited):\n visited.add(node)\n ## visit node (preorder)\n print(\"visiting: {}\".format(node))\n for u in self.adj(node):\n if u not in visited:\n self.DFS_rec(u, visited) \n ##visit node (post-order)\n \n \n def DFS(self, root):\n #stack implemented as deque\n S = deque()\n S.append(root)\n visited = set()\n while len(S) > 0:\n node = S.pop()\n if not node in visited:\n #visit node in preorder\n print(\"visiting {}\".format(node))\n visited.add(node)\n for n in self.adj(node):\n #visit edge (node,n)\n S.append(n)\n \n\n \n \n \n \n \ndef printPath(startN, endN, parents):\n outPath = str(endN)\n #this assumes all the nodes are in the\n #parents structure\n curN = endN\n while curN != startN and curN != -1:\n curN = parents[curN]\n outPath = str(curN) + \" --> \" + outPath\n if str(curN) != startN:\n return \"Not available\"\n \n return outPath\n\n\ndef cc(G):\n ids = dict()\n for node in G.node_iterator():\n ids[node] = 0\n counter = 0\n for u in G.node_iterator():\n if ids[u] == 0:\n counter += 1\n ccdfs(G, counter, u, ids)\n return (counter, ids)\n\ndef ccdfs(G, counter, u, ids):\n ids[u] = counter\n for v in G.adj(u):\n if ids[v] == 0:\n ccdfs(G, counter, v, ids)\n \n \n\n\nif __name__ == \"__main__\":\n G = Graph()\n for u,v in [ ('a', 'b'), ('a', 'd'), ('b', 'c'),\n ('d', 'a'), ('d', 'c'), ('d', 'e'), ('e', 'c') ]:\n G.insert_edge(u,v)\n for edge in G.edge_iterator():\n print(\"{} --{}--> {}\".format(edge[0],\n edge[1],\n edge[2]))\n G.insert_node('f')\n print(\"\\nG has {} nodes:\".format(len(G)))\n for node in G.node_iterator():\n print(\"{}\".format(node), end= \" \")\n print()\n print(G)\n G1 = Graph()\n \n for u, v in [('a', 'c'), ('a', 'f'), ('a', 'e'), ('c', 'b'), ('c', 'd'),\n ('b', 'f'), ('d','f'),('d','g'), ('f','g'),('g','j'), ('e','h'),\n ('h','j'), ('k','l')]:\n G1.insert_edge(u,v)\n print(\"BFS from {}\".format('a'))\n G1.BFS('a')\n \n \n G2 = Graph()\n for u, v in [('a', 'c'), ('a', 'f'), ('a', 'e'), ('c', 'b'), ('c', 'd'),\n ('b', 'f'), ('d','f'),('d','g'), ('f','g'),('g','j'), ('e','h'),\n ('h','j'), ('k','l'), ('d', 'b'), ('j','a'), ('g','b'), ('j','d')]:\n G2.insert_edge(u,v)\n \n D, P = G2.get_distance('b')\n print(\"Distances from 'b': {}\".format(D))\n print(\"All parents: {}\".format(P))\n print(\"Path from 'b' to 'c': {}\".format(printPath('b','c', P)))\n\n \n \n print(\"Distances from 'a': {}\".format(D))\n print(\"All parents: {}\".format(P))\n \n D, P = G2.get_distance('a')\n \n print(\"Path from 'a' to 'j': {}\".format(printPath('a','j', P)))\n print(\"Path from 'a' to 'k': {}\".format(printPath('a','k', P)))\n print(\"Path from 'a' to 'f': {}\".format(printPath('a','f', P)))\n print(\"Path from 'a' to 'h': {}\".format(printPath('a','h', P)))\n\n sp = G2.get_shortest_path('a','j')\n print(\"Shortest path from 'a' to 'j': {}\".format(sp))\n \n print(\"DFS from a:\")\n G2.DFS('a')\n print(\"DFS from b:\")\n G2.DFS('b')\n \n \n myG = Graph()\n \n for u, v in [('a', 'b'), ('b', 'a'), ('b', 
'c'), ('c', 'b'), ('c', 'd'),\n ('d', 'c'), ('d','b'),('b','d'), ('d','a'),('a','d'), ('e','g'),\n ('g','e'), ('e','f'), ('f', 'e'), ('f','h'), ('h','f'), ('h','g'),\n ('g','h'),('h','i'),('i','h'),('f','i'),('i','f'), ('j','k'),('k','j')]:\n myG.insert_edge(u,v)\n \n N, con_comp = cc(myG)\n print(\"{} connected components:\\n{}\".format(N,con_comp))\n \n \n ", "a --b--> 0\na --d--> 0\nb --c--> 0\nd --a--> 0\nd --c--> 0\nd --e--> 0\ne --c--> 0\n\nG has 6 nodes:\na b d c e f \nNodes:\na,b,d,c,e,f\nEdges:\na --0--> b\na --0--> d\nb --0--> c\nd --0--> a\nd --0--> c\nd --0--> e\nc\ne --0--> c\nf\n\nBFS from a\nvisiting: a\nvisiting: c\nvisiting: f\nvisiting: e\nvisiting: b\nvisiting: d\nvisiting: g\nvisiting: h\nvisiting: j\nDistances from 'b': {'a': 4, 'c': 5, 'f': 1, 'e': 5, 'b': 0, 'd': 4, 'g': 2, 'j': 3, 'h': 6, 'k': inf, 'l': inf}\nAll parents: {'a': 'j', 'c': 'a', 'f': 'b', 'e': 'a', 'b': 'b', 'd': 'j', 'g': 'f', 'j': 'g', 'h': 'e', 'k': -1, 'l': -1}\nPath from 'b' to 'c': b --> f --> g --> j --> a --> c\nDistances from 'a': {'a': 4, 'c': 5, 'f': 1, 'e': 5, 'b': 0, 'd': 4, 'g': 2, 'j': 3, 'h': 6, 'k': inf, 'l': inf}\nAll parents: {'a': 'j', 'c': 'a', 'f': 'b', 'e': 'a', 'b': 'b', 'd': 'j', 'g': 'f', 'j': 'g', 'h': 'e', 'k': -1, 'l': -1}\nPath from 'a' to 'j': a --> f --> g --> j\nPath from 'a' to 'k': Not available\nPath from 'a' to 'f': a --> f\nPath from 'a' to 'h': a --> e --> h\nShortest path from 'a' to 'j': j --> a\nDFS from a:\nvisiting a\nvisiting e\nvisiting h\nvisiting j\nvisiting d\nvisiting b\nvisiting f\nvisiting g\nvisiting c\nDFS from b:\nvisiting b\nvisiting f\nvisiting g\nvisiting j\nvisiting d\nvisiting a\nvisiting e\nvisiting h\nvisiting c\n3 connected components:\n{'a': 1, 'b': 1, 'c': 1, 'd': 1, 'e': 2, 'g': 2, 'f': 2, 'h': 2, 'i': 2, 'j': 3, 'k': 3}\n" ], [ "from collections import deque\nimport math\n\nclass Graph:\n \n\"\"\"...\"\"\"\n \n #computes the distance from root of all nodes \n def get_distance(self, root):\n distances = dict()\n parents = dict()\n for node in self.node_iterator():\n distances[node] = math.inf\n parents[node] = -1\n Q = deque()\n Q.append(root)\n distances[root] = 0\n parents[root] = root\n while len(Q) > 0:\n curNode = Q.popleft()\n for n in self.adj(curNode):\n if distances[n] == math.inf:\n distances[n] = distances[curNode] + 1\n parents[n] = curNode\n Q.append(n)\n return (distances,parents)\n \n\n ", "_____no_output_____" ] ], [ [ "### Cycle detection (un-directed graphs)\n\nThe recursive visit does a DFS and it checks for each node if it back-connects to form a cycle. 
At each call we need to remember where we came from to avoid trivial loops.", "_____no_output_____" ] ], [ [ "def has_cycleRec(G, u, from_node, visited):\n visited.add(u)\n for v in G.adj(u):\n if v != from_node: #to avoid trivial cycles\n if v in visited:\n return True\n else:\n #continue with the visit to check\n #if there are cycles\n if has_cycleRec(G,v, u, visited):\n return True\n return False\n \n\ndef has_cycle(G):\n visited = set()\n #I am starting the visit from all nodes\n for node in G.node_iterator():\n if node not in visited:\n if has_cycleRec(G, node, None, visited):\n return True\n return False\n \n \nmyG = Graph()\n \nfor u, v in [('a', 'b'), ('b', 'a'), ('b', 'c'), ('c', 'b'), ('c', 'd'),\n ('d', 'c'), ('c','e'),('e','c'), ('d','a'),('a','d'), ('e','d'),\n ('d','e')]:\n myG.insert_edge(u,v) \n\nprint(has_cycle(myG))\n \nmyG = Graph()\nfor u, v in [('a', 'b'), ('b', 'a'), ('b', 'c'), ('c', 'b'), ('c', 'd'),\n ('d', 'c'), ('c','e'),('e','c'), ('e','d'),\n ('d','e')]:\n myG.insert_edge(u,v) \n\nprint(has_cycle(myG))\n\n\nmyG = Graph()\nfor u, v in [('a', 'b'), ('b', 'a'), ('b', 'c'), ('c', 'b'),\n ('c','e'),('e','c'), ('e','d'),\n ('d','e')]:\n myG.insert_edge(u,v) \n\nprint(has_cycle(myG))", "_____no_output_____" ] ], [ [ "## DFS schema", "_____no_output_____" ] ], [ [ "clock = 0\n\ndef dfs_schema(G, node, dt, ft):\n #clock: visit time (global variable)\n #dt: discovery time\n #ft: finish time\n global clock \n\n clock += 1\n dt[node] = clock\n print(\"Start time {}: {}\".format(node, clock))\n\n for v in G.adj(node):\n if dt[v] == 0:\n #DFS VISIT edge\n #visit the edge (node,v)\n print(\"\\tDFS edge: {} --> {}\".format(node, v))\n dfs_schema(G,v, dt, ft)\n elif dt[node] > dt[v] and ft[v] == 0:\n #BACK EDGE\n #visit the back edge (node,v)\n print(\"\\tBack edge: {}--> {}\".format(node,v))\n elif dt[node] < dt[v] and ft[v] != 0:\n #FORWARD EDGE\n #visit the forward edge (node,v)\n print(\"\\tForward edge: {}--> {}\".format(node,v))\n else:\n #CROSS EDGE\n print(\"\\tCross edge: {} --> {}\".format(node,v))\n clock += 1\n ft[node] = clock\n print(\"Finish time {}: {}\".format(node,clock))\n return dt,ft\n\n\nG = Graph()\n\nfor u,v,c in [('a','b', 'black'), ('a','c','black'), ('a','d', 'black'),\n ('d','a','black'), ('d','b','black'), ('b','c', 'black'),\n ('e','c','black')]:\n print(u,v,c)\n G.insert_edge(u,v)\n\ndt = dict()\ndf = dict()\nfor node in G.node_iterator():\n dt[node] = 0\n df[node] = 0\n \n#print(G)\n#clock = 0\ns,e = dfs_schema(G,'a', dt, df)\ns,e = dfs_schema(G,'e', dt, df)\nprint(\"Discovery times:{}\".format(s))\nprint(\"Finish times: {}\".format(e))\n\n\n", "a b black\na c black\na d black\nd a black\nd b black\nb c black\ne c black\nStart time a: 1\n\tDFS edge: a --> b\nStart time b: 2\n\tDFS edge: b --> c\nStart time c: 3\nFinish time c: 4\nFinish time b: 5\n\tForward edge: a--> c\n\tDFS edge: a --> d\nStart time d: 6\n\tBack edge: d--> a\n\tCross edge: d --> b\nFinish time d: 7\nFinish time a: 8\nStart time e: 9\n\tCross edge: e --> c\nFinish time e: 10\nDiscovery times:{'a': 1, 'b': 2, 'c': 3, 'd': 6, 'e': 9}\nFinish times: {'a': 8, 'b': 5, 'c': 4, 'd': 7, 'e': 10}\n" ] ], [ [ "## Cycle check in ordered graphs\n", "_____no_output_____" ] ], [ [ "def detect_cycle(G):\n dt = dict()\n ft = dict()\n global clock\n\n def has_cycle(G, node, dt, ft):\n #clock: visit time (global variable)\n #dt: discovery time\n #ft: finish time\n global clock \n\n clock += 1\n dt[node] = clock\n for v in G.adj(node):\n if dt[v] == 0:\n #DFS VISIT edge\n if has_cycle(G,v, dt, 
ft):\n return True\n elif dt[node] > dt[v] and ft[v] == 0:\n #BACK EDGE\n #CYCLE FOUND!!!!\n print(\"Back edge: {} --> {}\".format(node,v))\n return True\n ## Note we are not interested \n ## in forward and cross edges\n\n clock += 1\n ft[node] = clock\n return False\n\n \n for node in G.node_iterator():\n dt[node] = 0\n ft[node] = 0\n clock = 1\n for u in G.node_iterator():\n if ft[u] == 0:\n if has_cycle(G,u, dt, ft):\n return True\n return False\n\n\n\nG = Graph()\n\nfor u,v,c in [('a','b', 'black'), ('a','c','black'), ('a','d', 'black'),\n ('d','a','black'), ('d','b','black'), ('b','c', 'black'),\n ('e','c','black')]:\n print(u,v,c)\n G.insert_edge(u,v)\nprint(G)\n\nprint(\"Does G have a cycle? {}\".format(detect_cycle(G)))\n\n\nG = Graph()\n\nfor u,v,c in [('a','b', 'black'), ('b','c','black'), ('a','c', 'black')]:\n print(u,v,c)\n G.insert_edge(u,v)\nprint(G)\n\nprint(\"Does G have a cycle? {}\".format(detect_cycle(G)))\n\n\nG = Graph()\n\nfor u,v,c in [('a','b', 'black'), ('b','c','black'), ('c','a', 'black')]:\n print(u,v,c)\n G.insert_edge(u,v)\nprint(G)\n\nprint(\"Does G have a cycle? {}\".format(detect_cycle(G)))\n", "a b black\na c black\na d black\nd a black\nd b black\nb c black\ne c black\nNodes:\na,b,c,d,e\nEdges:\na --0--> b\na --0--> c\na --0--> d\nb --0--> c\nc\nd --0--> a\nd --0--> b\ne --0--> c\n\nBack edge: d --> a\nDoes G have a cycle? True\na b black\nb c black\na c black\nNodes:\na,b,c\nEdges:\na --0--> b\na --0--> c\nb --0--> c\nc\n\nDoes G have a cycle? False\na b black\nb c black\nc a black\nNodes:\na,b,c\nEdges:\na --0--> b\nb --0--> c\nc --0--> a\n\nBack edge: c --> a\nDoes G have a cycle? True\n" ] ], [ [ "## Topological sort of a DAG\n\nIdea: perform a DFS visit and when the visit of a node is finished (post-order) add the node to a stack. 
The stack at the end contains the nodes in one of the possible topological orders.", "_____no_output_____" ] ], [ [ "class Stack:\n \n # initializer, the inner structure is a list\n # data is added at the end of the list\n # for speed\n def __init__(self):\n self.__data = []\n \n # returns the length of the stack (size)\n def __len__(self):\n return len(self.__data)\n \n # returns True if stack is empty\n def isEmpty(self):\n return len(self.__data) == 0\n \n # returns the last inserted item of the stack\n # and shrinks the stack\n def pop(self):\n if len(self.__data) > 0:\n return self.__data.pop()\n \n \n # returns the last inserted element without\n # removing it (None if empty)\n def peek(self):\n if len(self.__data) > 0:\n return self.__data[-1]\n else:\n return None\n \n # adds an element to the stack\n def push(self, item):\n self.__data.append(item)\n \n # transforms the Stack into a string\n def __str__(self):\n if len(self.__data) == 0:\n return \"Stack([])\"\n else:\n out = \"Stack(\" + str(self.__data[-1])\n for i in range(len(self.__data) -2,-1, -1):\n out += \" | \" + str(self.__data[i]) \n out += \")\"\n return out\n\n\n\ndef top_sort(G):\n S = Stack()\n visited = set()\n for u in G.node_iterator():\n if u not in visited:\n top_sortRec(G, u, visited, S)\n return S\n\n\ndef top_sortRec(G, u, visited, S):\n visited.add(u)\n for v in G.adj(u):\n if v not in visited:\n top_sortRec(G,v,visited,S)\n S.push(u)\n \n\n\nG = Graph()\nfor u,v,c in [('a','c','black'), ('a','b', 'black'), ('c','e','black'), ('a','e', 'black'),\n ('b','d','black')]:\n\n G.insert_edge(u,v)\nprint(top_sort(G))\n\nG = Graph()\nfor u,v,c in [('a','b', 'black'), ('a','c','black'), ('a','e', 'black'),\n ('c','e','black'), ('b','d','black'), ('e','b', 'black')]:\n\n G.insert_edge(u,v)\nprint(top_sort(G))", "Stack(a | b | d | c | e)\nStack(a | c | e | b | d)\n" ] ], [ [ "## Strongly connected components (SCC)", "_____no_output_____" ] ], [ [ "class Stack:\n \n # initializer, the inner structure is a list\n # data is added at the end of the list\n # for speed\n def __init__(self):\n self.__data = []\n \n # returns the length of the stack (size)\n def __len__(self):\n return len(self.__data)\n \n # returns True if stack is empty\n def isEmpty(self):\n return len(self.__data) == 0\n \n # returns the last inserted item of the stack\n # and shrinks the stack\n def pop(self):\n if len(self.__data) > 0:\n return self.__data.pop()\n \n \n # returns the last inserted element without\n # removing it (None if empty)\n def peek(self):\n if len(self.__data) > 0:\n return self.__data[-1]\n else:\n return None\n \n # adds an element to the stack\n def push(self, item):\n self.__data.append(item)\n \n # transforms the Stack into a string\n def __str__(self):\n if len(self.__data) == 0:\n return \"Stack([])\"\n else:\n out = \"Stack(\" + str(self.__data[-1])\n for i in range(len(self.__data) -2,-1, -1):\n out += \" | \" + str(self.__data[i]) \n out += \")\"\n return out\n\n\ndef top_sort(G):\n S = Stack()\n visited = set()\n for u in G.node_iterator():\n if u not in visited:\n top_sortRec(G, u, visited, S)\n return S\n\n\ndef top_sortRec(G, u, visited, S):\n visited.add(u)\n for v in G.adj(u):\n if v not in visited:\n top_sortRec(G,v,visited,S)\n S.push(u)\n\n\n\ndef scc(G):\n #performs a topological sort of G\n S = top_sort(G)\n print(S)\n #Transposes G\n GT = transpose(G)\n #modified version of CC algo that \n #gets starting nodes off the stack S\n counter, ids = cc(GT,S)\n return (counter,ids)\n \n\n \ndef transpose(G):\n tmpG 
= Graph()\n for u in G.node_iterator():\n for v in G.adj(u):\n tmpG.insert_edge(v,u) \n return tmpG\n\n\n\ndef cc(G, S):\n ids = dict()\n for node in G.node_iterator():\n ids[node] = 0\n counter = 0\n while len(S) > 0:\n u = S.pop()\n if ids[u] == 0:\n counter += 1\n ccdfs(G, counter, u, ids)\n return (counter, ids)\n\ndef ccdfs(G, counter, u, ids):\n ids[u] = counter\n for v in G.adj(u):\n if ids[v] == 0:\n ccdfs(G, counter, v, ids)\n\n\nG = Graph()\nfor u,v,c in [('a','b', 'black'), ('b','c','black'), ('a','d', 'black'),\n ('c','e','black'), ('d','c','black'), ('e','d', 'black'), \n ('e','f','black'), ('f','c','black')]:\n\n G.insert_edge(u,v)\n\n \nprint(G)\nc,i = scc(G)\nprint(\"Components: {}\\nIds:{}\".format(c,i))\n\n\nG1 = Graph()\nfor u,v,c in [('a','b', 'black'), ('b','c','black'), ('a','d', 'black'),\n ('c','e','black'), ('d','c','black'), ('e','d', 'black'), \n ('e','f','black'), ('f','c','black'), ('f','g','black')]:\n\n G1.insert_edge(u,v)\nprint(G1)\nc,i = scc(G1)\nprint(\"Components: {}\\nIds:{}\".format(c,i))", "Nodes:\na,b,c,d,e,f\nEdges:\na --0--> b\na --0--> d\nb --0--> c\nc --0--> e\nd --0--> c\ne --0--> d\ne --0--> f\nf --0--> c\n\nStack(a | b | c | e | f | d)\nComponents: 3\nIds:{'b': 2, 'a': 1, 'd': 3, 'c': 3, 'e': 3, 'f': 3}\nNodes:\na,b,c,d,e,f,g\nEdges:\na --0--> b\na --0--> d\nb --0--> c\nc --0--> e\nd --0--> c\ne --0--> d\ne --0--> f\nf --0--> c\nf --0--> g\ng\n\nStack(a | b | c | e | f | g | d)\nComponents: 4\nIds:{'b': 2, 'a': 1, 'd': 3, 'c': 3, 'e': 3, 'f': 3, 'g': 4}\n" ] ], [ [ "## Complexity of visits\n\n\nComplexity: $O(n+m)$ \n\n* every node is inserted in the queue at most once;\n\n* whenever a node is extracted all its edges are analyzed once and only once;\n\n* number of edges analyzed: $$m = \\sum_{u \\in V} out\\_degree(u)$$", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
4a66938bfcda69de2fdc6816f908732ae7b5b432
235,718
ipynb
Jupyter Notebook
py_files/01. NMF analysis.ipynb
jason-sa/question_intent_classification
74b818badbf9c659053a5fc875e66ff6717befde
[ "MIT" ]
null
null
null
py_files/01. NMF analysis.ipynb
jason-sa/question_intent_classification
74b818badbf9c659053a5fc875e66ff6717befde
[ "MIT" ]
null
null
null
py_files/01. NMF analysis.ipynb
jason-sa/question_intent_classification
74b818badbf9c659053a5fc875e66ff6717befde
[ "MIT" ]
null
null
null
39.378216
337
0.464178
[ [ [ "# NMF Analysis\n\nPerforms a simple tf-idf of the question pairs and NMF dimension reduction to calculate cosine similarity of each question pair. The goal of the analysis is to see if the pairs labeled as duplicates have a distinctly different cosine similarity compared to those pairs marked as not duplicates.", "_____no_output_____" ] ], [ [ "# data manipulation\nfrom utils import save, load\nimport pandas as pd\nimport numpy as np\n\n# modeling\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.decomposition import NMF\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.model_selection import train_test_split\n\nfrom sklearn.metrics.pairwise import cosine_similarity\n\n# visualization\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# You can configure the format of the images: ‘png’, ‘retina’, ‘jpeg’, ‘svg’, ‘pdf’.\n%config InlineBackend.figure_format = 'svg'\n# this statement allows the visuals to render within your Jupyter Notebook\n%matplotlib inline", "_____no_output_____" ], [ "X_train = load('X_train')\ny_train = load('y_train')\ntrain_df = pd.DataFrame(np.concatenate([X_train, y_train.reshape(-1, 1)], axis=1))\ntrain_df = train_df.rename(columns={0:'id', 1:'question1', 2:'question2', 3:'is_duplicate'})\ntrain_df.head()", "_____no_output_____" ] ], [ [ "Let's make a stack of questions maintaining the `id` of the question pair.", "_____no_output_____" ] ], [ [ "question_df = train_df.loc[:, ['id', 'question1']]\nquestion_df = question_df.append(train_df.loc[:,['id', 'question2']], sort=False)\nquestion_df.loc[question_df['question1'].isna(), 'question1'] = question_df.loc[question_df['question1'].isna(), 'question2']\nquestion_df = question_df.drop(columns='question2')\nquestion_df = question_df.sort_values('id')\nquestion_df.head(6)", "_____no_output_____" ] ], [ [ "Let's now calcualte the tf-idf term matrix.", "_____no_output_____" ] ], [ [ "tf = TfidfVectorizer(stop_words='english', token_pattern='\\\\b[a-zA-Z0-9][a-zA-Z0-9]+\\\\b')\n\nquestion_tf = tf.fit_transform(question_df['question1'])", "_____no_output_____" ], [ "# first 10 terms\ntf.get_feature_names()[:10]", "_____no_output_____" ], [ "# last 10 terms\ntf.get_feature_names()[-10:]", "_____no_output_____" ], [ "# total terms\nlen(tf.get_feature_names())", "_____no_output_____" ] ], [ [ "Lots of words, but some cleanup will probably needed given the numbers.\n\nLet's now reduce the 74,795 term matrix utilizing NMF.", "_____no_output_____" ] ], [ [ "def calc_NMF_sim(n_components, col_name, tf_df, df):\n nmf = NMF(n_components=n_components)\n\n nmf_topics = nmf.fit_transform(tf_df)\n\n odd_idx = [i for i in range(nmf_topics.shape[0]) if i % 2 == 1]\n even_idx = [i for i in range(nmf_topics.shape[0]) if i % 2 == 0]\n\n sim_list = [cosine_similarity(\n nmf_topics[odd_idx[i]].reshape(1,-1),\n nmf_topics[even_idx[i]].reshape(1,-1)\n )[0,0]\n for i in range(len(odd_idx))]\n\n df = pd.concat([df.sort_values('id'), pd.Series(sim_list)], axis=1)\n df = df.rename(columns={0:col_name})\n \n return df", "_____no_output_____" ], [ "train_df_cosine = calc_NMF_sim(5, 'cos_sim_5', question_tf, train_df.reset_index())\ntrain_df_cosine = calc_NMF_sim(10, 'cos_sim_10', question_tf, train_df_cosine)\ntrain_df_cosine = calc_NMF_sim(50, 'cos_sim_50', question_tf, train_df_cosine)\ntrain_df_cosine = calc_NMF_sim(100, 'cos_sim_100', question_tf, train_df_cosine)", "_____no_output_____" ], [ "train_df_cosine.head()", "_____no_output_____" ] ], [ [ "We calcualted the cosine similarity for 5, 10, 50, and 
100 dimensional dimensional NMF. Let's now plot the distribution for the duplicate pairs and not duplicate pairs. The goal is to see if there is a natural division based purely on the cosine similarity between the pair of questions.", "_____no_output_____" ], [ "## This seems off!!!\n\nNeed to figure out why there is so much overlap now. Let's ignore for now and see if the MVP model suffers the same.", "_____no_output_____" ] ], [ [ "cols = ['cos_sim_5', 'cos_sim_10', 'cos_sim_50', 'cos_sim_100']\nplt.figure(figsize=(10,10))\n\nfor i in range(4):\n plt.subplot(2, 2, i+1)\n sns.kdeplot(train_df_cosine.loc[train_df_cosine['is_duplicate'] == 0, cols[i]], \n shade=True, \n label = 'No Intent',\n color = 'red')\n sns.kdeplot(train_df_cosine.loc[train_df_cosine['is_duplicate'] == 1, cols[i]], \n shade=True,\n label = 'Intent',\n color = 'green')\n plt.title(cols[i])\n plt.ylim(top=25)\n\n# plt.xlabel('cosine similarity')\n# plt.ylabel('density')\nplt.suptitle('KDE comparing pairs with intent and no intent')\n;", "_____no_output_____" ] ], [ [ "More of the duplicate pairs have a higher cosine similarity compared to the non-duplicate pairs. However, there is also significant overlap, which means finding the decision boundary will be difficult.", "_____no_output_____" ], [ "Let's take a look at the set of pairs marked as duplicates with a 0 cosine similarity with the NMF 100 transformation.", "_____no_output_____" ] ], [ [ "train_df_cosine[(train_df_cosine['is_duplicate'] == 1) & (train_df_cosine['cos_sim_100'] == 0)]", "_____no_output_____" ] ], [ [ "The first example is very confusing. This may be a result of the tf-idf calculation with default parameters is incorrect, or the cosine similarity is not the best metric. The next step would be to build a classification model using NMF or LDA topics for the pair of questions to predict whether or not the pair has the same intent.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
4a66b3b957ad2e901186f83e0d39df70f5961be3
127,772
ipynb
Jupyter Notebook
.ipynb_checkpoints/SQLite_with_regression_MM-checkpoint.ipynb
MelissaDjohan/NBA-Data-Science
500108e392d033ce496d732763c1211a94053c19
[ "Apache-2.0" ]
null
null
null
.ipynb_checkpoints/SQLite_with_regression_MM-checkpoint.ipynb
MelissaDjohan/NBA-Data-Science
500108e392d033ce496d732763c1211a94053c19
[ "Apache-2.0" ]
null
null
null
.ipynb_checkpoints/SQLite_with_regression_MM-checkpoint.ipynb
MelissaDjohan/NBA-Data-Science
500108e392d033ce496d732763c1211a94053c19
[ "Apache-2.0" ]
1
2020-01-13T18:07:22.000Z
2020-01-13T18:07:22.000Z
223.768827
91,566
0.606393
[ [ [ "%matplotlib inline\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport statsmodels.api as sm\n\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split\nfrom marshmallow import Schema, fields, post_load", "_____no_output_____" ], [ "# pip install marshmallow", "_____no_output_____" ], [ "# SQLite\nimport sqlite3\nconn = sqlite3.connect(\"db/nba.sqlite\")\nc = conn.cursor()", "_____no_output_____" ], [ "# preview data\nc.execute('SELECT * FROM PLAYERS')\nall_rows = c.fetchall()\nprint(all_rows)", "[(0, 1.0, 'Stephen Curry', 'GSW', 40231758, '$43,006,362 ', '$45,780,966 ', None, None, None, 'Bird Rights', '$129,019,086 ', 2010, 'G', 6.25, 190.0, '1988-03-14 00:00:00', 'Davidson College', 31, 69, 52, 17, 33.8, 27.3, 9.2, 19.4, 47.2, 5.1, 11.7, 43.7, 3.8, 4.2, 91.6, 0.7, 4.7, 5.3, 5.2, 2.8, 1.3, 0.4, 10.0), (1, 2.0, 'Chris Paul', 'OKC', 38506482, '$41,358,814 ', '$44,211,146 ', None, None, None, None, '$124,076,442 ', 2006, 'G', 6.0, 175.0, '1985-05-06 00:00:00', 'Wake Forest University', 34, 58, 39, 19, 32.0, 15.6, 5.2, 12.4, 41.9, 2.2, 6.1, 35.8, 3.0, 3.5, 86.2, 0.6, 3.9, 4.6, 8.2, 2.6, 2.0, 0.3, 5.2), (2, 3.0, 'Russell Westbrook', 'HOU', 38178000, '$41,006,000 ', '$43,848,000 ', '$46,662,000 ', None, None, 'Bird Rights', '$123,032,000 ', 2009, 'G', 6.25, 200.0, '1988-11-12 00:00:00', 'University of California, Los Angeles', 30, 73, 44, 29, 36.0, 22.9, 8.6, 20.2, 42.8, 1.6, 5.6, 29.0, 4.1, 6.2, 65.6, 1.5, 9.6, 11.1, 10.7, 4.5, 1.9, 0.5, 4.0), (3, 4.0, 'John Wall', 'WAS', 37800000, '$40,824,000 ', '$43,848,000 ', '$46,872,000 ', None, None, 'Bird Rights', '$122,472,000 ', 2011, 'G', 6.333333333333333, 195.0, '1990-09-06 00:00:00', 'University of Kentucky', 28, 32, 11, 21, 34.5, 20.7, 7.7, 17.3, 44.4, 1.6, 5.3, 30.2, 3.8, 5.5, 69.7, 0.5, 3.2, 3.6, 8.7, 3.8, 1.5, 0.9, -4.8), (4, 5.0, 'James Harden', 'HOU', 37800000, '$40,824,000 ', '$43,848,000 ', '$46,872,000 ', None, None, 'Bird Rights', '$122,472,000 ', 2010, 'G', 6.416666666666667, 220.0, '1989-08-26 00:00:00', 'Arizona State University', 29, 78, 51, 27, 36.8, 36.1, 10.8, 24.5, 44.2, 4.8, 13.2, 36.8, 9.7, 11.0, 87.9, 0.8, 5.8, 6.6, 7.5, 5.0, 2.0, 0.7, 4.6), (5, 6.0, 'LeBron James', 'LAL', 37436858, '$39,219,565 ', '$41,002,273 ', None, None, None, None, '$76,656,423 ', 2004, 'F-G', 6.666666666666667, 250.0, '1984-12-30 00:00:00', None, 34, 55, 28, 27, 35.2, 27.4, 10.1, 19.9, 51.0, 2.0, 5.9, 33.9, 5.1, 7.6, 66.5, 1.0, 7.4, 8.5, 8.3, 3.6, 1.3, 0.6, 2.1), (6, 7.0, 'Kevin Durant', 'BRK', 37199000, '$39,058,950 ', '$40,918,900 ', '$42,778,850 ', None, None, None, '$117,176,850 ', 2008, 'F-G', 6.75, 240.0, '1988-09-29 00:00:00', 'University of Texas at Austin', 30, 78, 54, 24, 34.6, 26.0, 9.2, 17.7, 52.1, 1.8, 5.0, 35.3, 5.7, 6.5, 88.5, 0.4, 5.9, 6.4, 5.9, 2.9, 0.7, 1.1, 8.4), (7, 8.0, 'Kemba Walker', 'BOS', 34379100, '$32,742,000 ', '$36,016,200 ', '$37,653,300 ', None, None, None, '$103,137,300 ', 2012, 'G', 6.083333333333333, 172.0, '1990-05-08 00:00:00', 'University of Connecticut', 29, 82, 39, 43, 34.9, 25.6, 8.9, 20.5, 43.4, 3.2, 8.9, 35.6, 4.6, 5.5, 84.4, 0.6, 3.8, 4.4, 5.9, 2.6, 1.2, 0.4, 0.4), (8, 9.0, 'Blake Griffin', 'DET', 34234964, '$36,595,996 ', '$38,957,028 ', None, None, None, 'Bird Rights', '$70,830,960 ', 2011, 'F', 6.833333333333333, 251.0, '1989-03-16 00:00:00', 'University of Oklahoma', 30, 75, 39, 36, 35.0, 24.5, 8.3, 17.9, 46.2, 2.5, 7.0, 36.2, 5.5, 7.3, 75.3, 1.3, 6.2, 7.5, 5.4, 3.4, 0.7, 0.4, 0.5), (9, 10.0, 'Kyle Lowry', 'TOR', 
33296296, None, None, None, None, None, 'Bird Rights', '$33,296,296 ', 2007, 'G', 6.0, 205.0, '1986-03-25 00:00:00', 'Villanova University', 33, 65, 47, 18, 34.1, 14.2, 4.7, 11.4, 41.1, 2.4, 7.0, 34.7, 2.5, 3.0, 83.0, 0.6, 4.2, 4.8, 8.7, 2.8, 1.4, 0.5, 8.2), (10, 11.0, 'Paul George', 'LAC', 33005556, '$35,450,412 ', '$37,895,268 ', None, None, None, 'Maximum Salary', '$68,455,968 ', 2011, 'F', 6.75, 220.0, '1990-05-02 00:00:00', 'California State University, Fresno', 29, 77, 46, 31, 36.9, 28.0, 9.2, 21.0, 43.8, 3.8, 9.8, 38.6, 5.9, 7.0, 83.9, 1.4, 6.8, 8.2, 4.1, 2.7, 2.2, 0.4, 6.4), (11, 12.0, 'Klay Thompson', 'GSW', 32742000, '$35,361,360 ', '$37,980,720 ', '$40,600,080 ', '$43,219,440 ', None, None, '$189,903,600 ', 2012, 'G-F', 6.583333333333333, 215.0, '1990-02-08 00:00:00', 'Washington State University', 29, 78, 55, 23, 34.0, 21.5, 8.4, 18.0, 46.7, 3.1, 7.7, 40.2, 1.7, 2.0, 81.6, 0.5, 3.4, 3.8, 2.4, 1.5, 1.1, 0.6, 4.5), (12, 13.0, 'Jimmy Butler', 'MIA', 32742000, '$34,379,100 ', '$36,016,200 ', '$37,653,300 ', None, None, None, '$103,137,300 ', 2012, 'F-G', 6.666666666666667, 236.0, '1989-09-14 00:00:00', 'Marquette University', 29, 65, 38, 27, 33.6, 18.7, 6.4, 13.9, 46.2, 1.0, 3.0, 34.7, 4.8, 5.6, 85.5, 1.9, 3.4, 5.3, 4.0, 1.5, 1.9, 0.6, 2.5), (13, 14.0, 'Kawhi Leonard', 'LAC', 32742000, '$34,379,100 ', '$36,016,200 ', None, None, None, None, '$67,121,100 ', 2012, 'F', 6.583333333333333, 230.0, '1991-06-29 00:00:00', 'San Diego State University', 28, 60, 41, 19, 34.0, 26.6, 9.3, 18.8, 49.6, 1.9, 5.0, 37.1, 6.1, 7.1, 85.4, 1.3, 6.0, 7.3, 3.3, 2.0, 1.8, 0.4, 5.9), (14, 15.0, 'Gordon Hayward', 'BOS', 32700690, '$34,187,085 ', None, None, None, None, 'Cap space', '$32,700,690 ', 2011, 'F-G', 6.666666666666667, 226.0, '1990-03-23 00:00:00', 'Butler University', 29, 72, 44, 28, 25.9, 11.5, 4.1, 8.8, 46.6, 1.1, 3.2, 33.3, 2.2, 2.6, 83.4, 0.7, 3.8, 4.5, 3.4, 1.5, 0.9, 0.3, 3.0), (15, 16.0, 'Mike Conley', 'UTA', 32511623, '$34,504,132 ', None, None, None, None, 'Cap Space', '$54,938,006 ', 2008, 'G', 6.083333333333333, 175.0, '1987-10-11 00:00:00', 'Ohio State University', 31, 70, 30, 40, 33.5, 21.1, 7.0, 16.0, 43.8, 2.2, 6.1, 36.4, 4.9, 5.8, 84.5, 0.6, 2.8, 3.4, 6.4, 1.9, 1.3, 0.3, 0.3), (16, 17.0, 'Kyrie Irving', 'BRK', 31742000, '$33,329,100 ', '$34,916,200 ', '$36,503,300 ', None, None, None, '$136,490,600 ', 2012, 'G', 6.25, 193.0, '1992-03-23 00:00:00', 'Duke University', 27, 67, 37, 30, 33.0, 23.8, 9.0, 18.5, 48.7, 2.6, 6.5, 40.1, 3.2, 3.7, 87.3, 1.1, 3.9, 5.0, 6.9, 2.6, 1.5, 0.5, 5.0), (17, 18.0, 'Tobias Harris', 'PHI', 31034483, '$33,517,241 ', '$36,000,000 ', '$38,482,759 ', '$40,965,517 ', None, None, '$180,000,000 ', 2012, 'F', 6.75, 235.0, '1992-07-15 00:00:00', 'University of Tennessee', 26, 82, 46, 36, 34.7, 20.0, 7.5, 15.3, 48.7, 1.9, 4.8, 39.7, 3.2, 3.7, 86.6, 0.8, 7.0, 7.9, 2.8, 1.8, 0.6, 0.5, 0.2), (18, 19.0, 'Khris Middleton', 'MIL', 30603448, '$33,051,724 ', '$35,500,000 ', '$37,948,276 ', '$40,396,552 ', None, None, '$137,103,448 ', 2013, 'F-G', 6.666666666666667, 234.0, '1991-08-12 00:00:00', 'Texas A&M University', 27, 77, 56, 21, 31.1, 18.3, 6.6, 14.9, 44.1, 2.3, 6.2, 37.8, 2.8, 3.4, 83.7, 0.6, 5.3, 6.0, 4.3, 2.3, 1.0, 0.1, 7.2), (19, 20.0, 'Paul Millsap', 'DEN', 30500000, None, None, None, None, None, 'Cap space', '$30,500,000 ', 2007, 'F', 6.666666666666667, 246.0, '1985-02-10 00:00:00', 'Louisiana Tech University', 34, 70, 49, 21, 27.1, 12.6, 4.6, 9.5, 48.4, 0.8, 2.3, 36.5, 2.6, 3.6, 72.7, 2.2, 5.0, 7.2, 2.0, 1.4, 1.2, 0.8, 4.9), (20, 21.0, 'Damian Lillard', 'POR', 
29802321, '$31,626,953 ', '$43,750,000 ', '$47,250,000 ', '$50,750,000 ', '$54,250,000 ', '1st Round Pick', '$257,429,274 ', 2013, 'G', 6.25, 195.0, '1990-07-15 00:00:00', 'Weber State University', 28, 80, 51, 29, 35.5, 25.8, 8.5, 19.2, 44.4, 3.0, 8.0, 36.9, 5.9, 6.4, 91.2, 0.9, 3.8, 4.6, 6.9, 2.7, 1.1, 0.4, 6.2), (21, 22.0, 'Kevin Love', 'CLE', 28900000, '$31,300,000 ', '$31,300,000 ', '$28,900,000 ', None, None, 'Bird Rights', '$120,400,000 ', 2009, 'F', 6.833333333333333, 251.0, '1988-09-07 00:00:00', 'University of California, Los Angeles', 30, 22, 7, 15, 27.2, 17.0, 5.0, 12.9, 38.5, 2.4, 6.7, 36.1, 4.7, 5.2, 90.4, 1.5, 9.4, 10.9, 2.2, 1.9, 0.3, 0.2, -2.5), (22, 24.0, 'Al Horford', 'PHI', 28000000, '$27,500,000 ', '$27,000,000 ', '$26,500,000 ', None, None, None, '$97,000,000 ', 2008, 'C-F', 6.833333333333333, 245.0, '1986-06-03 00:00:00', 'University of Florida', 33, 68, 41, 27, 29.0, 13.6, 5.7, 10.6, 53.5, 1.1, 3.0, 36.0, 1.1, 1.4, 82.1, 1.8, 5.0, 6.7, 4.2, 1.5, 0.9, 1.3, 3.8), (23, 25.0, 'DeMar DeRozan', 'SAS', 27739975, '$27,739,975 ', None, None, None, None, 'Cap Space', '$27,739,975 ', 2010, 'G-F', 6.583333333333333, 221.0, '1989-08-07 00:00:00', 'University of Southern California', 29, 77, 44, 33, 34.9, 21.2, 8.2, 17.1, 48.1, 0.1, 0.6, 15.6, 4.8, 5.7, 83.0, 0.7, 5.3, 6.0, 6.2, 2.6, 1.1, 0.5, 0.1), (24, 27.0, 'Joel Embiid', 'PHI', 27504630, '$29,542,010 ', '$31,579,390 ', '$33,616,770 ', None, None, '1st Round Pick', '$122,242,800 ', 2017, 'C-F', 7.0, 250.0, '1994-03-16 00:00:00', 'University of Kansas', 25, 64, 43, 21, 33.7, 27.5, 9.1, 18.7, 48.4, 1.2, 4.1, 30.0, 8.2, 10.1, 80.4, 2.5, 11.1, 13.6, 3.7, 3.5, 0.7, 1.9, 5.8), (25, 29.0, \"D'Angelo Russell\", 'GSW', 27285000, '$28,649,250 ', '$30,013,500 ', '$31,377,750 ', None, None, None, '$117,325,500 ', 2016, 'G', 6.416666666666667, 195.0, '1996-02-23 00:00:00', 'Ohio State University', 23, 81, 42, 39, 30.2, 21.1, 8.1, 18.7, 43.4, 2.9, 7.8, 36.9, 2.0, 2.5, 78.0, 0.7, 3.2, 3.9, 7.0, 3.1, 1.2, 0.2, 0.3), (26, 30.0, 'Andrew Wiggins', 'MIN', 27270000, '$29,290,000 ', '$31,310,000 ', '$33,330,000 ', None, None, '1st Round Pick', '$121,200,000 ', 2015, 'F-G', 6.666666666666667, 199.0, '1995-02-23 00:00:00', 'University of Kansas', 24, 73, 31, 42, 34.8, 18.1, 6.8, 16.6, 41.2, 1.6, 4.8, 33.9, 2.8, 4.1, 69.9, 1.1, 3.7, 4.8, 2.5, 1.9, 1.0, 0.7, -0.9), (27, 32.0, 'Devin Booker', 'PHO', 27250000, '$29,430,000 ', '$31,610,000 ', '$33,790,000 ', '$35,970,000 ', None, '1st round pick', '$158,050,000 ', 2016, 'G', 6.5, 206.0, '1996-10-30 00:00:00', 'University of Kentucky', 22, 64, 15, 49, 35.0, 26.6, 9.2, 19.6, 46.7, 2.1, 6.5, 32.6, 6.1, 7.1, 86.6, 0.6, 3.5, 4.1, 6.8, 4.1, 0.9, 0.2, -5.2), (28, 33.0, 'Karl-Anthony Towns', 'MIN', 27250000, '$29,430,000 ', '$31,610,000 ', '$33,790,000 ', '$35,970,000 ', None, '1st round pick', '$155,870,000 ', 2016, 'C-F', 7.0, 244.0, '1995-11-15 00:00:00', 'University of Kentucky', 23, 77, 34, 43, 33.0, 24.4, 8.8, 17.1, 51.8, 1.8, 4.6, 40.0, 4.9, 5.8, 83.6, 3.4, 9.0, 12.4, 3.4, 3.1, 0.9, 1.6, 0.7), (29, 34.0, 'Bradley Beal', 'WAS', 27093019, '$28,751,775 ', '$34,502,130 ', '$37,262,300 ', None, None, 'Cap Space', '$90,346,924 ', 2013, 'G', 6.416666666666667, 207.0, '1993-06-28 00:00:00', 'University of Florida', 26, 82, 32, 50, 36.9, 25.6, 9.3, 19.6, 47.5, 2.5, 7.3, 35.1, 4.4, 5.5, 80.8, 1.1, 3.9, 5.0, 5.5, 2.7, 1.5, 0.7, -1.4), (30, 35.0, 'Anthony Davis', 'LAL', 27093019, '$28,751,775 ', None, None, None, None, '1st Round Pick', '$27,093,019 ', 2013, 'F-C', 6.833333333333333, 253.0, '1993-03-11 00:00:00', 
'University of Kentucky', 26, 56, 25, 31, 33.0, 25.9, 9.5, 18.3, 51.7, 0.9, 2.6, 33.1, 6.1, 7.7, 79.4, 3.1, 8.9, 12.0, 3.9, 2.0, 1.6, 2.4, 2.4), (31, 36.0, 'Andre Drummond', 'DET', 27093019, '$28,751,775 ', None, None, None, None, 'Cap Space', '$27,093,019 ', 2013, 'C', 6.916666666666667, 279.0, '1993-08-10 00:00:00', 'University of Connecticut', 25, 79, 40, 39, 33.5, 17.3, 7.1, 13.3, 53.3, 0.1, 0.5, 13.2, 3.1, 5.2, 59.0, 5.4, 10.2, 15.6, 1.4, 2.2, 1.7, 1.7, 2.2), (32, 37.0, 'Hassan Whiteside', 'POR', 27093018, None, None, None, None, None, 'Cap Space', '$27,093,018 ', 2011, 'C', 7.0, 265.0, '1989-06-13 00:00:00', 'Marshall University', 30, 72, 33, 39, 23.3, 12.3, 5.4, 9.4, 57.1, 0.0, 0.2, 12.5, 1.5, 3.4, 44.9, 3.6, 7.8, 11.3, 0.8, 1.3, 0.6, 1.9, 0.3), (33, 39.0, 'Jrue Holiday', 'NOP', 26131111, '$26,131,111 ', '$27,020,000 ', None, None, None, 'Bird Rights', '$52,262,222 ', 2010, 'G', 6.333333333333333, 205.0, '1990-06-12 00:00:00', 'University of California, Los Angeles', 29, 67, 30, 37, 35.8, 21.2, 8.2, 17.3, 47.2, 1.8, 5.4, 32.5, 3.1, 4.0, 76.8, 1.1, 3.9, 5.0, 7.7, 3.1, 1.6, 0.8, 3.4), (34, 40.0, 'LaMarcus Aldridge', 'SAS', 26000000, '$24,000,000 ', None, None, None, None, 'Cap Space', '$33,000,000 ', 2007, 'F-C', 6.916666666666667, 260.0, '1985-07-19 00:00:00', 'University of Texas at Austin', 33, 81, 48, 33, 33.2, 21.3, 8.4, 16.3, 51.9, 0.1, 0.5, 23.8, 4.3, 5.1, 84.7, 3.1, 6.1, 9.2, 2.4, 1.8, 0.5, 1.3, 1.1), (35, 41.0, 'Steven Adams', 'OKC', 25842697, '$27,528,090 ', None, None, None, None, '1st Round Pick', '$53,370,787 ', 2014, 'C', 7.0, 255.0, '1993-07-20 00:00:00', 'University of Pittsburgh', 25, 80, 47, 33, 33.4, 13.9, 6.0, 10.1, 59.5, 0.0, 0.0, 0.0, 1.8, 3.7, 50.0, 4.9, 4.6, 9.5, 1.6, 1.7, 1.5, 1.0, 4.9), (36, 42.0, 'Giannis Antetokounmpo', 'MIL', 25842697, '$27,528,090 ', None, None, None, None, '1st Round Pick', '$53,370,787 ', 2014, 'F-G', 6.916666666666667, 222.0, '1994-12-06 00:00:00', None, 24, 72, 56, 16, 32.8, 27.7, 10.0, 17.3, 57.8, 0.7, 2.8, 25.6, 6.9, 9.5, 72.9, 2.2, 10.3, 12.5, 5.9, 3.7, 1.3, 1.5, 9.1), (37, 43.0, 'Marc Gasol', 'TOR', 25595700, None, None, None, None, None, 'Bird Rights', '$25,595,700 ', 2009, 'C', 7.083333333333333, 255.0, '1985-01-29 00:00:00', None, 34, 79, 39, 40, 30.8, 13.6, 4.9, 11.0, 44.8, 1.3, 3.5, 36.3, 2.4, 3.2, 75.9, 1.0, 6.9, 7.9, 4.4, 2.0, 1.1, 1.1, 2.0), (38, 44.0, 'Nicolas Batum', 'CHO', 25565217, '$27,130,434 ', None, None, None, None, 'Cap Space', '$25,565,217 ', 2009, 'F-G', 6.666666666666667, 200.0, '1988-12-14 00:00:00', None, 30, 75, 35, 40, 31.4, 9.3, 3.4, 7.5, 45.0, 1.5, 4.0, 38.9, 1.0, 1.2, 86.5, 0.9, 4.3, 5.2, 3.3, 1.6, 0.9, 0.6, -0.3), (39, 45.0, 'Chandler Parsons', 'ATL', 25102511, None, None, None, None, None, 'Cap Space', '$25,102,511 ', 2012, 'F', 6.833333333333333, 230.0, '1988-10-25 00:00:00', 'University of Florida', 30, 25, 11, 14, 19.8, 7.5, 2.7, 7.3, 37.4, 1.2, 3.8, 30.9, 0.9, 1.0, 88.0, 0.2, 2.6, 2.8, 1.7, 1.3, 0.8, 0.2, -1.8), (40, 46.0, 'Rudy Gobert', 'UTA', 25008427, '$26,525,281 ', None, None, None, None, '1st Round Pick', '$51,533,708 ', 2014, 'C', 7.083333333333333, 245.0, '1992-06-26 00:00:00', None, 27, 81, 50, 31, 31.8, 15.9, 5.9, 8.8, 66.9, 0.0, 0.0, 0.0, 4.1, 6.4, 63.6, 3.8, 9.0, 12.9, 2.0, 1.6, 0.8, 2.3, 4.7), (41, 47.0, 'Serge Ibaka', 'TOR', 23271604, None, None, None, None, None, 'Cap space', '$23,271,604 ', 2010, 'F-C', 6.833333333333333, 235.0, '1989-09-18 00:00:00', None, 29, 74, 53, 21, 27.2, 15.0, 6.3, 11.9, 52.9, 0.7, 2.3, 29.0, 1.8, 2.4, 76.3, 2.1, 6.0, 8.1, 1.3, 1.5, 0.4, 1.4, 3.5), (42, 
48.0, 'Danilo Gallinari', 'OKC', 22615559, None, None, None, None, None, 'Sign and Trade', '$22,615,559 ', 2009, 'F', 6.833333333333333, 225.0, '1988-08-08 00:00:00', None, 30, 68, 40, 28, 30.3, 19.8, 6.0, 13.0, 46.3, 2.4, 5.5, 43.3, 5.4, 6.0, 90.4, 0.8, 5.3, 6.1, 2.6, 1.5, 0.7, 0.3, 1.5), (43, 49.0, 'Victor Oladipo', 'IND', 21000000, '$21,000,000 ', None, None, None, None, '1st Round Pick', '$42,000,000 ', 2014, 'G', 6.333333333333333, 210.0, '1992-05-04 00:00:00', 'Indiana University', 27, 36, 25, 11, 31.9, 18.8, 6.9, 16.3, 42.3, 2.1, 6.0, 34.3, 2.9, 3.9, 73.0, 0.6, 5.0, 5.6, 5.2, 2.3, 1.7, 0.3, 3.5), (44, 50.0, 'Malcolm Brogdon', 'IND', 20000000, '$20,700,000 ', '$21,700,000 ', '$22,600,000 ', None, None, None, '$85,000,000 ', 2017, 'G', 6.416666666666667, 215.0, '1992-12-11 00:00:00', 'University of Virginia', 26, 64, 49, 15, 28.6, 15.6, 5.9, 11.7, 50.5, 1.6, 3.8, 42.6, 2.2, 2.4, 92.8, 1.0, 3.5, 4.5, 3.2, 1.4, 0.7, 0.2, 6.6), (45, 51.0, 'Terry Rozier', 'CHO', 19894737, '$18,900,000 ', '$17,905,263 ', None, None, None, None, '$56,700,000 ', 2016, 'G', 6.166666666666667, 190.0, '1994-03-17 00:00:00', 'University of Louisville', 25, 79, 47, 32, 22.7, 9.0, 3.3, 8.4, 38.7, 1.5, 4.3, 35.3, 0.9, 1.2, 78.5, 0.4, 3.5, 3.9, 2.9, 0.9, 0.9, 0.3, -0.5), (46, 52.0, 'Aaron Gordon', 'ORL', 19863636, '$18,136,364 ', '$16,409,091 ', None, None, None, None, '$54,409,091 ', 2015, 'F', 6.75, 220.0, '1995-09-16 00:00:00', 'University of Arizona', 23, 78, 40, 38, 33.8, 16.0, 6.0, 13.4, 44.9, 1.6, 4.4, 34.9, 2.4, 3.2, 73.1, 1.7, 5.7, 7.4, 3.7, 2.1, 0.7, 0.7, 1.4), (47, 53.0, 'Zach LaVine', 'CHI', 19500000, '$19,500,000 ', '$19,500,000 ', None, None, None, None, '$58,500,000 ', 2015, 'G', 6.416666666666667, 189.0, '1995-03-10 00:00:00', 'University of California, Los Angeles', 24, 63, 16, 47, 34.5, 23.7, 8.4, 18.0, 46.7, 1.9, 5.1, 37.4, 5.0, 6.0, 83.2, 0.6, 4.0, 4.7, 4.5, 3.4, 1.0, 0.4, -4.8), (48, 54.0, 'Kent Bazemore', 'POR', 19269662, None, None, None, None, None, 'Cap Space', '$19,269,662 ', 2013, 'G-F', 6.416666666666667, 201.0, '1989-07-01 00:00:00', 'Old Dominion University', 29, 67, 24, 43, 24.5, 11.6, 4.1, 10.3, 40.2, 1.4, 4.5, 32.0, 1.9, 2.6, 72.6, 0.6, 3.3, 3.9, 2.3, 1.8, 1.3, 0.6, -4.4), (49, 55.0, 'Tyler Johnson', 'PHO', 19245370, None, None, None, None, None, 'Cap Space', '$19,245,370 ', 2015, 'G', 6.333333333333333, 186.0, '1992-05-07 00:00:00', 'California State University, Fresno', 27, 57, 26, 31, 26.8, 10.9, 3.8, 9.2, 41.3, 1.6, 4.6, 34.6, 1.7, 2.2, 74.8, 0.6, 2.4, 3.0, 2.9, 1.4, 0.9, 0.5, -2.2), (50, 57.0, 'Jeff Teague', 'MIN', 19000000, None, None, None, None, None, 'Cap space', '$19,000,000 ', 2010, 'G', 6.166666666666667, 186.0, '1988-06-10 00:00:00', 'Wake Forest University', 31, 42, 23, 19, 30.1, 12.1, 4.2, 9.9, 42.3, 0.8, 2.5, 33.3, 2.9, 3.6, 80.4, 0.4, 2.1, 2.5, 8.2, 2.3, 1.0, 0.4, 0.0), (51, 58.0, 'Harrison Barnes', 'SAC', 18973214, '$20,491,071 ', '$22,008,929 ', '$23,526,786 ', None, None, None, '$85,000,000 ', 2013, 'F', 6.666666666666667, 210.0, '1992-05-30 00:00:00', 'University of North Carolina', 27, 77, 34, 43, 32.9, 16.4, 5.6, 13.3, 42.0, 2.3, 5.7, 39.5, 3.0, 3.6, 82.4, 0.7, 3.9, 4.7, 1.5, 1.3, 0.6, 0.2, -0.7), (52, 59.0, 'Evan Turner', 'ATL', 18606557, None, None, None, None, None, 'Cap Space', '$18,606,557 ', 2011, 'G', 6.583333333333333, 220.0, '1988-10-27 00:00:00', 'Ohio State University', 30, 73, 47, 26, 22.0, 6.8, 2.8, 6.1, 46.0, 0.2, 0.7, 21.2, 1.0, 1.5, 70.8, 0.5, 4.0, 4.5, 3.9, 1.6, 0.5, 0.2, 0.4), (53, 60.0, 'Draymond Green', 'GSW', 18539130, None, None, None, 
None, None, 'Bird Rights', '$18,539,130 ', 2013, 'F', 6.583333333333333, 230.0, '1990-03-04 00:00:00', 'Michigan State University', 29, 66, 48, 18, 31.3, 7.4, 2.8, 6.4, 44.5, 0.7, 2.5, 28.5, 1.0, 1.4, 69.2, 0.9, 6.4, 7.3, 6.9, 2.6, 1.4, 1.1, 7.0), (54, 61.0, 'Tristan Thompson', 'CLE', 18539130, None, None, None, None, None, 'Bird Rights', '$18,539,130 ', 2012, 'F-C', 6.75, 238.0, '1991-03-13 00:00:00', 'University of Texas at Austin', 28, 43, 8, 35, 27.9, 10.9, 4.7, 8.8, 52.9, 0.0, 0.0, 0.0, 1.6, 2.5, 64.2, 4.0, 6.2, 10.2, 2.0, 1.4, 0.7, 0.4, -7.0), (55, 62.0, 'Allen Crabbe', 'ATL', 18500000, None, None, None, None, None, 'Cap Space', '$18,500,000 ', 2014, 'G-F', 6.5, 210.0, '1992-04-09 00:00:00', 'University of California', 27, 43, 19, 24, 26.4, 9.6, 3.2, 8.7, 36.7, 2.3, 6.0, 37.8, 1.0, 1.3, 73.2, 0.4, 3.1, 3.4, 1.1, 1.1, 0.5, 0.3, 0.1), (56, 64.0, 'Reggie Jackson', 'DET', 18086956, None, None, None, None, None, 'Bird Rights', '$18,086,956 ', 2012, 'G', 6.25, 208.0, '1990-04-16 00:00:00', 'Boston College', 29, 82, 41, 41, 27.9, 15.4, 5.4, 12.8, 42.1, 2.1, 5.7, 36.9, 2.5, 2.9, 86.4, 0.5, 2.1, 2.6, 4.2, 1.8, 0.7, 0.1, 0.5), (57, 65.0, 'Myles Turner', 'IND', 18000000, '$18,000,000 ', '$18,000,000 ', '$18,000,000 ', None, None, '1st round pick', '$72,000,000 ', 2016, 'C-F', 6.916666666666667, 243.0, '1996-03-24 00:00:00', 'University of Texas at Austin', 23, 74, 43, 31, 28.6, 13.3, 5.1, 10.5, 48.7, 1.0, 2.6, 38.8, 2.0, 2.7, 73.6, 1.4, 5.8, 7.2, 1.6, 1.4, 0.8, 2.7, 1.6), (58, 66.0, 'Julius Randle', 'NYK', 18000000, '$18,900,000 ', '$19,800,000 ', None, None, None, None, '$40,900,000 ', 2015, 'F-C', 6.75, 250.0, '1994-11-29 00:00:00', 'University of Kentucky', 24, 73, 31, 42, 30.6, 21.4, 7.8, 14.9, 52.4, 0.9, 2.7, 34.4, 4.9, 6.7, 73.1, 2.2, 6.5, 8.7, 3.1, 2.8, 0.7, 0.6, -0.2), (59, 67.0, 'Gary Harris', 'DEN', 17839286, '$19,160,714 ', '$20,482,143 ', None, None, None, '1st Round Pick', '$57,482,143 ', 2015, 'G', 6.333333333333333, 210.0, '1994-09-14 00:00:00', 'Michigan State University', 24, 57, 38, 19, 28.8, 12.9, 4.7, 11.2, 42.4, 1.4, 4.2, 33.9, 2.0, 2.5, 79.9, 0.7, 2.1, 2.8, 2.2, 1.2, 1.0, 0.3, 4.0), (60, 68.0, 'Andre Iguodala', 'MEM', 17185185, None, None, None, None, None, 'Bird Rights', '$17,185,185 ', 2005, 'G-F', 6.5, 215.0, '1984-01-28 00:00:00', 'University of Arizona', 35, 68, 47, 21, 23.2, 5.7, 2.2, 4.4, 50.0, 0.7, 2.1, 33.3, 0.6, 1.0, 58.2, 0.7, 3.0, 3.7, 3.2, 0.8, 0.9, 0.8, 4.6), (61, 70.0, 'Evan Fournier', 'ORL', 17000000, '$17,000,000 ', None, None, None, None, 'Cap Space', '$17,000,000 ', 2013, 'G-F', 6.583333333333333, 205.0, '1992-10-29 00:00:00', None, 26, 81, 42, 39, 31.5, 15.1, 5.8, 13.2, 43.8, 1.9, 5.6, 34.0, 1.7, 2.1, 80.6, 0.5, 2.7, 3.2, 3.6, 1.9, 0.9, 0.1, 1.2), (62, 71.0, 'Bismack Biyombo', 'CHO', 17000000, None, None, None, None, None, 'Cap Space', '$17,000,000 ', 2012, 'C-F', 6.75, 255.0, '1992-08-28 00:00:00', None, 26, 54, 24, 30, 14.5, 4.4, 1.6, 2.9, 57.1, 0.0, 0.0, 0.0, 1.1, 1.7, 63.7, 1.5, 3.1, 4.6, 0.6, 0.6, 0.2, 0.8, -2.5), (63, 72.0, 'Derrick Favors', 'NOP', 16900000, None, None, None, None, None, None, '$16,000,000 ', 2011, 'F-C', 6.833333333333333, 265.0, '1991-07-15 00:00:00', 'Georgia Institute of Technology', 27, 76, 46, 30, 23.2, 11.8, 4.8, 8.1, 58.6, 0.2, 1.0, 21.8, 2.0, 3.0, 67.5, 2.7, 4.6, 7.4, 1.2, 1.1, 0.7, 1.4, 2.4), (64, 73.0, 'Clint Capela', 'HOU', 16456522, '$17,500,000 ', '$18,543,478 ', '$19,586,957 ', None, None, None, '$72,086,957 ', 2015, 'C', 6.833333333333333, 240.0, '1994-05-18 00:00:00', None, 25, 67, 44, 23, 33.6, 16.6, 7.1, 10.9, 
64.8, 0.0, 0.0, 0.0, 2.5, 3.9, 63.6, 4.4, 8.2, 12.7, 1.4, 1.4, 0.7, 1.5, 3.5), (65, 74.0, 'Gorgui Dieng', 'MIN', 16229213, '$17,287,640 ', None, None, None, None, '1st Round Pick', '$33,516,853 ', 2014, 'C', 6.916666666666667, 241.0, '1990-01-18 00:00:00', 'University of Louisville', 29, 76, 33, 43, 13.6, 6.4, 2.5, 5.0, 50.1, 0.3, 0.7, 33.9, 1.2, 1.4, 83.0, 1.1, 3.0, 4.1, 0.9, 0.8, 0.6, 0.5, -1.5), (66, 75.0, 'Ricky Rubio', 'PHO', 16190476, '$17,000,000 ', '$17,809,524 ', None, None, None, None, '$51,000,000 ', 2012, 'G', 6.333333333333333, 194.0, '1990-10-21 00:00:00', None, 28, 68, 40, 28, 27.9, 12.7, 4.3, 10.7, 40.4, 1.2, 3.7, 31.1, 2.9, 3.4, 85.5, 0.5, 3.1, 3.6, 6.1, 2.6, 1.3, 0.1, 3.7), (67, 77.0, 'Brandon Knight', 'CLE', 15643750, None, None, None, None, None, None, '$15,643,750 ', 2012, 'G', 6.25, 189.0, '1991-12-02 00:00:00', 'University of Kentucky', 27, 39, 17, 22, 18.9, 6.8, 2.5, 6.7, 38.1, 1.1, 3.3, 31.8, 0.7, 0.9, 79.4, 0.3, 1.3, 1.5, 1.8, 0.8, 0.5, 0.1, -4.9), (68, 78.0, 'Eric Bledsoe', 'MIL', 15625000, '$16,875,000 ', '$18,125,000 ', '$19,375,000 ', None, None, 'Bird Rights', '$70,000,000 ', 2011, 'G', 6.083333333333333, 205.0, '1989-12-09 00:00:00', 'University of Kentucky', 29, 78, 59, 19, 29.1, 15.9, 6.0, 12.4, 48.4, 1.6, 4.8, 32.9, 2.3, 3.0, 75.0, 1.1, 3.6, 4.6, 5.5, 2.1, 1.5, 0.4, 7.2), (69, 81.0, 'Ian Mahinmi', 'WAS', 15450051, None, None, None, None, None, 'Cap Space', '$15,450,051 ', 2008, 'C', 6.916666666666667, 250.0, '1986-11-05 00:00:00', None, 32, 34, 12, 22, 14.6, 4.1, 1.4, 3.1, 45.2, 0.1, 0.5, 18.8, 1.2, 1.8, 68.9, 1.4, 2.4, 3.8, 0.7, 0.6, 0.7, 0.5, -1.0), (70, 82.0, 'James Johnson', 'MIA', 15349400, '$16,047,100 ', None, None, None, None, 'Cap space', '$15,349,400 ', 2010, 'F', 6.75, 250.0, '1987-02-20 00:00:00', 'Wake Forest University', 32, 55, 27, 28, 21.2, 7.8, 3.0, 6.9, 43.3, 0.9, 2.7, 33.6, 0.9, 1.3, 71.4, 0.4, 2.8, 3.2, 2.5, 1.3, 0.6, 0.5, -1.9), (71, 83.0, 'Marvin Williams', 'CHO', 15006250, None, None, None, None, None, 'Cap Space', '$15,006,250 ', 2006, 'F', 6.75, 237.0, '1986-06-19 00:00:00', 'University of North Carolina', 33, 75, 35, 40, 28.4, 10.1, 3.7, 8.7, 42.2, 1.9, 5.1, 36.6, 0.9, 1.1, 76.7, 1.0, 4.4, 5.4, 1.2, 0.6, 0.9, 0.8, -0.1), (72, 84.0, 'Bobby Portis', 'NYK', 15000000, '$15,750,000 ', None, None, None, None, None, '$15,000,000 ', 2016, 'F', 6.916666666666667, 230.0, '1995-02-10 00:00:00', 'University of Arkansas', 24, 50, 14, 36, 26.0, 14.2, 5.6, 12.6, 44.4, 1.5, 3.8, 39.3, 1.5, 1.9, 79.4, 2.2, 5.9, 8.1, 1.4, 1.5, 0.7, 0.4, -2.2), (73, 85.0, 'Marcus Morris', 'NYK', 15000000, None, None, None, None, None, None, '$15,000,000 ', 2012, 'F', 6.75, 235.0, '1989-09-02 00:00:00', 'University of Kansas', 29, 75, 44, 31, 27.9, 13.9, 5.0, 11.3, 44.7, 1.9, 5.2, 37.5, 1.9, 2.3, 84.4, 1.0, 5.1, 6.1, 1.5, 1.2, 0.6, 0.3, 1.0), (74, 86.0, 'Danny Green', 'LAL', 14634146, '$15,365,854 ', None, None, None, None, None, '$30,000,000 ', 2010, 'G-F', 6.5, 215.0, '1987-06-22 00:00:00', 'University of North Carolina', 32, 80, 56, 24, 27.7, 10.3, 3.7, 7.9, 46.5, 2.5, 5.4, 45.5, 0.5, 0.6, 84.1, 0.8, 3.2, 4.0, 1.6, 0.9, 0.9, 0.7, 8.0), (75, 87.0, 'Cody Zeller', 'CHO', 14471910, '$15,415,730 ', None, None, None, None, '1st Round Pick', '$29,887,640 ', 2014, 'C-F', 7.0, 240.0, '1992-10-05 00:00:00', 'Indiana University', 26, 49, 22, 27, 25.4, 10.1, 3.9, 7.0, 55.1, 0.1, 0.4, 27.3, 2.3, 2.9, 78.7, 2.2, 4.6, 6.8, 2.1, 1.3, 0.8, 0.8, 0.9), (76, 88.0, 'Eric Gordon', 'HOU', 14057730, '$16,869,276 ', '$18,218,818 ', '$19,568,360 ', '$20,917,902 ', None, 'Cap Space', 
'$68,714,184 ', 2009, 'G', 6.333333333333333, 215.0, '1988-12-25 00:00:00', 'Indiana University', 30, 68, 44, 24, 31.7, 16.2, 5.6, 13.8, 40.9, 3.2, 8.8, 36.0, 1.8, 2.2, 78.3, 0.3, 1.9, 2.2, 1.9, 1.3, 0.6, 0.4, 5.1), (77, 89.0, 'Mason Plumlee', 'DEN', 14041096, None, None, None, None, None, 'Cap space', '$14,041,096 ', 2014, 'C-F', 6.916666666666667, 245.0, '1990-03-05 00:00:00', 'Duke University', 29, 82, 54, 28, 21.1, 7.8, 3.2, 5.4, 59.3, 0.0, 0.1, 20.0, 1.4, 2.4, 56.1, 2.0, 4.4, 6.4, 3.0, 1.5, 0.8, 0.9, 0.9), (78, 90.0, 'Rudy Gay', 'SAS', 14000000, '$14,000,000 ', None, None, None, None, None, '$28,000,000 ', 2007, 'F', 6.666666666666667, 230.0, '1986-08-17 00:00:00', 'University of Connecticut', 32, 69, 42, 27, 26.7, 13.7, 5.4, 10.8, 50.4, 1.1, 2.7, 40.2, 1.7, 2.1, 81.6, 0.9, 5.9, 6.8, 2.6, 1.7, 0.8, 0.5, 2.3), (79, 92.0, 'Jordan Clarkson', 'CLE', 13437500, None, None, None, None, None, 'Cap Space', '$13,437,500 ', 2015, 'G', 6.416666666666667, 194.0, '1992-06-07 00:00:00', 'University of Missouri', 27, 81, 18, 63, 27.3, 16.8, 6.5, 14.6, 44.8, 1.8, 5.5, 32.4, 2.0, 2.4, 84.4, 1.0, 2.3, 3.3, 2.4, 1.7, 0.7, 0.2, -4.3), (80, 93.0, 'Dewayne Dedmon', 'SAC', 13333334, '$13,333,333 ', '$13,333,333 ', None, None, None, None, '$40,000,000 ', 2014, 'C', 7.0, 245.0, '1989-08-12 00:00:00', 'University of Southern California', 29, 64, 24, 40, 25.1, 10.8, 4.0, 8.2, 49.2, 1.3, 3.4, 38.2, 1.4, 1.8, 81.4, 1.6, 5.9, 7.5, 1.4, 1.3, 1.1, 1.1, -2.7), (81, 94.0, 'Solomon Hill', 'MEM', 13258781, None, None, None, None, None, 'Cap Space', '$13,258,781 ', 2014, 'F', 6.583333333333333, 225.0, '1991-03-18 00:00:00', 'University of Arizona', 28, 44, 14, 30, 20.0, 4.3, 1.5, 4.0, 38.2, 0.7, 2.3, 31.7, 0.5, 0.7, 71.9, 0.8, 2.3, 3.0, 1.3, 0.7, 0.5, 0.2, -2.9), (82, 95.0, 'Justise Winslow', 'MIA', 13000000, '$13,000,000 ', '$13,000,000 ', None, None, None, '1st round pick', '$26,000,000 ', 2016, 'F', 6.583333333333333, 225.0, '1996-03-26 00:00:00', 'Duke University', 23, 66, 30, 36, 29.7, 12.6, 4.9, 11.3, 43.3, 1.5, 3.9, 37.5, 1.3, 2.1, 62.8, 1.0, 4.4, 5.4, 4.3, 2.2, 1.1, 0.3, 1.8), (83, 96.0, 'Michael Kidd-Gilchrist', 'CHO', 13000000, None, None, None, None, None, '1st Round Pick', '$13,000,000 ', 2013, 'F', 6.583333333333333, 232.0, '1993-09-26 00:00:00', 'University of Kentucky', 25, 64, 29, 35, 18.4, 6.7, 2.5, 5.2, 47.6, 0.3, 0.7, 34.0, 1.5, 1.9, 77.2, 1.4, 2.5, 3.8, 1.0, 0.7, 0.5, 0.6, -0.8), (84, 97.0, 'Will Barton', 'DEN', 12960000, '$13,920,000 ', '$14,880,000 ', None, None, None, None, '$26,880,000 ', 2013, 'G', 6.5, 175.0, '1991-01-06 00:00:00', 'University of Memphis', 28, 43, 27, 16, 27.7, 11.5, 4.3, 10.7, 40.2, 1.6, 4.6, 34.2, 1.3, 1.7, 77.0, 0.7, 3.9, 4.6, 2.9, 1.5, 0.4, 0.5, 0.2), (85, 98.0, 'Thaddeus Young', 'CHI', 12900000, '$13,545,000 ', '$14,190,000 ', None, None, None, None, '$32,445,000 ', 2008, 'F', 6.666666666666667, 221.0, '1988-06-21 00:00:00', 'Georgia Institute of Technology', 31, 81, 47, 34, 30.7, 12.6, 5.5, 10.4, 52.7, 0.6, 1.8, 34.9, 1.1, 1.7, 64.4, 2.4, 4.1, 6.5, 2.5, 1.5, 1.5, 0.4, 1.7), (86, 99.0, 'Courtney Lee', 'DAL', 12759670, None, None, None, None, None, 'Cap Space', '$12,759,670 ', 2009, 'G', 6.416666666666667, 200.0, '1985-10-03 00:00:00', 'Western Kentucky University', 33, 34, 6, 28, 12.6, 4.0, 1.6, 3.8, 41.1, 0.5, 1.6, 29.1, 0.4, 0.6, 66.7, 0.3, 1.3, 1.6, 1.1, 0.4, 0.6, 0.1, -1.6), (87, 101.0, 'Marcus Smart', 'BOS', 12553471, '$13,446,428 ', '$14,339,285 ', None, None, None, None, '$40,339,184 ', 2015, 'G', 6.333333333333333, 220.0, '1994-03-06 00:00:00', 'Oklahoma State 
University', 25, 80, 48, 32, 27.5, 8.9, 3.0, 7.1, 42.2, 1.6, 4.3, 36.4, 1.3, 1.6, 80.6, 0.7, 2.2, 2.9, 4.0, 1.5, 1.8, 0.4, 2.1), (88, 102.0, 'Terrence Ross', 'ORL', 12500000, '$13,500,000 ', '$12,500,000 ', '$11,500,000 ', None, None, None, '$50,000,000 ', 2013, 'F-G', 6.583333333333333, 206.0, '1991-02-05 00:00:00', 'University of Washington', 28, 81, 42, 39, 26.5, 15.1, 5.4, 12.7, 42.8, 2.7, 7.0, 38.3, 1.6, 1.8, 87.5, 0.3, 3.1, 3.5, 1.7, 1.1, 0.9, 0.4, 0.1), (89, 103.0, 'Miles Plumlee', 'MEM', 12500000, None, None, None, None, None, 'Bird Rights', '$12,500,000 ', 2013, 'C-F', 6.916666666666667, 249.0, '1988-09-01 00:00:00', 'Duke University', 30, 18, 1, 17, 9.6, 4.4, 1.8, 2.7, 66.7, 0.0, 0.0, 0.0, 0.9, 1.7, 53.3, 0.9, 1.3, 2.2, 0.9, 0.6, 0.3, 0.2, -2.4), (90, 104.0, 'Patty Mills', 'SAS', 12428571, '$13,285,714 ', None, None, None, None, 'Bird Rights', '$25,714,285 ', 2010, 'G', 6.0, 185.0, '1988-08-11 00:00:00', \"Saint Mary's College of California\", 30, 82, 48, 34, 23.3, 9.9, 3.4, 8.1, 42.5, 1.9, 4.9, 39.4, 1.1, 1.3, 85.4, 0.3, 1.9, 2.2, 3.0, 1.1, 0.6, 0.1, 2.5), (91, 105.0, 'Patrick Beverley', 'LAC', 12345679, '$13,333,333 ', '$14,320,988 ', None, None, None, None, '$40,000,000 ', 2013, 'G', 6.083333333333333, 185.0, '1988-07-12 00:00:00', 'University of Arkansas', 30, 78, 48, 30, 27.4, 7.6, 2.5, 6.1, 40.7, 1.4, 3.6, 39.7, 1.2, 1.6, 78.0, 1.0, 4.0, 5.0, 3.8, 1.1, 0.9, 0.6, 1.9), (92, 106.0, 'Trevor Ariza', 'SAC', 12200000, '$12,800,000 ', None, None, None, None, None, '$14,000,000 ', 2005, 'F', 6.666666666666667, 215.0, '1985-06-30 00:00:00', 'University of California, Los Angeles', 33, 69, 22, 47, 34.0, 12.5, 4.3, 10.7, 39.9, 2.1, 6.3, 33.4, 1.9, 2.4, 79.3, 0.7, 4.7, 5.4, 3.7, 1.5, 1.3, 0.3, -4.8), (93, 107.0, 'Dion Waiters', 'MIA', 12100000, '$12,650,000 ', None, None, None, None, 'Cap space', '$24,750,000 ', 2013, 'G', 6.333333333333333, 225.0, '1991-12-10 00:00:00', 'Syracuse University', 27, 44, 20, 24, 25.9, 12.0, 4.5, 10.9, 41.4, 2.5, 6.6, 37.7, 0.5, 1.0, 50.0, 0.2, 2.5, 2.6, 2.8, 1.5, 0.7, 0.2, -0.9), (94, 108.0, 'Brook Lopez', 'MIL', 12093024, '$12,697,675 ', '$13,302,325 ', '$13,906,976 ', None, None, None, '$52,000,000 ', 2009, 'C', 7.0, 275.0, '1988-04-01 00:00:00', 'Stanford University', 31, 81, 60, 21, 28.7, 12.5, 4.4, 9.7, 45.2, 2.3, 6.3, 36.5, 1.4, 1.6, 84.2, 0.4, 4.5, 4.9, 1.2, 1.0, 0.6, 2.2, 7.2), (95, 110.0, 'Cory Joseph', 'SAC', 12000000, '$12,600,000 ', '$12,600,000 ', None, None, None, None, '$24,600,000 ', 2012, 'G', 6.25, 193.0, '1991-08-20 00:00:00', 'University of Texas at Austin', 27, 82, 48, 34, 25.2, 6.5, 2.8, 6.7, 41.2, 0.7, 2.1, 32.2, 0.4, 0.5, 69.8, 0.5, 2.9, 3.4, 3.9, 1.0, 1.1, 0.3, 2.6), (96, 111.0, 'Joe Ingles', 'UTA', 11954546, '$10,863,637 ', None, None, None, None, 'Bird Rights', '$22,818,183 ', 2015, 'F-G', 6.666666666666667, 226.0, '1987-10-02 00:00:00', None, 31, 82, 50, 32, 31.3, 12.1, 4.4, 9.8, 44.8, 2.3, 5.9, 39.1, 1.1, 1.5, 70.7, 0.4, 3.6, 4.0, 5.7, 2.4, 1.2, 0.2, 5.2), (97, 112.0, 'Kelly Olynyk', 'MIA', 11667885, '$12,198,243 ', None, None, None, None, 'Cap space', '$11,667,885 ', 2014, 'C-F', 7.0, 238.0, '1991-04-19 00:00:00', 'Gonzaga University', 28, 79, 39, 40, 22.9, 10.0, 3.3, 7.1, 46.3, 1.4, 4.0, 35.4, 1.9, 2.3, 82.2, 0.9, 3.8, 4.7, 1.8, 1.4, 0.7, 0.5, 1.3), (98, 113.0, 'Maurice Harkless', 'LAC', 11511234, None, None, None, None, None, 'Cap Space', '$11,511,234 ', 2013, 'F-G', 6.75, 215.0, '1993-05-11 00:00:00', \"St. 
John's University\", 26, 60, 37, 23, 23.6, 7.7, 3.2, 6.5, 48.7, 0.6, 2.0, 27.5, 0.8, 1.2, 67.1, 1.3, 3.2, 4.5, 1.2, 0.8, 1.1, 0.9, 4.0), (99, 114.0, 'Tony Snell', 'DET', 11392857, '$12,178,571 ', None, None, None, None, 'Cap space', '$11,392,857 ', 2014, 'G-F', 6.583333333333333, 200.0, '1991-11-10 00:00:00', 'University of New Mexico', 27, 74, 55, 19, 17.6, 6.0, 2.2, 4.9, 45.2, 1.1, 2.8, 39.7, 0.5, 0.6, 88.1, 0.4, 1.7, 2.1, 0.9, 0.3, 0.4, 0.2, 2.3), (100, 115.0, 'Robert Covington', 'MIN', 11301219, '$12,138,345 ', '$12,975,471 ', None, None, None, 'Cap Space', '$36,415,035 ', 2014, 'F', 6.75, 215.0, '1990-12-14 00:00:00', 'Tennessee State University', 28, 35, 20, 15, 34.4, 13.3, 4.5, 10.3, 43.1, 2.4, 6.4, 37.8, 1.9, 2.5, 76.4, 0.8, 4.7, 5.5, 1.3, 1.3, 2.1, 1.3, 1.8), (101, 116.0, 'Meyers Leonard', 'MIA', 11286515, None, None, None, None, None, 'Cap Space', '$11,286,515 ', 2013, 'C-F', 7.083333333333333, 245.0, '1992-02-27 00:00:00', 'University of Illinois at Urbana-Champaign', 27, 61, 35, 26, 14.4, 5.9, 2.2, 4.0, 54.5, 0.8, 1.8, 45.0, 0.7, 0.8, 84.3, 0.8, 3.0, 3.8, 1.2, 0.7, 0.2, 0.1, -1.3), (102, 117.0, 'T.J. Warren', 'IND', 10810000, '$11,750,000 ', '$12,690,000 ', None, None, None, '1st Round Pick', '$35,250,000 ', 2015, 'F', 6.666666666666667, 230.0, '1993-09-05 00:00:00', 'North Carolina State University', 25, 43, 10, 33, 31.6, 18.0, 6.9, 14.2, 48.6, 1.8, 4.2, 42.8, 2.3, 2.9, 81.5, 0.7, 3.3, 4.0, 1.5, 1.2, 1.2, 0.7, -5.2), (103, 119.0, 'Spencer Dinwiddie', 'BRK', 10600000, '$11,400,000 ', '$12,300,000 ', None, None, None, 'Minimum Salary', '$22,000,000 ', 2015, 'G', 6.5, 200.0, '1993-04-06 00:00:00', 'University of Colorado', 26, 68, 36, 32, 28.1, 16.8, 5.4, 12.2, 44.2, 1.8, 5.4, 33.5, 4.2, 5.2, 80.6, 0.4, 2.1, 2.4, 4.6, 2.2, 0.6, 0.3, -1.2), (104, 120.0, 'Jeremy Lamb', 'IND', 10500000, '$10,500,000 ', '$10,500,000 ', None, None, None, None, '$31,500,000 ', 2013, 'G-F', 6.416666666666667, 185.0, '1992-05-30 00:00:00', 'University of Connecticut', 27, 79, 38, 41, 28.5, 15.3, 5.5, 12.4, 44.0, 1.5, 4.2, 34.8, 2.9, 3.3, 88.8, 0.8, 4.7, 5.5, 2.2, 1.0, 1.1, 0.4, 0.6), (105, 121.0, 'Dwight Powell', 'DAL', 10259375, '$10,185,185 ', '$11,000,000 ', '$11,814,815 ', None, None, 'Cap Space', '$43,259,375 ', 2015, 'F-C', 6.916666666666667, 240.0, '1991-07-20 00:00:00', 'Stanford University', 27, 77, 31, 46, 21.6, 10.6, 3.8, 6.3, 59.7, 0.5, 1.6, 30.7, 2.5, 3.3, 77.2, 1.8, 3.5, 5.3, 1.5, 0.9, 0.6, 0.6, 0.2), (106, 122.0, 'Norman Powell', 'TOR', 10116576, '$10,865,952 ', '$11,615,328 ', None, None, None, 'Cap Space', '$20,982,528 ', 2016, 'G', 6.333333333333333, 215.0, '1993-05-25 00:00:00', 'University of California, Los Angeles', 26, 60, 45, 15, 18.8, 8.6, 3.2, 6.7, 48.3, 1.1, 2.8, 40.0, 1.0, 1.3, 82.7, 0.3, 2.1, 2.3, 1.5, 1.1, 0.7, 0.2, 0.3), (107, 123.0, 'Josh Richardson', 'PHI', 10100000, '$10,800,000 ', '$11,600,000 ', None, None, None, 'Minimum Salary', '$20,900,000 ', 2016, 'G', 6.5, 200.0, '1993-09-15 00:00:00', 'University of Tennessee', 25, 73, 35, 38, 34.8, 16.6, 5.8, 14.1, 41.2, 2.2, 6.3, 35.7, 2.7, 3.2, 86.1, 0.7, 2.9, 3.6, 4.1, 1.5, 1.1, 0.5, 0.9), (108, 125.0, 'DeAndre Jordan', 'BRK', 9881598, '$10,375,678 ', '$9,881,598 ', '$9,821,842 ', None, None, None, '$39,960,716 ', 2009, 'C', 6.916666666666667, 265.0, '1988-07-21 00:00:00', 'Texas A&M University', 30, 69, 25, 44, 29.7, 11.0, 4.1, 6.5, 64.1, 0.0, 0.0, 0.0, 2.7, 3.8, 70.5, 3.3, 9.8, 13.1, 2.3, 2.2, 0.6, 1.1, -3.1), (109, 126.0, 'Taj Gibson', 'NYK', 9800000, '$10,290,000 ', None, None, None, None, None, '$9,800,000 ', 2010, 'F', 
6.75, 225.0, '1985-06-24 00:00:00', 'University of Southern California', 34, 70, 32, 38, 24.1, 10.8, 4.3, 7.7, 56.6, 0.2, 0.5, 32.4, 1.9, 2.5, 75.7, 2.5, 4.1, 6.5, 1.2, 1.0, 0.8, 0.6, -1.2), (110, 128.0, 'Markelle Fultz', 'ORL', 9745200, '$12,288,697 ', None, None, None, None, '1st Round Pick', '$22,033,897 ', 2018, 'G', 6.333333333333333, 195.0, '1998-05-29 00:00:00', 'University of Washington', 21, 19, 12, 7, 22.5, 8.2, 3.4, 8.2, 41.9, 0.2, 0.7, 28.6, 1.1, 1.9, 56.8, 1.4, 2.3, 3.7, 3.1, 1.3, 0.9, 0.3, -0.4), (111, 129.0, 'John Henson', 'CLE', 9732396, None, None, None, None, None, '1st Round Pick', '$9,732,396 ', 2013, 'C-F', 6.916666666666667, 229.0, '1990-12-28 00:00:00', 'University of North Carolina', 28, 14, 10, 4, 13.5, 5.6, 2.2, 4.8, 46.3, 0.8, 2.2, 35.5, 0.4, 0.7, 60.0, 1.1, 3.9, 5.1, 1.0, 0.9, 0.5, 0.8, 0.1), (112, 130.0, 'Matthew Dellavedova', 'CLE', 9607500, None, None, None, None, None, 'Cap Space', '$9,607,500 ', 2014, 'G', 6.333333333333333, 198.0, '1990-09-08 00:00:00', \"Saint Mary's College of California\", 28, 48, 20, 28, 16.9, 5.9, 2.0, 5.0, 40.5, 0.9, 2.7, 33.8, 0.9, 1.1, 80.8, 0.1, 1.5, 1.6, 3.8, 1.4, 0.3, 0.0, -1.3), (113, 131.0, 'Dante Exum', 'UTA', 9600000, '$9,600,000 ', None, None, None, None, 'Cap Space', '$18,200,000 ', 2015, 'G', 6.5, 190.0, '1995-07-13 00:00:00', None, 23, 42, 21, 21, 15.8, 6.9, 2.4, 5.7, 41.9, 0.4, 1.5, 29.0, 1.6, 2.0, 79.1, 0.4, 1.2, 1.6, 2.6, 1.2, 0.3, 0.1, 1.1), (114, 133.0, 'Delon Wright', 'DAL', 9473684, '$9,000,000 ', '$8,526,316 ', None, None, None, None, '$27,000,000 ', 2016, 'G', 6.416666666666667, 183.0, '1992-04-26 00:00:00', 'University of Utah', 27, 75, 45, 30, 22.6, 8.7, 3.2, 7.4, 43.4, 0.7, 2.2, 29.8, 1.6, 2.0, 79.3, 0.9, 2.6, 3.5, 3.3, 1.0, 1.2, 0.4, 0.0), (115, 134.0, 'Jerami Grant', 'DEN', 9346153, '$9,346,153 ', None, None, None, None, None, '$9,346,153 ', 2015, 'F', 6.666666666666667, 210.0, '1994-03-12 00:00:00', 'Syracuse University', 25, 80, 48, 32, 32.7, 13.6, 5.1, 10.3, 49.7, 1.4, 3.7, 39.2, 2.0, 2.8, 71.0, 1.2, 4.0, 5.2, 1.0, 0.8, 0.8, 1.3, 3.8), (116, 135.0, 'Al-Farouq Aminu', 'ORL', 9258000, '$9,720,900 ', '$10,183,800 ', None, None, None, 'MLE', '$29,162,700 ', 2011, 'F', 6.75, 220.0, '1990-09-21 00:00:00', 'Wake Forest University', 28, 81, 52, 29, 28.3, 9.4, 3.2, 7.3, 43.3, 1.2, 3.5, 34.3, 1.9, 2.1, 86.7, 1.4, 6.1, 7.5, 1.3, 0.9, 0.8, 0.4, 4.7), (117, 136.0, 'George Hill', 'MIL', 9133907, '$9,590,602 ', '$10,047,297 ', None, None, None, None, '$21,000,000 ', 2009, 'G', 6.25, 188.0, '1986-05-04 00:00:00', 'Indiana University-Purdue University Indianapolis', 33, 60, 39, 21, 21.7, 7.6, 2.8, 6.3, 45.2, 0.8, 2.6, 31.4, 1.2, 1.4, 82.4, 0.7, 1.8, 2.5, 2.3, 0.9, 0.9, 0.1, 2.4), (118, 453.0, 'George Hill', 'MIL', 1000000, None, None, None, None, None, None, '$21,000,000 ', 2009, 'G', 6.25, 188.0, '1986-05-04 00:00:00', 'Indiana University-Purdue University Indianapolis', 33, 60, 39, 21, 21.7, 7.6, 2.8, 6.3, 45.2, 0.8, 2.6, 31.4, 1.2, 1.4, 82.4, 0.7, 1.8, 2.5, 2.3, 0.9, 0.9, 0.1, 2.4), (119, 137.0, 'Kyle Anderson', 'MEM', 9073050, '$9,505,100 ', '$9,937,150 ', None, None, None, 'MLE', '$28,515,300 ', 2015, 'F-G', 6.75, 230.0, '1993-09-20 00:00:00', 'University of California, Los Angeles', 25, 43, 19, 24, 29.8, 8.0, 3.5, 6.4, 54.3, 0.2, 0.8, 26.5, 0.9, 1.5, 57.8, 1.1, 4.7, 5.8, 3.0, 1.3, 1.3, 0.9, -1.4), (120, 138.0, 'Fred VanVleet', 'TOR', 9000000, None, None, None, None, None, 'Bird Rights', '$9,000,000 ', 2017, 'G', 6.0, 195.0, '1994-02-25 00:00:00', 'Wichita State University', 25, 64, 46, 18, 27.5, 11.0, 3.8, 9.4, 
41.0, 1.8, 4.6, 37.8, 1.5, 1.8, 84.3, 0.3, 2.3, 2.6, 4.8, 1.3, 0.9, 0.3, 4.7), (121, 141.0, 'Lonzo Ball', 'NOP', 8719320, '$11,003,782 ', None, None, None, None, '1st Round Pick', '$8,719,320 ', 2018, 'G', 6.5, 190.0, '1997-10-27 00:00:00', 'University of California, Los Angeles', 21, 47, 25, 22, 30.3, 9.9, 3.9, 9.7, 40.6, 1.6, 4.9, 32.9, 0.4, 1.0, 41.7, 1.1, 4.2, 5.3, 5.4, 2.2, 1.5, 0.4, -0.3), (122, 142.0, \"E'Twaun Moore\", 'NOP', 8664928, None, None, None, None, None, 'Cap Space', '$8,664,928 ', 2012, 'G', 6.333333333333333, 191.0, '1989-02-28 00:00:00', 'Purdue University', 30, 53, 21, 32, 27.6, 11.9, 4.8, 10.0, 48.1, 1.4, 3.3, 43.2, 0.8, 1.1, 76.3, 0.7, 1.7, 2.4, 1.9, 1.1, 0.8, 0.2, 0.2), (123, 145.0, 'Tyus Jones', 'MEM', 8408000, '$7,965,100 ', '$7,522,200 ', None, None, None, 'MLE', '$23,895,300 ', 2016, 'G', 6.166666666666667, 195.0, '1996-05-10 00:00:00', 'Duke University', 23, 68, 29, 39, 22.9, 6.9, 2.7, 6.6, 41.5, 0.6, 1.9, 31.7, 0.9, 1.0, 84.1, 0.3, 1.6, 2.0, 4.8, 0.7, 1.2, 0.1, 0.0), (124, 148.0, 'Ben Simmons', 'PHI', 8113930, '$29,250,000 ', '$31,590,000 ', '$33,930,000 ', '$36,270,000 ', '$38,610,000 ', '1st Round pick', '$177,763,930 ', 2018, 'G-F', 6.833333333333333, 230.0, '1996-07-20 00:00:00', 'Louisiana State University', 22, 79, 50, 29, 34.2, 16.9, 6.8, 12.2, 56.3, 0.0, 0.1, 0.0, 3.3, 5.4, 60.0, 2.2, 6.6, 8.8, 7.7, 3.5, 1.4, 0.8, 1.5), (125, 149.0, 'Kentavious Caldwell-Pope', 'LAL', 8089282, '$8,493,746 ', None, None, None, None, None, '$8,089,282 ', 2014, 'G', 6.416666666666667, 205.0, '1993-02-18 00:00:00', 'University of Georgia', 26, 82, 37, 45, 24.8, 11.4, 4.0, 9.2, 43.0, 1.8, 5.3, 34.7, 1.7, 1.9, 86.7, 0.6, 2.3, 2.9, 1.3, 0.8, 0.9, 0.2, -1.9), (126, 150.0, 'Maxi Kleber', 'DAL', 8000000, '$8,250,000 ', '$8,750,000 ', '$9,000,000 ', None, None, None, '$25,000,000 ', 2018, 'F', 6.833333333333333, 220.0, '1992-01-29 00:00:00', None, 27, 71, 30, 41, 21.2, 6.8, 2.5, 5.4, 45.3, 1.1, 3.1, 35.3, 0.8, 1.0, 78.4, 1.3, 3.4, 4.6, 1.0, 0.8, 0.5, 1.1, 0.9), (127, 151.0, 'Lou Williams', 'LAC', 8000000, '$8,000,000 ', None, None, None, None, 'Cap Space', '$16,000,000 ', 2006, 'G', 6.083333333333333, 175.0, '1986-10-27 00:00:00', None, 32, 75, 47, 28, 26.6, 20.0, 6.5, 15.2, 42.5, 1.4, 3.9, 36.1, 5.7, 6.5, 87.6, 0.5, 2.4, 3.0, 5.4, 2.4, 0.8, 0.1, 2.6), (128, 152.0, 'Elfrid Payton', 'NYK', 8000000, '$8,000,000 ', None, None, None, None, None, '$9,000,000 ', 2015, 'G', 6.333333333333333, 185.0, '1994-02-22 00:00:00', 'University of Louisiana at Lafayette', 25, 42, 18, 24, 29.8, 10.6, 4.3, 9.8, 43.4, 0.8, 2.5, 31.4, 1.3, 1.8, 74.3, 1.2, 4.1, 5.2, 7.6, 2.7, 1.0, 0.4, -1.6), (129, 153.0, 'Thomas Bryant', 'WAS', 7936508, '$8,333,333 ', '$8,730,159 ', None, None, None, None, '$25,000,000 ', 2018, 'C', 6.833333333333333, 248.0, '1997-07-31 00:00:00', 'Indiana University', 21, 72, 29, 43, 20.8, 10.5, 4.3, 7.0, 61.6, 0.5, 1.4, 33.3, 1.5, 1.9, 78.1, 1.6, 4.7, 6.3, 1.3, 0.8, 0.3, 0.9, -1.5), (130, 155.0, 'Jayson Tatum', 'BOS', 7830000, '$9,897,120 ', None, None, None, None, '1st Round Pick', '$7,830,000 ', 2018, 'F', 6.666666666666667, 205.0, '1998-03-03 00:00:00', 'Duke University', 21, 79, 48, 31, 31.1, 15.7, 5.9, 13.1, 45.0, 1.5, 3.9, 37.3, 2.5, 2.9, 85.5, 0.9, 5.2, 6.0, 2.1, 1.5, 1.1, 0.7, 4.6), (131, 156.0, 'Jae Crowder', 'MEM', 7815533, None, None, None, None, None, 'Bird Rights', '$7,815,533 ', 2013, 'F', 6.5, 235.0, '1990-07-06 00:00:00', 'Marquette University', 28, 80, 48, 32, 27.1, 11.9, 4.0, 10.0, 39.9, 2.2, 6.5, 33.1, 1.8, 2.5, 72.1, 0.8, 4.1, 4.8, 1.7, 1.1, 0.8, 0.4, 2.8), (132, 
157.0, 'Wayne Ellington', 'NYK', 7804878, '$8,195,122 ', None, None, None, None, None, '$7,804,878 ', 2010, 'G', 6.333333333333333, 200.0, '1987-10-29 00:00:00', 'University of North Carolina', 31, 53, 26, 27, 24.5, 10.3, 3.5, 8.6, 40.3, 2.6, 7.0, 37.1, 0.7, 0.9, 79.6, 0.3, 1.8, 2.0, 1.4, 0.8, 1.0, 0.1, -0.2), (133, 159.0, 'Joe Harris', 'BRK', 7670000, None, None, None, None, None, 'Early Bird Rights', '$7,670,000 ', 2015, 'G', 6.5, 219.0, '1991-09-06 00:00:00', 'University of Virginia', 27, 76, 41, 35, 30.2, 13.7, 4.9, 9.8, 50.0, 2.4, 5.1, 47.4, 1.4, 1.8, 82.7, 0.7, 3.1, 3.8, 2.4, 1.6, 0.5, 0.2, -1.0), (134, 160.0, 'Seth Curry', 'DAL', 7441860, '$7,813,953 ', '$8,186,047 ', '$8,558,140 ', None, None, 'MLE', '$32,000,000 ', 2014, 'G', 6.166666666666667, 185.0, '1990-08-23 00:00:00', 'Duke University', 28, 74, 49, 25, 18.9, 7.9, 2.9, 6.3, 45.6, 1.5, 3.4, 45.0, 0.6, 0.7, 84.6, 0.4, 1.3, 1.6, 0.9, 0.8, 0.5, 0.2, 1.8), (135, 161.0, 'Doug McDermott', 'IND', 7333334, '$7,333,333 ', None, None, None, None, None, '$14,666,667 ', 2015, 'F', 6.666666666666667, 225.0, '1992-01-03 00:00:00', 'Creighton University', 27, 77, 43, 34, 17.4, 7.3, 2.7, 5.5, 49.1, 1.1, 2.7, 40.8, 0.9, 1.0, 83.5, 0.2, 1.2, 1.4, 0.9, 0.5, 0.2, 0.1, 1.7), (136, 162.0, 'Langston Galloway', 'DET', 7333333, None, None, None, None, None, 'MLE', '$7,333,333 ', 2015, 'G', 6.166666666666667, 200.0, '1991-12-09 00:00:00', \"Saint Joseph's University\", 27, 80, 41, 39, 21.8, 8.4, 2.9, 7.3, 38.8, 1.7, 4.8, 35.5, 1.0, 1.2, 84.4, 0.6, 1.5, 2.1, 1.1, 0.3, 0.5, 0.1, 0.5), (137, 163.0, 'Derrick Rose', 'DET', 7317073, '$7,682,927 ', None, None, None, None, 'MLE', '$15,000,000 ', 2009, 'G', 6.25, 190.0, '1988-10-04 00:00:00', 'University of Memphis', 30, 51, 24, 27, 27.3, 18.0, 7.1, 14.8, 48.2, 1.1, 2.9, 37.0, 2.7, 3.1, 85.6, 0.6, 2.1, 2.7, 4.3, 1.6, 0.6, 0.2, 0.7), (138, 164.0, 'Brandon Ingram', 'NOP', 7265485, None, None, None, None, None, '1st Round pick', '$7,265,485 ', 2017, 'F', 6.75, 190.0, '1997-09-02 00:00:00', 'Duke University', 21, 52, 25, 27, 33.9, 18.3, 7.0, 14.0, 49.7, 0.6, 1.8, 33.0, 3.8, 5.6, 67.5, 0.8, 4.3, 5.1, 3.0, 2.5, 0.5, 0.6, -1.1), (139, 165.0, 'D.J. 
Augustin', 'ORL', 7250000, None, None, None, None, None, 'Cap Space', '$7,250,000 ', 2009, 'G', 6.0, 183.0, '1987-11-10 00:00:00', 'University of Texas at Austin', 31, 81, 42, 39, 28.0, 11.7, 3.9, 8.4, 47.0, 1.6, 3.8, 42.1, 2.2, 2.6, 86.6, 0.5, 2.0, 2.5, 5.3, 1.6, 0.6, 0.0, 2.1), (140, 166.0, 'Darius Miller', 'NOP', 7250000, '$7,000,000 ', None, None, None, None, None, '$7,250,000 ', 2013, 'F', 6.666666666666667, 235.0, '1990-03-21 00:00:00', 'University of Kentucky', 29, 69, 31, 38, 25.5, 8.2, 2.7, 7.0, 39.0, 1.9, 5.3, 36.5, 0.8, 1.0, 78.9, 0.2, 1.7, 1.9, 2.1, 0.9, 0.6, 0.3, 0.2), (141, 168.0, 'Josh Jackson', 'MEM', 7059480, '$8,930,242 ', None, None, None, None, '1st Round Pick', '$7,059,480 ', 2018, 'F-G', 6.666666666666667, 207.0, '1997-02-10 00:00:00', 'University of Kansas', 22, 79, 19, 60, 25.2, 11.5, 4.4, 10.6, 41.3, 0.9, 2.8, 32.4, 1.8, 2.7, 67.1, 0.8, 3.6, 4.4, 2.3, 2.2, 0.9, 0.7, -5.4), (142, 169.0, 'DeMarre Carroll', 'SAS', 7000000, '$6,650,000 ', '$7,000,000 ', None, None, None, None, '$15,000,000 ', 2010, 'F', 6.666666666666667, 215.0, '1986-07-27 00:00:00', 'University of Missouri', 32, 67, 36, 31, 25.4, 11.1, 3.4, 8.6, 39.5, 1.6, 4.6, 34.2, 2.7, 3.6, 76.0, 1.0, 4.2, 5.2, 1.3, 1.1, 0.5, 0.1, 0.7), (143, 173.0, 'Nemanja Bjelica', 'SAC', 6825000, '$7,150,000 ', None, None, None, None, None, '$6,825,000 ', 2016, 'F', 6.833333333333333, 240.0, '1988-05-09 00:00:00', None, 31, 77, 38, 39, 23.2, 9.6, 3.7, 7.7, 47.9, 1.3, 3.3, 40.1, 0.9, 1.2, 76.1, 1.6, 4.1, 5.8, 1.9, 1.1, 0.7, 0.7, 1.1), (144, 174.0, 'Jaylen Brown', 'BOS', 6534829, None, None, None, None, None, '1st Rd pick', '$6,534,829 ', 2017, 'F-G', 6.583333333333333, 225.0, '1996-10-24 00:00:00', 'University of California', 22, 74, 41, 33, 25.9, 13.0, 5.0, 10.7, 46.5, 1.3, 3.7, 34.4, 1.8, 2.7, 65.8, 0.9, 3.4, 4.2, 1.4, 1.3, 0.9, 0.4, 0.9), (145, 175.0, 'Jabari Parker', 'ATL', 6500000, '$6,500,000 ', None, None, None, None, None, '$6,500,000 ', 2015, 'F', 6.666666666666667, 250.0, '1995-03-15 00:00:00', 'Duke University', 24, 64, 18, 46, 26.9, 14.5, 5.8, 11.7, 49.3, 1.0, 3.0, 31.3, 2.0, 2.9, 71.2, 1.2, 5.3, 6.6, 2.4, 2.4, 0.7, 0.5, -3.2), (146, 176.0, 'Ivica Zubac', 'LAC', 6481482, '$7,000,000 ', '$7,518,518 ', '$7,518,518 ', None, None, None, '$21,000,000 ', 2017, 'C', 7.083333333333333, 265.0, '1997-03-18 00:00:00', None, 22, 59, 32, 27, 17.6, 8.9, 3.6, 6.4, 55.9, 0.0, 0.0, 0.0, 1.7, 2.1, 80.2, 1.9, 4.2, 6.1, 1.1, 1.2, 0.2, 0.9, 0.7), (147, 177.0, 'Joakim Noah', 'NYK', 6431666, '$6,431,666 ', '$6,431,666 ', None, None, None, None, '$19,294,998 ', 2008, 'C', 6.916666666666667, 230.0, '1985-02-25 00:00:00', 'University of Florida', 34, 42, 14, 28, 16.5, 7.1, 2.6, 5.1, 51.6, 0.0, 0.0, 0.0, 1.9, 2.6, 71.6, 1.4, 4.3, 5.7, 2.1, 1.2, 0.5, 0.7, 0.1), (148, 179.0, \"De'Aaron Fox\", 'SAC', 6392760, '$8,099,627 ', None, None, None, None, '1st Round Pick', '$6,392,760 ', 2018, 'G', 6.25, 170.0, '1997-12-20 00:00:00', 'University of Kentucky', 21, 81, 39, 42, 31.4, 17.3, 6.2, 13.6, 45.8, 1.1, 2.9, 37.1, 3.7, 5.1, 72.7, 0.5, 3.2, 3.8, 7.3, 2.8, 1.6, 0.6, 0.8), (149, 181.0, 'Montrezl Harrell', 'LAC', 6000000, None, None, None, None, None, None, '$6,000,000 ', 2016, 'F-C', 6.666666666666667, 240.0, '1994-01-26 00:00:00', 'University of Louisville', 25, 82, 48, 34, 26.3, 16.6, 6.7, 10.8, 61.5, 0.0, 0.2, 17.6, 3.2, 5.0, 64.3, 2.2, 4.3, 6.5, 2.0, 1.6, 0.9, 1.3, 0.7), (150, 182.0, 'Ish Smith', 'WAS', 5853659, '$6,146,341 ', None, None, None, None, 'MLE', '$12,000,000 ', 2011, 'G', 6.0, 175.0, '1988-07-05 00:00:00', 'Wake Forest University', 30, 
56, 33, 23, 22.3, 8.9, 3.7, 8.7, 41.9, 0.8, 2.4, 32.6, 0.8, 1.1, 75.8, 0.4, 2.2, 2.6, 3.6, 1.1, 0.5, 0.2, 1.5), (151, 183.0, 'Marco Belinelli', 'SAS', 5846154, None, None, None, None, None, 'MLE', '$5,846,154 ', 2008, 'G-F', 6.416666666666667, 210.0, '1986-03-25 00:00:00', None, 33, 79, 46, 33, 23.0, 10.5, 3.6, 8.7, 41.3, 1.9, 5.0, 37.2, 1.4, 1.6, 90.3, 0.2, 2.3, 2.5, 1.7, 0.9, 0.4, 0.1, 0.4), (152, 185.0, 'Jonathan Isaac', 'ORL', 5806440, '$7,362,566 ', None, None, None, None, '1st Round Pick', '$13,169,006 ', 2018, 'F', 6.833333333333333, 210.0, '1997-10-03 00:00:00', 'Florida State University', 21, 75, 37, 38, 26.6, 9.6, 3.5, 8.1, 42.9, 1.1, 3.5, 32.3, 1.5, 1.8, 81.5, 1.3, 4.2, 5.5, 1.1, 1.0, 0.8, 1.3, 0.1), (153, 186.0, 'Rodney Hood', 'POR', 5718000, '$6,003,900 ', None, None, None, None, 'MLE', '$5,718,000 ', 2015, 'G-F', 6.666666666666667, 206.0, '1992-10-20 00:00:00', 'Duke University', 26, 72, 29, 43, 26.3, 11.2, 4.1, 9.3, 43.5, 1.2, 3.3, 35.6, 1.9, 2.2, 88.4, 0.3, 1.8, 2.2, 1.8, 0.8, 0.8, 0.2, -3.8), (154, 189.0, 'Trey Lyles', 'SAS', 5500000, '$5,500,000 ', None, None, None, None, 'MLE', '$5,500,000 ', 2016, 'F', 6.833333333333333, 234.0, '1995-11-05 00:00:00', 'University of Kentucky', 23, 64, 42, 22, 17.5, 8.5, 3.2, 7.7, 41.8, 0.8, 3.1, 25.5, 1.3, 1.8, 69.8, 0.7, 3.2, 3.8, 1.4, 1.1, 0.5, 0.4, 0.1), (155, 191.0, 'Aron Baynes', 'PHO', 5453280, None, None, None, None, None, None, '$5,453,280 ', 2013, 'C', 6.833333333333333, 260.0, '1986-12-09 00:00:00', 'Washington State University', 32, 51, 32, 19, 16.1, 5.6, 2.1, 4.4, 47.1, 0.4, 1.2, 34.4, 1.0, 1.2, 85.5, 1.7, 3.0, 4.7, 1.1, 0.8, 0.2, 0.7, 2.5), (156, 192.0, 'Kris Dunn', 'CHI', 5348007, None, None, None, None, None, '1st Round pick', '$5,348,007 ', 2017, 'G', 6.333333333333333, 210.0, '1994-03-18 00:00:00', 'Providence College', 25, 46, 14, 32, 30.2, 11.3, 4.7, 11.0, 42.5, 0.7, 2.1, 35.4, 1.2, 1.5, 79.7, 0.4, 3.7, 4.1, 6.0, 2.3, 1.5, 0.5, -4.8), (157, 195.0, 'Lauri Markkanen', 'CHI', 5300400, '$6,731,508 ', None, None, None, None, '1st Round Pick', '$5,300,400 ', 2018, 'F', 7.0, 230.0, '1997-05-22 00:00:00', 'University of Arizona', 22, 52, 16, 36, 32.3, 18.7, 6.6, 15.3, 43.0, 2.3, 6.4, 36.1, 3.3, 3.8, 87.2, 1.4, 7.6, 9.0, 1.4, 1.6, 0.7, 0.6, -3.8), (158, 196.0, 'Ryan Anderson', 'MIA', 5214583, '$5,214,583 ', '$5,214,584 ', None, None, None, None, '$15,643,750 ', 2009, 'F', 6.833333333333333, 240.0, '1988-05-06 00:00:00', 'University of California', 31, 25, 7, 18, 12.9, 2.5, 0.8, 2.8, 30.4, 0.4, 1.6, 22.5, 0.5, 0.6, 75.0, 0.7, 1.4, 2.2, 0.8, 0.6, 0.2, 0.0, -2.9), (159, 305.0, 'Ryan Anderson', 'HOU', 2564753, None, None, None, None, None, 'Minimum Salary', '$250,000 ', 2009, 'F', 6.833333333333333, 240.0, '1988-05-06 00:00:00', 'University of California', 31, 25, 7, 18, 12.9, 2.5, 0.8, 2.8, 30.4, 0.4, 1.6, 22.5, 0.5, 0.6, 75.0, 0.7, 1.4, 2.2, 0.8, 0.6, 0.2, 0.0, -2.9), (160, 198.0, 'Pau Gasol', 'SAS', 5100000, None, None, None, None, None, None, '$5,100,000 ', 2002, 'F-C', 7.0, 250.0, '1980-07-06 00:00:00', None, 38, 30, 17, 13, 12.0, 3.9, 1.4, 3.1, 44.7, 0.2, 0.4, 46.2, 0.9, 1.3, 70.0, 0.7, 3.8, 4.6, 1.7, 0.5, 0.2, 0.5, -1.1), (161, 292.0, 'Pau Gasol', 'POR', 2564753, None, None, None, None, None, 'Minimum Salary', '$2,564,753 ', 2002, 'F-C', 7.0, 250.0, '1980-07-06 00:00:00', None, 38, 30, 17, 13, 12.0, 3.9, 1.4, 3.1, 44.7, 0.2, 0.4, 46.2, 0.9, 1.3, 70.0, 0.7, 3.8, 4.6, 1.7, 0.5, 0.2, 0.5, -1.1), (162, 199.0, 'Daniel Theis', 'BOS', 5000000, '$5,000,000 ', None, None, None, None, None, '$5,000,000 ', 2018, 'C', 6.75, 215.0, 
'1992-04-04 00:00:00', None, 27, 66, 38, 28, 13.8, 5.7, 2.2, 4.0, 54.9, 0.4, 1.0, 38.8, 0.8, 1.2, 73.7, 1.3, 2.1, 3.4, 1.0, 0.5, 0.3, 0.6, 2.0), (163, 200.0, 'Luol Deng', 'LAL', 4990000, '$4,990,000 ', '$4,990,000 ', None, None, None, None, '$14,970,000 ', 2005, 'F', 6.75, 220.0, '1985-04-16 00:00:00', 'Duke University', 34, 22, 11, 11, 17.8, 7.1, 2.7, 5.4, 50.0, 0.6, 2.0, 31.8, 1.1, 1.6, 71.4, 0.9, 2.4, 3.3, 0.8, 0.6, 0.7, 0.4, 4.0), (164, 202.0, 'Buddy Hield', 'SAC', 4861208, None, None, None, None, None, '1st Round pick', '$4,861,208 ', 2017, 'G', 6.333333333333333, 214.0, '1993-12-17 00:00:00', 'University of Oklahoma', 26, 82, 39, 43, 31.9, 20.7, 7.6, 16.6, 45.8, 3.4, 7.9, 42.7, 2.1, 2.4, 88.6, 1.3, 3.7, 5.0, 2.5, 1.8, 0.7, 0.4, 0.0), (165, 203.0, 'Frank Ntilikina', 'NYK', 4855800, '$6,176,578 ', None, None, None, None, '1st Round Pick', '$4,855,800 ', 2018, 'G', 6.416666666666667, 190.0, '1998-07-28 00:00:00', None, 20, 43, 9, 34, 21.0, 5.7, 2.2, 6.6, 33.7, 0.8, 2.7, 28.7, 0.5, 0.7, 76.7, 0.3, 1.7, 2.0, 2.8, 1.3, 0.7, 0.3, -4.3), (166, 204.0, 'Ed Davis', 'UTA', 4767000, '$5,005,350 ', None, None, None, None, 'Room Exception', '$9,772,350 ', 2011, 'F', 6.833333333333333, 240.0, '1989-06-05 00:00:00', 'University of North Carolina', 30, 81, 42, 39, 17.9, 5.8, 2.3, 3.7, 61.6, 0.0, 0.0, 0.0, 1.2, 2.0, 61.7, 2.7, 5.9, 8.6, 0.8, 0.8, 0.4, 0.4, 1.2), (167, 205.0, 'Richaun Holmes', 'SAC', 4767000, '$5,005,350 ', None, None, None, None, 'Room Exception', '$9,772,350 ', 2016, 'F-C', 6.833333333333333, 245.0, '1993-10-15 00:00:00', 'Bowling Green State University', 25, 70, 16, 54, 16.9, 8.2, 3.2, 5.2, 60.8, 0.0, 0.0, 0.0, 1.8, 2.5, 73.1, 1.6, 3.1, 4.7, 0.9, 0.7, 0.6, 1.1, -2.5), (168, 206.0, 'Frank Kaminsky', 'PHO', 4767000, '$5,005,350 ', None, None, None, None, 'Room Exception', '$9,772,350 ', 2016, 'F-C', 7.0, 242.0, '1993-04-04 00:00:00', 'University of Wisconsin', 26, 47, 22, 25, 16.1, 8.6, 2.9, 6.3, 46.3, 1.1, 3.0, 36.0, 1.7, 2.3, 73.8, 0.8, 2.6, 3.5, 1.3, 0.9, 0.3, 0.3, 0.9), (169, 207.0, 'Mike Scott', 'PHI', 4767000, '$5,005,350 ', None, None, None, None, 'Room Exception', '$9,772,350 ', 2013, 'F', 6.666666666666667, 237.0, '1988-07-16 00:00:00', 'University of Virginia', 30, 79, 44, 35, 17.7, 5.8, 2.1, 5.3, 40.0, 1.3, 3.2, 40.1, 0.3, 0.4, 66.7, 0.5, 2.9, 3.5, 0.8, 0.6, 0.3, 0.2, 0.5), (170, 208.0, 'Avery Bradley', 'LAL', 4767000, '$5,005,350 ', None, None, None, None, 'Room Exception', '$9,772,350 ', 2011, 'G', 6.166666666666667, 180.0, '1990-11-26 00:00:00', 'University of Texas at Austin', 28, 63, 31, 32, 30.2, 9.9, 3.9, 9.7, 40.8, 1.4, 3.9, 35.1, 0.7, 0.8, 86.0, 0.7, 2.1, 2.8, 2.4, 1.4, 0.7, 0.3, -1.3), (171, 350.0, 'Avery Bradley', 'MEM', 2000000, None, None, None, None, None, None, '$2,000,000 ', 2011, 'G', 6.166666666666667, 180.0, '1990-11-26 00:00:00', 'University of Texas at Austin', 28, 63, 31, 32, 30.2, 9.9, 3.9, 9.7, 40.8, 1.4, 3.9, 35.1, 0.7, 0.8, 86.0, 0.7, 2.1, 2.8, 2.4, 1.4, 0.7, 0.3, -1.3), (172, 209.0, 'Robin Lopez', 'MIL', 4767000, '$5,005,350 ', None, None, None, None, 'Room Exception', '$4,767,000 ', 2009, 'C', 7.0, 255.0, '1988-04-01 00:00:00', 'Stanford University', 31, 74, 20, 54, 21.7, 9.5, 4.1, 7.2, 56.8, 0.1, 0.4, 22.6, 1.2, 1.7, 72.4, 1.9, 2.0, 3.9, 1.2, 1.3, 0.1, 1.1, -1.9), (173, 210.0, 'JaMychal Green', 'LAC', 4767000, '$5,005,350 ', None, None, None, None, 'Room Exception', '$4,767,000 ', 2015, 'F', 6.75, 227.0, '1990-06-21 00:00:00', 'University of Alabama', 29, 65, 31, 34, 21.1, 9.4, 3.5, 7.3, 48.3, 1.1, 2.7, 40.3, 1.2, 1.6, 79.2, 1.6, 4.7, 6.3, 0.8, 
1.3, 0.7, 0.5, -3.3), (174, 211.0, 'Justin Holiday', 'IND', 4767000, None, None, None, None, None, 'Room Exception', '$4,767,000 ', 2013, 'G', 6.5, 185.0, '1989-04-05 00:00:00', 'University of Washington', 30, 82, 25, 57, 31.8, 10.5, 3.7, 9.5, 38.6, 2.0, 5.7, 34.8, 1.2, 1.3, 89.6, 0.6, 3.4, 3.9, 1.8, 1.3, 1.5, 0.4, -5.5), (175, 212.0, 'Garrett Temple', 'BRK', 4767000, '$5,005,350 ', None, None, None, None, 'Room Exception', '$4,767,000 ', 2010, 'G', 6.5, 195.0, '1986-05-08 00:00:00', 'Louisiana State University', 33, 75, 37, 38, 27.2, 7.8, 2.8, 6.6, 42.2, 1.2, 3.5, 34.1, 1.1, 1.4, 74.8, 0.4, 2.5, 2.9, 1.4, 0.9, 1.0, 0.4, -1.1), (176, 213.0, 'Enes Kanter', 'BOS', 4767000, '$5,005,350 ', None, None, None, None, 'Room Exception', '$4,767,000 ', 2012, 'C', 6.916666666666667, 245.0, '1992-05-20 00:00:00', None, 27, 67, 28, 39, 24.5, 13.7, 5.6, 10.2, 54.9, 0.1, 0.5, 29.4, 2.3, 2.9, 78.7, 3.8, 6.0, 9.8, 1.7, 1.8, 0.5, 0.4, -2.8), (177, 215.0, 'Rodney McGruder', 'LAC', 4629630, '$5,000,000 ', '$5,370,370 ', None, None, None, None, '$15,000,000 ', 2017, 'G', 6.333333333333333, 205.0, '1991-07-29 00:00:00', 'Kansas State University', 27, 66, 31, 35, 23.5, 7.6, 2.8, 7.0, 40.3, 1.2, 3.4, 35.1, 0.8, 1.1, 72.2, 0.9, 2.7, 3.6, 1.7, 1.0, 0.5, 0.2, -0.6), (178, 216.0, 'Kevon Looney', 'GSW', 4629630, '$5,000,000 ', '$5,370,370 ', None, None, None, None, '$9,629,630 ', 2016, 'F-C', 6.75, 220.0, '1996-02-06 00:00:00', 'University of California, Los Angeles', 23, 80, 56, 24, 18.5, 6.3, 2.7, 4.3, 62.5, 0.0, 0.1, 10.0, 0.8, 1.3, 61.9, 2.4, 2.8, 5.2, 1.5, 0.6, 0.6, 0.7, 3.8), (179, 219.0, 'Jamal Murray', 'DEN', 4444746, None, None, None, None, None, '1st Round pick', '$4,444,746 ', 2017, 'G', 6.333333333333333, 207.0, '1997-02-23 00:00:00', 'University of Kentucky', 22, 75, 49, 26, 32.6, 18.2, 6.8, 15.6, 43.7, 2.0, 5.5, 36.7, 2.5, 3.0, 84.8, 0.9, 3.4, 4.2, 4.8, 2.1, 0.9, 0.4, 3.3), (180, 222.0, 'Zach Collins', 'POR', 4240200, '$5,406,255 ', None, None, None, None, '1st Round Pick', '$9,646,455 ', 2018, 'C-F', 7.0, 232.0, '1997-11-19 00:00:00', 'Gonzaga University', 21, 77, 51, 26, 17.6, 6.6, 2.5, 5.2, 47.3, 0.5, 1.6, 33.1, 1.2, 1.6, 74.6, 1.4, 2.8, 4.2, 0.9, 1.0, 0.3, 0.9, -0.1), (181, 224.0, 'Alex Len', 'ATL', 4160000, None, None, None, None, None, 'Room Exception', '$4,160,000 ', 2014, 'C', 7.083333333333333, 260.0, '1993-06-16 00:00:00', 'University of Maryland', 26, 77, 28, 49, 20.1, 11.1, 4.2, 8.4, 49.4, 1.0, 2.6, 36.3, 1.8, 2.8, 64.8, 2.1, 3.5, 5.5, 1.1, 1.3, 0.4, 0.9, -2.1), (182, 226.0, 'Malik Monk', 'CHO', 4028400, '$5,345,687 ', None, None, None, None, '1st Round Pick', '$4,028,400 ', 2018, 'G', 6.25, 200.0, '1998-02-04 00:00:00', 'University of Kentucky', 21, 73, 34, 39, 17.2, 8.9, 3.1, 8.0, 38.7, 1.5, 4.5, 33.0, 1.2, 1.4, 88.2, 0.2, 1.7, 1.9, 1.6, 1.2, 0.5, 0.3, -0.6), (183, 227.0, 'Dorian Finney-Smith', 'DAL', 4000000, '$4,000,000 ', '$4,000,000 ', None, None, None, None, '$12,000,000 ', 2017, 'F', 6.666666666666667, 220.0, '1993-05-04 00:00:00', 'University of Florida', 26, 81, 33, 48, 24.5, 7.5, 2.8, 6.5, 43.2, 1.0, 3.1, 31.1, 0.9, 1.3, 70.9, 1.7, 3.1, 4.8, 1.2, 0.9, 0.9, 0.4, -1.1), (184, 228.0, 'JaVale McGee', 'LAL', 4000000, '$4,200,000 ', None, None, None, None, None, '$8,200,000 ', 2009, 'C', 7.0, 270.0, '1988-01-19 00:00:00', 'University of Nevada, Reno', 31, 75, 35, 40, 22.3, 12.0, 5.3, 8.5, 62.4, 0.0, 0.2, 8.3, 1.3, 2.0, 63.2, 2.6, 4.9, 7.5, 0.7, 1.4, 0.6, 2.0, -2.1), (185, 229.0, 'Patrick McCaw', 'TOR', 4000000, '$4,000,000 ', None, None, None, None, 'MLE', '$8,000,000 ', 2017, 'G', 
6.583333333333333, 185.0, '1995-10-25 00:00:00', 'University of Nevada, Las Vegas', 23, 29, 17, 12, 13.7, 2.6, 0.9, 2.2, 41.3, 0.3, 1.0, 32.1, 0.4, 0.5, 86.7, 0.2, 1.4, 1.7, 1.0, 0.6, 0.8, 0.1, -2.1), (186, 230.0, 'Reggie Bullock', 'NYK', 4000000, '$4,200,000 ', None, None, None, None, 'Room Exception', '$5,000,000 ', 2014, 'G-F', 6.583333333333333, 205.0, '1991-03-16 00:00:00', 'University of North Carolina', 28, 63, 27, 36, 29.8, 11.3, 3.9, 9.4, 41.2, 2.3, 6.2, 37.7, 1.2, 1.3, 85.9, 0.3, 2.4, 2.7, 2.0, 1.0, 0.6, 0.2, -0.8), (187, 234.0, 'Luke Kennard', 'DET', 3827160, '$5,273,826 ', None, None, None, None, '1st Round Pick', '$3,827,160 ', 2018, 'G', 6.5, 206.0, '1996-06-24 00:00:00', 'Duke University', 23, 63, 30, 33, 22.8, 9.7, 3.6, 8.3, 43.8, 1.7, 4.3, 39.4, 0.8, 1.0, 83.6, 0.2, 2.7, 2.9, 1.8, 0.9, 0.4, 0.2, 0.8), (188, 238.0, 'Donovan Mitchell', 'UTA', 3635760, '$5,195,501 ', None, None, None, None, '1st Round Pick', '$3,635,760 ', 2018, 'G', 6.25, 211.0, '1996-09-07 00:00:00', 'University of Louisville', 22, 77, 49, 28, 33.7, 23.8, 8.6, 19.9, 43.2, 2.4, 6.7, 36.2, 4.1, 5.1, 80.6, 0.8, 3.3, 4.1, 4.2, 2.8, 1.4, 0.4, 4.8), (189, 239.0, 'Stanley Johnson', 'TOR', 3623000, '$3,801,000 ', None, None, None, None, 'Bi-annual Exception', '$3,623,000 ', 2016, 'F', 6.583333333333333, 245.0, '1996-05-29 00:00:00', 'University of Arizona', 23, 66, 28, 38, 18.3, 6.9, 2.6, 6.7, 38.9, 0.9, 3.3, 28.8, 0.8, 1.0, 78.1, 0.5, 2.8, 3.3, 1.3, 1.2, 0.9, 0.2, -3.0), (190, 240.0, 'Jake Layman', 'MIN', 3581986, '$3,761,085 ', '$3,940,184 ', None, None, None, None, '$11,283,255 ', 2017, 'F', 6.75, 210.0, '1994-03-07 00:00:00', 'University of Maryland', 25, 71, 46, 25, 18.7, 7.6, 3.0, 6.0, 50.9, 0.8, 2.5, 32.6, 0.7, 1.0, 70.4, 0.8, 2.3, 3.1, 0.7, 0.6, 0.4, 0.4, 1.0), (191, 241.0, 'Thon Maker', 'DET', 3569643, None, None, None, None, None, '1st Round pick', '$3,569,643 ', 2017, 'F-C', 7.083333333333333, 216.0, '1997-02-25 00:00:00', None, 22, 64, 44, 20, 15.2, 5.0, 1.7, 4.2, 40.7, 0.8, 2.4, 32.0, 0.9, 1.3, 66.7, 0.6, 2.5, 3.2, 0.7, 0.5, 0.3, 0.8, -0.1), (192, 245.0, 'Domantas Sabonis', 'IND', 3529555, None, None, None, None, None, '1st Round pick', '$3,529,555 ', 2017, 'F-C', 6.916666666666667, 240.0, '1996-05-03 00:00:00', 'Gonzaga University', 23, 74, 43, 31, 24.8, 14.1, 5.6, 9.5, 59.0, 0.1, 0.2, 52.9, 2.8, 3.9, 71.5, 2.5, 6.8, 9.3, 2.9, 2.2, 0.6, 0.4, 2.6), (193, 248.0, 'T.J. 
McConnell', 'IND', 3500000, '$3,500,000 ', None, None, None, None, 'Cap Space', '$4,500,000 ', 2016, 'G', 6.166666666666667, 200.0, '1992-03-25 00:00:00', 'University of Arizona', 27, 76, 46, 30, 19.3, 6.4, 2.9, 5.5, 52.5, 0.2, 0.6, 33.3, 0.4, 0.5, 78.4, 0.4, 1.9, 2.3, 3.4, 1.2, 1.0, 0.2, -0.1), (194, 249.0, 'DeMarcus Cousins', 'LAL', 3500000, None, None, None, None, None, None, '$3,500,000 ', 2011, 'C-F', 6.916666666666667, 270.0, '1990-08-13 00:00:00', 'University of Kentucky', 28, 30, 23, 7, 25.7, 16.3, 5.9, 12.4, 48.0, 0.9, 3.2, 27.4, 3.5, 4.8, 73.6, 1.4, 6.8, 8.2, 3.6, 2.4, 1.3, 1.5, 2.2), (195, 253.0, 'Bam Adebayo', 'MIA', 3454080, '$5,115,492 ', None, None, None, None, '1st Round Pick', '$8,569,572 ', 2018, 'C-F', 6.833333333333333, 243.0, '1997-07-18 00:00:00', 'University of Kentucky', 21, 82, 39, 43, 23.3, 8.9, 3.4, 5.9, 57.6, 0.0, 0.2, 20.0, 2.0, 2.8, 73.5, 2.0, 5.3, 7.3, 2.2, 1.5, 0.9, 0.8, 0.3), (196, 254.0, 'Kyle Korver', 'PHO', 3440000, None, None, None, None, None, None, '$3,440,000 ', 2004, 'G-F', 6.583333333333333, 212.0, '1981-03-17 00:00:00', 'Creighton University', 38, 70, 39, 31, 19.1, 8.6, 2.9, 6.9, 41.6, 2.0, 5.0, 39.7, 0.9, 1.0, 82.2, 0.1, 2.2, 2.3, 1.2, 0.8, 0.4, 0.2, 1.9), (197, 293.0, 'Kyle Korver', 'MIL', 2564753, None, None, None, None, None, 'Minimum Salary', '$2,564,753 ', 2004, 'G-F', 6.583333333333333, 212.0, '1981-03-17 00:00:00', 'Creighton University', 38, 70, 39, 31, 19.1, 8.6, 2.9, 6.9, 41.6, 2.0, 5.0, 39.7, 0.9, 1.0, 82.2, 0.1, 2.2, 2.3, 1.2, 0.8, 0.4, 0.2, 1.9), (198, 259.0, 'Justin Jackson', 'DAL', 3280920, '$5,029,650 ', None, None, None, None, '1st Round Pick', '$8,310,570 ', 2018, 'G-F', 6.666666666666667, 200.0, '1995-03-28 00:00:00', 'University of North Carolina', 24, 81, 36, 45, 19.9, 7.2, 2.7, 6.0, 44.7, 1.1, 3.0, 35.5, 0.8, 1.0, 78.5, 0.5, 2.1, 2.6, 1.2, 0.4, 0.4, 0.2, -0.6), (199, 261.0, 'Markieff Morris', 'DET', 3200000, '$3,360,000 ', None, None, None, None, 'Bi-annual Exception', '$3,200,000 ', 2012, 'F', 6.833333333333333, 245.0, '1989-09-02 00:00:00', 'University of Kansas', 29, 58, 24, 34, 21.9, 9.4, 3.5, 8.3, 42.1, 1.2, 3.5, 33.5, 1.2, 1.6, 77.2, 1.1, 3.5, 4.6, 1.4, 0.9, 0.6, 0.4, -2.8), (200, 262.0, 'Jon Leuer', 'MIL', 3169348, '$3,169,348 ', '$3,169,347 ', None, None, None, None, '$9,508,043 ', 2012, 'F', 6.833333333333333, 228.0, '1989-05-14 00:00:00', 'University of Wisconsin', 30, 41, 16, 25, 9.8, 3.8, 1.6, 2.8, 58.4, 0.0, 0.3, 9.1, 0.6, 0.8, 74.2, 0.7, 1.7, 2.4, 0.3, 0.6, 0.3, 0.1, -2.8), (201, 263.0, 'Yogi Ferrell', 'SAC', 3150000, None, None, None, None, None, 'Cap Space', '$3,150,000 ', 2017, 'G', 6.0, 180.0, '1993-05-09 00:00:00', 'Indiana University', 26, 71, 33, 38, 15.0, 5.9, 2.2, 5.0, 43.5, 0.8, 2.1, 36.2, 0.8, 0.9, 89.6, 0.2, 1.4, 1.5, 1.9, 0.6, 0.5, 0.1, -0.8), (202, 264.0, 'Guerschon Yabusele', 'BOS', 3117240, None, None, None, None, None, None, '$3,117,240 ', 2018, 'F', 6.583333333333333, 260.0, '1995-12-17 00:00:00', None, 23, 41, 27, 14, 6.1, 2.3, 0.9, 1.9, 45.5, 0.2, 0.7, 32.1, 0.4, 0.5, 68.2, 0.6, 0.7, 1.3, 0.4, 0.4, 0.2, 0.2, -0.4), (203, 266.0, 'Dwight Howard', 'MEM', 3039097, None, None, None, None, None, None, '$3,039,097 ', 2005, 'C', 6.916666666666667, 265.0, '1985-12-08 00:00:00', None, 33, 9, 4, 5, 25.5, 12.8, 4.8, 7.7, 62.3, 0.0, 0.0, 0.0, 3.2, 5.3, 60.4, 2.7, 6.6, 9.2, 0.4, 1.8, 0.8, 0.4, -5.6), (204, 307.0, 'Dwight Howard', 'LAL', 2564753, None, None, None, None, None, None, None, 2005, 'C', 6.916666666666667, 265.0, '1985-12-08 00:00:00', None, 33, 9, 4, 5, 25.5, 12.8, 4.8, 7.7, 62.3, 0.0, 0.0, 
0.0, 3.2, 5.3, 60.4, 2.7, 6.6, 9.2, 0.4, 1.8, 0.8, 0.4, -5.6), (205, 267.0, 'Khem Birch', 'ORL', 3000000, '$3,000,000 ', None, None, None, None, None, '$6,000,000 ', 2018, 'C-F', 6.75, 220.0, '1992-09-28 00:00:00', 'University of Nevada, Las Vegas', 26, 50, 26, 24, 12.9, 4.8, 1.8, 3.0, 60.3, 0.0, 0.0, 0.0, 1.2, 1.7, 69.9, 1.6, 2.2, 3.8, 0.8, 0.4, 0.4, 0.6, 0.9), (206, 268.0, 'Ryan Arcidiacono', 'CHI', 3000000, '$3,000,000 ', '$3,000,000 ', None, None, None, None, '$6,000,000 ', 2018, 'G', 6.25, 188.0, '1994-03-26 00:00:00', 'Villanova University', 25, 81, 22, 59, 24.2, 6.7, 2.3, 5.2, 44.7, 1.0, 2.7, 37.3, 1.1, 1.3, 87.3, 0.3, 2.4, 2.7, 3.3, 0.8, 0.8, 0.0, -3.1), (207, 269.0, 'Quinn Cook', 'LAL', 3000000, '$3,000,000 ', None, None, None, None, None, '$4,000,000 ', 2017, 'G', 6.166666666666667, 184.0, '1993-03-23 00:00:00', 'Duke University', 26, 74, 49, 25, 14.3, 6.9, 2.8, 5.9, 46.5, 1.1, 2.7, 40.5, 0.3, 0.4, 76.9, 0.3, 1.8, 2.1, 1.6, 0.7, 0.3, 0.0, -1.8), (208, 272.0, 'D.J. Wilson', 'MIL', 2961120, '$4,548,280 ', None, None, None, None, '1st Round Pick', '$2,961,120 ', 2018, 'F', 6.833333333333333, 234.0, '1996-02-19 00:00:00', 'University of Michigan', 23, 48, 37, 11, 18.4, 5.8, 2.2, 5.2, 41.4, 1.0, 2.6, 36.2, 0.5, 1.0, 55.3, 0.9, 3.7, 4.6, 1.1, 0.7, 0.4, 0.4, 1.7), (209, 273.0, 'Cedi Osman', 'CLE', 2907143, None, None, None, None, None, 'MLE', '$2,907,143 ', 2018, 'F', 6.666666666666667, 210.0, '1995-04-08 00:00:00', None, 24, 76, 17, 59, 32.2, 13.0, 4.7, 11.1, 42.7, 1.7, 4.9, 34.8, 1.9, 2.4, 77.9, 0.6, 4.1, 4.7, 2.6, 1.5, 0.8, 0.1, -8.4), (210, 275.0, 'Bryn Forbes', 'SAS', 2875000, None, None, None, None, None, 'Early Bird Rights', '$2,875,000 ', 2017, 'G', 6.25, 190.0, '1993-07-23 00:00:00', 'Michigan State University', 25, 82, 48, 34, 28.0, 11.8, 4.4, 9.6, 45.6, 2.1, 5.0, 42.6, 0.8, 1.0, 88.5, 0.2, 2.7, 2.9, 2.1, 1.0, 0.5, 0.0, -0.1), (211, 280.0, 'Alex Caruso', 'LAL', 2750000, '$2,750,000 ', None, None, None, None, None, '$5,500,000 ', 2018, 'G', 6.416666666666667, 186.0, '1994-02-28 00:00:00', 'Texas A&M University', 25, 25, 8, 17, 21.2, 9.2, 3.1, 6.9, 44.5, 1.0, 2.0, 48.0, 2.0, 2.6, 79.7, 0.8, 1.9, 2.7, 3.1, 1.7, 1.0, 0.4, 1.8), (212, 281.0, 'Malik Beasley', 'DEN', 2731714, None, None, None, None, None, '1st Round pick', '$2,731,714 ', 2017, 'G', 6.416666666666667, 196.0, '1996-11-26 00:00:00', 'Florida State University', 22, 81, 53, 28, 23.2, 11.3, 4.3, 9.1, 47.4, 2.0, 5.0, 40.2, 0.7, 0.8, 84.8, 0.4, 2.0, 2.5, 1.2, 0.7, 0.7, 0.1, 1.7), (213, 283.0, 'John Collins', 'ATL', 2686560, '$4,137,302 ', None, None, None, None, '1st Round Pick', '$2,686,560 ', 2018, 'F', 6.833333333333333, 235.0, '1997-09-23 00:00:00', 'Wake Forest University', 21, 61, 24, 37, 30.0, 19.5, 7.6, 13.6, 56.0, 0.9, 2.6, 34.8, 3.3, 4.4, 76.3, 3.6, 6.2, 9.8, 2.0, 2.0, 0.4, 0.6, -1.2), (214, 285.0, 'Caris LeVert', 'BRK', 2625718, '$16,203,704 ', '$17,500,000 ', '$18,796,296 ', None, None, '1st Round pick', '$55,125,718 ', 2017, 'G-F', 6.583333333333333, 203.0, '1994-08-25 00:00:00', 'University of Michigan', 24, 40, 19, 21, 26.6, 13.7, 5.2, 12.1, 42.9, 1.2, 3.9, 31.2, 2.1, 3.1, 69.1, 0.9, 2.9, 3.8, 3.9, 1.7, 1.1, 0.4, -0.3), (215, 287.0, \"DeAndre' Bembry\", 'ATL', 2603982, None, None, None, None, None, '1st Round pick', '$2,603,982 ', 2017, 'F', 6.5, 210.0, '1994-07-04 00:00:00', \"Saint Joseph's University\", 24, 82, 29, 53, 23.6, 8.4, 3.4, 7.5, 44.6, 0.6, 2.1, 28.9, 1.1, 1.7, 64.0, 0.7, 3.7, 4.4, 2.5, 1.7, 1.3, 0.5, -2.8), (216, 290.0, 'Jeff Green', 'UTA', 2564753, None, None, None, None, None, 'Minimum Salary', 
'$2,564,753 ', 2008, 'F', 6.75, 235.0, '1986-08-28 00:00:00', 'Georgetown University', 32, 77, 29, 48, 27.2, 12.3, 4.2, 8.9, 47.5, 1.4, 4.2, 34.7, 2.4, 2.7, 88.8, 0.7, 3.3, 4.0, 1.8, 1.3, 0.6, 0.5, -1.2), (217, 291.0, 'Anthony Tolliver', 'POR', 2564753, None, None, None, None, None, 'Minimum Salary', '$2,564,753 ', 2009, 'F-C', 6.666666666666667, 240.0, '1985-06-01 00:00:00', 'Creighton University', 34, 65, 27, 38, 16.6, 5.0, 1.5, 4.0, 38.2, 1.2, 3.3, 37.7, 0.7, 0.9, 78.3, 0.2, 2.5, 2.7, 0.7, 0.6, 0.3, 0.3, -1.0), (218, 294.0, 'Wesley Matthews', 'MIL', 2564753, '$2,692,991 ', None, None, None, None, None, '$2,564,753 ', 2010, 'G', 6.416666666666667, 220.0, '1986-10-14 00:00:00', 'Marquette University', 32, 69, 27, 42, 30.3, 12.2, 4.0, 10.1, 40.0, 2.2, 5.8, 37.2, 1.9, 2.4, 81.0, 0.5, 2.0, 2.5, 2.3, 1.3, 0.8, 0.2, -1.3), (219, 295.0, 'Udonis Haslem', 'MIA', 2564753, None, None, None, None, None, 'Minimum Salary', '$2,564,753 ', 2004, 'F-C', 6.666666666666667, 235.0, '1980-06-09 00:00:00', 'University of Florida', 39, 10, 8, 2, 7.5, 2.5, 1.1, 3.3, 33.3, 0.0, 1.2, 0.0, 0.3, 0.4, 75.0, 0.3, 2.4, 2.7, 0.2, 0.3, 0.0, 0.0, -3.2), (220, 296.0, 'Rajon Rondo', 'LAL', 2564753, '$2,619,605 ', None, None, None, None, None, '$2,564,753 ', 2007, 'G', 6.083333333333333, 186.0, '1986-02-22 00:00:00', 'University of Kentucky', 33, 46, 19, 27, 29.8, 9.2, 3.8, 9.4, 40.5, 1.1, 3.1, 35.9, 0.5, 0.8, 63.9, 0.7, 4.5, 5.3, 8.0, 2.8, 1.2, 0.2, -5.3), (221, 297.0, 'Jared Dudley', 'LAL', 2564753, None, None, None, None, None, 'Minimum Salary', '$2,564,753 ', 2008, 'F-G', 6.583333333333333, 225.0, '1985-07-10 00:00:00', 'Boston College', 33, 59, 28, 31, 20.7, 4.9, 1.7, 4.1, 42.3, 0.9, 2.6, 35.1, 0.5, 0.8, 69.6, 0.6, 2.1, 2.6, 1.4, 0.7, 0.6, 0.3, 0.6), (222, 298.0, 'Tyson Chandler', 'HOU', 2564753, None, None, None, None, None, 'Minimum Salary', '$2,564,753 ', 2002, 'C', 7.083333333333333, 240.0, '1982-10-02 00:00:00', None, 36, 55, 26, 29, 15.9, 3.1, 1.1, 1.8, 61.6, 0.0, 0.0, 0.0, 0.9, 1.6, 58.6, 1.7, 3.9, 5.6, 0.7, 0.8, 0.4, 0.4, -0.7), (223, 299.0, 'Gerald Green', 'HOU', 2564753, None, None, None, None, None, 'Minimum Salary', '$2,564,753 ', 2006, 'G-F', 6.583333333333333, 205.0, '1986-01-26 00:00:00', None, 33, 73, 47, 26, 20.2, 9.2, 3.2, 7.9, 40.0, 2.1, 6.0, 35.4, 0.8, 0.9, 83.8, 0.4, 2.1, 2.5, 0.5, 0.8, 0.5, 0.4, 1.8), (224, 300.0, 'Thabo Sefolosha', 'HOU', 2564753, None, None, None, None, None, 'Minimum Salary', '$2,564,753 ', 2007, 'G-F', 6.583333333333333, 220.0, '1984-05-02 00:00:00', None, 35, 50, 28, 22, 12.2, 3.8, 1.4, 3.0, 47.7, 0.7, 1.6, 43.6, 0.3, 0.4, 63.6, 0.2, 2.3, 2.5, 0.5, 0.5, 0.9, 0.1, 2.5), (225, 302.0, 'J.J. 
Barea', 'DAL', 2564753, None, None, None, None, None, 'Minimum Salary', '$2,564,753 ', 2007, 'G', 6.0, 185.0, '1984-06-26 00:00:00', 'Northeastern University', 35, 38, 20, 18, 19.8, 10.9, 4.2, 10.1, 41.8, 1.0, 3.4, 29.7, 1.4, 2.1, 70.5, 0.3, 2.2, 2.5, 5.6, 1.9, 0.6, 0.0, 1.5), (226, 303.0, 'Wilson Chandler', 'BRK', 2564753, None, None, None, None, None, 'Minimum Salary', '$2,564,753 ', 2008, 'F', 6.666666666666667, 225.0, '1987-05-10 00:00:00', 'DePaul University', 32, 51, 35, 16, 23.1, 6.0, 2.2, 5.4, 41.8, 1.2, 3.1, 37.3, 0.4, 0.5, 72.0, 0.9, 3.3, 4.2, 1.6, 0.9, 0.5, 0.4, 2.1), (227, 304.0, 'Vince Carter', 'ATL', 2564753, None, None, None, None, None, 'Minimum Salary', '$2,564,753 ', 1999, 'G-F', 6.5, 220.0, '1977-01-26 00:00:00', 'University of North Carolina', 42, 76, 25, 51, 17.5, 7.4, 2.6, 6.2, 41.9, 1.6, 4.2, 38.9, 0.6, 0.9, 71.2, 0.4, 2.1, 2.6, 1.1, 0.6, 0.6, 0.4, -2.4), (228, 310.0, 'Rondae Hollis-Jefferson', 'TOR', 2500000, None, None, None, None, None, 'MLE', '$2,500,000 ', 2016, 'F', 6.583333333333333, 220.0, '1995-01-03 00:00:00', 'University of Arizona', 24, 59, 27, 32, 20.9, 8.9, 3.4, 8.3, 41.1, 0.2, 0.8, 18.4, 2.0, 3.1, 64.5, 1.4, 3.8, 5.3, 1.6, 1.2, 0.7, 0.5, -0.2), (229, 312.0, 'Terrance Ferguson', 'OKC', 2475840, '$3,944,013 ', None, None, None, None, '1st Round Pick', '$2,475,840 ', 2018, 'G-F', 6.583333333333333, 184.0, '1998-05-17 00:00:00', None, 21, 74, 44, 30, 26.1, 6.9, 2.5, 5.8, 42.9, 1.4, 3.9, 36.6, 0.5, 0.7, 72.5, 0.4, 1.5, 1.9, 1.0, 0.6, 0.5, 0.2, 1.9), (230, 315.0, 'Jarrett Allen', 'BRK', 2376840, '$3,909,902 ', None, None, None, None, '1st Round Pick', '$2,376,840 ', 2018, 'F-C', 6.833333333333333, 234.0, '1998-04-21 00:00:00', 'University of Texas at Austin', 21, 80, 42, 38, 26.2, 10.9, 4.2, 7.1, 59.0, 0.1, 0.6, 13.3, 2.5, 3.5, 70.9, 2.4, 6.0, 8.4, 1.4, 1.3, 0.5, 1.5, -1.5), (231, 316.0, 'Pascal Siakam', 'TOR', 2351839, None, None, None, None, None, '1st Round pick', '$2,351,839 ', 2017, 'F', 6.75, 230.0, '1994-02-04 00:00:00', 'New Mexico State University', 25, 80, 57, 23, 31.8, 16.9, 6.5, 11.8, 54.9, 1.0, 2.7, 36.9, 3.0, 3.8, 78.5, 1.6, 5.3, 6.9, 3.1, 1.9, 0.9, 0.7, 7.4), (232, 319.0, 'Patrick Patterson', 'LAC', 2331593, None, None, None, None, None, 'Minimum Salary', '$2,331,593 ', 2011, 'F', 6.75, 230.0, '1989-03-14 00:00:00', 'University of Kentucky', 30, 63, 40, 23, 13.7, 3.6, 1.3, 3.5, 37.4, 0.7, 2.2, 33.6, 0.3, 0.5, 63.3, 0.7, 1.7, 2.3, 0.5, 0.3, 0.3, 0.2, -1.7), (233, 480.0, 'Patrick Patterson', 'OKC', 737067, '$737,067 ', '$737,066 ', None, None, None, None, '$2,211,200 ', 2011, 'F', 6.75, 230.0, '1989-03-14 00:00:00', 'University of Kentucky', 30, 63, 40, 23, 13.7, 3.6, 1.3, 3.5, 37.4, 0.7, 2.2, 33.6, 0.3, 0.5, 63.3, 0.7, 1.7, 2.3, 0.5, 0.3, 0.3, 0.2, -1.7), (234, 321.0, 'Isaiah Thomas', 'WAS', 2320044, None, None, None, None, None, None, '$2,320,044 ', 2012, 'G', 5.75, 185.0, '1989-02-07 00:00:00', 'University of Washington', 30, 12, 6, 6, 15.1, 8.1, 2.8, 8.3, 34.3, 1.0, 3.6, 27.9, 1.4, 2.3, 63.0, 0.4, 0.7, 1.1, 1.9, 1.5, 0.4, 0.1, -1.8), (235, 322.0, 'Alec Burks', 'GSW', 2320044, None, None, None, None, None, 'Minimum Salary', '$2,320,044 ', 2012, 'G', 6.5, 214.0, '1991-07-20 00:00:00', 'University of Colorado', 27, 64, 19, 45, 21.5, 8.8, 3.0, 7.4, 40.5, 1.0, 2.6, 36.3, 1.8, 2.2, 82.3, 0.5, 3.2, 3.7, 2.0, 1.0, 0.6, 0.3, -4.7), (236, 323.0, 'Lance Thomas', 'BRK', 2320044, None, None, None, None, None, None, None, 2012, 'F', 6.666666666666667, 235.0, '1988-04-24 00:00:00', 'Duke University', 31, 46, 7, 39, 17.0, 4.5, 1.7, 4.3, 39.6, 0.5, 1.7, 
27.8, 0.6, 0.8, 75.0, 0.5, 2.1, 2.5, 0.6, 0.5, 0.4, 0.2, -4.1), (237, 324.0, 'Damian Jones', 'ATL', 2305057, None, None, None, None, None, '1st Round pick', '$2,305,057 ', 2017, 'F-C', 7.0, 245.0, '1995-06-30 00:00:00', 'Vanderbilt University', 24, 24, 15, 9, 17.1, 5.4, 2.2, 3.1, 71.6, 0.0, 0.0, 0.0, 1.0, 1.5, 64.9, 1.3, 1.8, 3.1, 1.2, 0.7, 0.5, 1.0, 0.7), (238, 326.0, 'OG Anunoby', 'TOR', 2281800, '$3,872,215 ', None, None, None, None, '1st Round Pick', '$6,154,015 ', 2018, 'F', 6.666666666666667, 232.0, '1997-07-17 00:00:00', 'Indiana University', 21, 67, 46, 21, 20.2, 7.0, 2.7, 6.0, 45.3, 1.0, 3.0, 33.2, 0.5, 0.9, 58.1, 0.9, 2.1, 2.9, 0.7, 0.8, 0.7, 0.3, -0.7), (239, 332.0, 'Willie Cauley-Stein', 'GSW', 2177483, '$2,286,357 ', None, None, None, None, 'MLE', '$2,177,483 ', 2016, 'F-C', 7.0, 240.0, '1993-08-18 00:00:00', 'University of Kentucky', 25, 81, 39, 42, 27.3, 11.9, 5.1, 9.1, 55.6, 0.0, 0.0, 50.0, 1.7, 3.1, 55.1, 2.2, 6.1, 8.4, 2.4, 1.0, 1.2, 0.6, 0.7), (240, 333.0, 'Austin Rivers', 'HOU', 2174318, '$2,369,663 ', None, None, None, None, 'Minimum Salary', '$2,174,318 ', 2013, 'G', 6.333333333333333, 200.0, '1992-08-01 00:00:00', 'Duke University', 26, 76, 45, 31, 26.7, 8.1, 3.1, 7.5, 40.6, 1.4, 4.3, 31.8, 0.7, 1.3, 52.6, 0.3, 1.8, 2.1, 2.2, 0.9, 0.6, 0.3, 1.4), (241, 337.0, 'Torrey Craig', 'DEN', 2100000, None, None, None, None, None, 'Cap Space', '$2,100,000 ', 2018, 'G-F', 6.5, 215.0, '1990-12-19 00:00:00', 'University of South Carolina Upstate', 28, 75, 50, 25, 20.0, 5.7, 2.1, 4.8, 44.2, 0.8, 2.5, 32.4, 0.7, 0.9, 70.0, 1.2, 2.3, 3.5, 1.0, 0.6, 0.5, 0.6, 1.0), (242, 340.0, 'Caleb Swanigan', 'SAC', 2033160, '$3,665,787 ', None, None, None, None, '1st Round Pick', '$2,033,160 ', 2018, 'F', 6.666666666666667, 250.0, '1997-04-18 00:00:00', 'Purdue University', 22, 21, 10, 11, 8.5, 2.0, 0.9, 2.5, 34.0, 0.0, 0.3, 14.3, 0.3, 0.4, 66.7, 0.8, 2.2, 3.0, 0.5, 0.9, 0.3, 0.0, -2.9), (243, 341.0, \"Kyle O'Quinn\", 'PHI', 2028594, None, None, None, None, None, 'Minimum Salary', '$2,028,594 ', 2013, 'F-C', 6.833333333333333, 250.0, '1990-03-26 00:00:00', 'Norfolk State University', 29, 45, 27, 18, 8.2, 3.5, 1.5, 3.0, 50.7, 0.0, 0.3, 8.3, 0.4, 0.5, 81.0, 0.6, 2.0, 2.6, 1.2, 0.7, 0.2, 0.6, 0.1), (244, 342.0, 'Michael Carter-Williams', 'ORL', 2028594, None, None, None, None, None, 'Minimum Salary', '$2,028,594 ', 2014, 'G', 6.5, 190.0, '1991-10-10 00:00:00', 'Syracuse University', 27, 28, 17, 11, 13.3, 4.8, 1.6, 4.4, 37.4, 0.4, 1.4, 26.3, 1.1, 1.9, 60.4, 0.7, 1.8, 2.5, 2.5, 0.7, 0.7, 0.5, 0.1), (245, 343.0, 'Nerlens Noel', 'OKC', 2028594, None, None, None, None, None, 'Minimum Salary', '$2,028,594 ', 2015, 'C-F', 6.916666666666667, 228.0, '1994-04-10 00:00:00', 'University of Kentucky', 25, 77, 48, 29, 13.7, 4.9, 2.1, 3.6, 58.7, 0.0, 0.0, 0.0, 0.7, 1.0, 68.4, 1.6, 2.6, 4.2, 0.6, 0.6, 0.9, 1.2, -1.0), (246, 344.0, 'Mike Muscala', 'OKC', 2028594, '$2,283,034 ', None, None, None, None, None, '$2,028,594 ', 2014, 'F-C', 6.916666666666667, 240.0, '1991-07-01 00:00:00', 'Bucknell University', 27, 64, 35, 29, 20.4, 7.0, 2.3, 5.6, 40.2, 1.4, 4.0, 34.8, 1.1, 1.3, 82.4, 0.9, 2.9, 3.8, 1.2, 0.8, 0.3, 0.6, 0.5), (247, 345.0, 'Troy Daniels', 'LAL', 2028594, None, None, None, None, None, 'Minimum Salary', '$2,028,594 ', 2014, 'G', 6.333333333333333, 205.0, '1991-07-15 00:00:00', 'Virginia Commonwealth University', 27, 51, 13, 38, 14.9, 6.2, 2.2, 5.4, 41.1, 1.5, 3.8, 38.1, 0.4, 0.5, 78.3, 0.3, 1.2, 1.4, 0.5, 0.5, 0.5, 0.1, -2.2), (248, 346.0, 'Trey Burke', 'PHI', 2028594, None, None, None, None, None, 'Minimum 
Salary', '$405,000 ', 2014, 'G', 6.083333333333333, 191.0, '1992-11-12 00:00:00', 'University of Michigan', 26, 58, 16, 42, 19.4, 10.9, 4.1, 9.4, 43.1, 1.0, 2.7, 35.2, 1.8, 2.1, 83.1, 0.5, 1.2, 1.7, 2.7, 0.8, 0.6, 0.1, -0.6), (249, 349.0, 'Noah Vonleh', 'MIN', 2000000, None, None, None, None, None, 'MLE', '$2,000,000 ', 2015, 'F', 6.833333333333333, 240.0, '1995-08-24 00:00:00', 'Indiana University', 23, 68, 13, 55, 25.3, 8.4, 3.0, 6.5, 47.0, 0.7, 2.0, 33.6, 1.6, 2.3, 71.2, 1.7, 6.1, 7.8, 1.9, 1.3, 0.7, 0.8, -4.3), (250, 353.0, 'Kyle Kuzma', 'LAL', 1974600, '$3,562,178 ', None, None, None, None, '1st Round Pick', '$1,974,600 ', 2018, 'F', 6.75, 220.0, '1995-07-24 00:00:00', 'University of Utah', 23, 70, 33, 37, 33.1, 18.7, 7.1, 15.5, 45.6, 1.8, 6.0, 30.3, 2.7, 3.6, 75.2, 0.9, 4.6, 5.5, 2.5, 1.9, 0.6, 0.4, -0.8), (251, 355.0, 'Tony Bradley', 'UTA', 1962360, '$3,542,060 ', None, None, None, None, '1st Round Pick', '$1,962,360 ', 2018, 'F-C', 6.916666666666667, 240.0, '1998-01-08 00:00:00', 'University of North Carolina', 21, 3, 2, 1, 12.0, 5.7, 2.7, 5.3, 50.0, 0.0, 0.0, 0.0, 0.3, 0.7, 50.0, 3.0, 2.0, 5.0, 0.3, 1.0, 0.7, 0.7, -6.0), (252, 357.0, 'Derrick White', 'SAS', 1948080, '$3,516,284 ', None, None, None, None, '1st Round Pick', '$5,464,364 ', 2018, 'G', 6.416666666666667, 190.0, '1994-07-02 00:00:00', 'University of Colorado', 24, 67, 41, 26, 25.8, 9.9, 3.9, 8.1, 47.9, 0.7, 2.1, 33.8, 1.4, 1.8, 77.2, 0.5, 3.2, 3.7, 3.9, 1.4, 1.0, 0.7, 2.3), (253, 359.0, 'Josh Hart', 'NOP', 1934160, '$3,491,159 ', None, None, None, None, '1st Round Pick', '$1,934,160 ', 2018, 'G', 6.416666666666667, 209.0, '1995-03-06 00:00:00', 'Villanova University', 24, 67, 31, 36, 25.6, 7.8, 2.8, 6.9, 40.7, 1.4, 4.1, 33.6, 0.8, 1.2, 68.8, 0.5, 3.2, 3.7, 1.4, 0.9, 1.0, 0.6, 0.0), (254, 365.0, 'Shabazz Napier', 'MIN', 1882867, None, None, None, None, None, 'Cap Space', '$1,882,867 ', 2015, 'G', 6.083333333333333, 175.0, '1991-07-14 00:00:00', 'University of Connecticut', 27, 56, 26, 30, 17.6, 9.4, 3.0, 7.8, 38.9, 1.4, 4.1, 33.3, 2.1, 2.5, 83.3, 0.3, 1.5, 1.8, 2.6, 1.2, 0.7, 0.3, 0.3), (255, 367.0, 'Tim Frazier', 'DET', 1882867, None, None, None, None, None, None, '$1,882,867 ', 2015, 'G', 6.083333333333333, 170.0, '1990-11-01 00:00:00', 'Pennsylvania State University', 28, 59, 27, 32, 19.0, 5.3, 2.0, 4.4, 44.4, 0.6, 1.7, 36.6, 0.7, 0.9, 75.9, 0.7, 2.2, 2.8, 4.2, 1.3, 0.5, 0.1, -1.1), (256, 368.0, 'Ben McLemore', 'HOU', 1882867, '$2,130,023 ', None, None, None, None, None, '$50,000 ', 2014, 'G', 6.416666666666667, 195.0, '1993-02-11 00:00:00', 'University of Kansas', 26, 19, 7, 12, 8.3, 3.9, 1.3, 3.4, 39.1, 0.9, 2.2, 41.5, 0.4, 0.6, 66.7, 0.2, 0.7, 0.9, 0.2, 0.3, 0.3, 0.2, -0.9), (257, 370.0, 'Bruno Caboclo', 'MEM', 1845301, None, None, None, None, None, None, '$300,000 ', 2015, 'F', 6.75, 218.0, '1995-09-21 00:00:00', None, 23, 34, 14, 20, 23.5, 8.3, 2.8, 6.6, 42.7, 1.4, 3.8, 36.9, 1.2, 1.5, 84.0, 1.2, 3.4, 4.6, 1.5, 1.1, 0.4, 1.0, 0.0), (258, 371.0, 'Mario Hezonja', 'POR', 1737145, '$1,882,867 ', None, None, None, None, 'Minimum Salary', '$3,620,012 ', 2016, 'F-G', 6.666666666666667, 215.0, '1995-02-25 00:00:00', None, 24, 58, 12, 46, 20.8, 8.8, 3.3, 8.0, 41.2, 0.7, 2.6, 27.6, 1.5, 2.0, 76.3, 0.5, 3.6, 4.1, 1.5, 1.5, 1.0, 0.1, -4.5), (259, 372.0, 'Emmanuel Mudiay', 'UTA', 1737145, None, None, None, None, None, 'Minimum Salary', '$1,737,145 ', 2016, 'G', 6.416666666666667, 200.0, '1996-03-05 00:00:00', None, 23, 59, 13, 46, 27.2, 14.8, 5.6, 12.5, 44.6, 1.2, 3.6, 32.9, 2.4, 3.2, 77.4, 0.6, 2.8, 3.3, 3.9, 2.4, 0.7, 0.3, 
-6.1), (260, 373.0, 'Raul Neto', 'PHI', 1737145, None, None, None, None, None, 'Minimum Salary', '$1,737,145 ', 2016, 'G', 6.083333333333333, 179.0, '1992-05-19 00:00:00', None, 27, 37, 26, 11, 12.8, 5.3, 2.0, 4.4, 46.0, 0.5, 1.6, 33.3, 0.8, 0.9, 84.8, 0.2, 1.5, 1.7, 2.5, 0.9, 0.4, 0.1, 4.7), (261, 374.0, 'JaKarr Sampson', 'IND', 1737145, None, None, None, None, None, 'Minimum Salary', '$1,737,145 ', 2015, 'G-F', 6.75, 214.0, '1993-03-20 00:00:00', \"St. John's University\", 26, 4, 1, 3, 31.8, 20.0, 7.3, 13.5, 53.7, 1.3, 3.5, 35.7, 4.3, 5.3, 81.0, 1.3, 6.8, 8.0, 1.0, 1.0, 1.0, 0.8, -1.8), (262, 375.0, 'Cameron Payne', 'TOR', 1737145, '$1,977,011 ', None, None, None, None, 'Minimum Salary', '$150,000 ', 2016, 'G', 6.25, 185.0, '1994-08-08 00:00:00', 'Murray State University', 24, 40, 7, 33, 17.8, 6.3, 2.4, 5.6, 43.0, 0.6, 2.1, 29.8, 0.8, 1.0, 80.5, 0.3, 1.5, 1.8, 2.7, 1.1, 0.7, 0.2, -5.6), (263, 376.0, 'Justin Anderson', 'WAS', 1737145, None, None, None, None, None, None, None, 2016, 'G', 6.5, 228.0, '1993-11-19 00:00:00', 'University of Virginia', 25, 48, 17, 31, 9.7, 3.7, 1.3, 3.3, 40.8, 0.5, 1.6, 31.2, 0.5, 0.7, 74.3, 0.5, 1.3, 1.8, 0.5, 0.5, 0.5, 0.3, -1.6), (264, 377.0, 'Pat Connaughton', 'MIL', 1723050, None, None, None, None, None, None, '$1,723,050 ', 2016, 'G', 6.416666666666667, 206.0, '1993-01-06 00:00:00', 'University of Notre Dame', 26, 61, 43, 18, 20.7, 6.9, 2.7, 5.7, 46.6, 1.1, 3.3, 33.0, 0.5, 0.7, 72.5, 1.0, 3.2, 4.2, 2.0, 0.5, 0.5, 0.4, 1.9), (265, 378.0, 'Jahlil Okafor', 'NOP', 1702486, None, None, None, None, None, 'Minimum Salary', '$54,323 ', 2016, 'C-F', 6.916666666666667, 275.0, '1995-12-15 00:00:00', 'Duke University', 23, 59, 23, 36, 15.8, 8.2, 3.6, 6.1, 58.6, 0.0, 0.1, 20.0, 1.0, 1.5, 66.3, 1.4, 3.3, 4.7, 0.7, 0.9, 0.3, 0.7, -2.0), (266, 381.0, 'Cheick Diallo', 'PHO', 1678854, '$1,824,003 ', None, None, None, None, 'Minimum Salary', '$1,678,854 ', 2017, 'F', 6.75, 220.0, '1996-09-13 00:00:00', 'University of Kansas', 22, 64, 27, 37, 14.0, 6.0, 2.6, 4.2, 62.0, 0.0, 0.1, 25.0, 0.8, 1.0, 74.6, 1.2, 4.0, 5.2, 0.5, 0.8, 0.5, 0.5, -1.4), (267, 382.0, 'Treveon Graham', 'MIN', 1678854, None, None, None, None, None, 'Minimum Salary', '$1,678,854 ', 2017, 'G', 6.5, 220.0, '1993-10-29 00:00:00', 'Virginia Commonwealth University', 25, 35, 19, 16, 20.4, 5.3, 1.8, 5.5, 33.5, 1.1, 3.7, 29.7, 0.5, 0.6, 81.8, 0.7, 2.4, 3.1, 1.0, 0.5, 0.4, 0.2, 0.8), (268, 383.0, 'David Nwaba', 'BRK', 1678854, '$1,824,003 ', None, None, None, None, None, '$1,678,854 ', 2017, 'G', 6.333333333333333, 209.0, '1993-01-14 00:00:00', 'California Polytechnic State University, San Luis Obispo', 26, 51, 14, 37, 19.3, 6.5, 2.5, 5.1, 48.1, 0.5, 1.5, 32.0, 1.1, 1.7, 68.2, 0.8, 2.4, 3.2, 1.1, 0.6, 0.7, 0.3, -1.5), (269, 384.0, 'Dragan Bender', 'MIL', 1678854, '$1,824,003 ', None, None, None, None, 'Minimum Salary', '$300,000 ', 2017, 'F', 7.083333333333333, 225.0, '1997-11-17 00:00:00', None, 21, 46, 9, 37, 18.0, 5.0, 1.9, 4.3, 44.7, 0.5, 2.2, 21.8, 0.7, 1.2, 59.3, 0.7, 3.2, 4.0, 1.2, 0.8, 0.4, 0.5, -3.5), (270, 385.0, 'Marquese Chriss', 'GSW', 1678854, None, None, None, None, None, None, None, 2017, 'F', 6.833333333333333, 233.0, '1997-07-02 00:00:00', 'University of Washington', 21, 43, 17, 26, 11.6, 4.2, 1.6, 4.2, 37.2, 0.4, 1.7, 22.2, 0.7, 1.0, 71.1, 0.9, 2.4, 3.3, 0.5, 0.8, 0.4, 0.3, -4.0), (271, 390.0, 'Jordan McRae', 'WAS', 1645357, None, None, None, None, None, None, '$400,000 ', 2016, 'G', 6.5, 185.0, '1991-03-28 00:00:00', 'University of Tennessee', 28, 27, 9, 18, 12.3, 5.9, 2.3, 4.8, 46.9, 0.4, 1.3, 
28.6, 1.0, 1.3, 80.0, 0.2, 1.3, 1.5, 1.1, 0.6, 0.5, 0.3, -0.8), (272, 391.0, 'Georges Niang', 'UTA', 1645357, '$1,783,557 ', None, None, None, None, None, None, 2017, 'F', 6.666666666666667, 230.0, '1993-06-17 00:00:00', 'Iowa State University', 26, 59, 41, 18, 8.8, 4.0, 1.5, 3.1, 47.5, 0.7, 1.8, 41.0, 0.3, 0.4, 83.3, 0.2, 1.3, 1.5, 0.6, 0.4, 0.2, 0.1, -1.6), (273, 392.0, 'Christian Wood', 'DET', 1645357, None, None, None, None, None, None, None, 2016, 'F', 6.916666666666667, 220.0, '1995-09-27 00:00:00', 'University of Nevada, Las Vegas', 23, 21, 13, 8, 11.9, 8.2, 2.9, 5.6, 52.1, 0.4, 1.2, 34.6, 2.0, 2.7, 73.2, 0.8, 3.1, 4.0, 0.4, 0.8, 0.3, 0.5, -2.5), (274, 393.0, 'Furkan Korkmaz', 'PHI', 1620564, '$1,762,796 ', None, None, None, None, 'Minimum Salary', '$1,620,564 ', 2018, 'G', 6.583333333333333, 185.0, '1997-07-24 00:00:00', None, 21, 48, 30, 18, 14.2, 5.8, 2.0, 5.1, 40.0, 1.0, 3.0, 32.6, 0.8, 0.9, 81.8, 0.3, 1.9, 2.2, 1.1, 0.5, 0.6, 0.0, 1.3), (275, 394.0, 'Jordan Bell', 'MIN', 1620564, None, None, None, None, None, 'Minimum Salary', '$1,620,564 ', 2018, 'F', 6.75, 224.0, '1995-01-07 00:00:00', 'University of Oregon', 24, 68, 46, 22, 11.6, 3.3, 1.5, 2.8, 51.6, 0.0, 0.0, 0.0, 0.4, 0.6, 61.0, 0.8, 1.9, 2.7, 1.1, 0.6, 0.3, 0.8, -1.5), (276, 397.0, 'Tyler Lydon', 'SAC', 1620564, '$1,762,796 ', None, None, None, None, None, '$50,000 ', 2018, 'F', 6.833333333333333, 215.0, '1996-04-09 00:00:00', 'Syracuse University', 23, 25, 20, 5, 3.7, 0.9, 0.4, 0.7, 50.0, 0.2, 0.4, 40.0, 0.0, 0.1, 33.3, 0.3, 0.4, 0.7, 0.2, 0.2, 0.1, 0.0, -0.4), (277, 398.0, 'Tyler Ulis', 'SAC', 1620564, None, None, None, None, None, None, None, 2017, 'G', 5.833333333333333, 150.0, '1996-01-05 00:00:00', 'University of Kentucky', 23, 1, 0, 1, 0.8, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0), (278, 399.0, 'Sindarius Thornwell', 'CLE', 1620564, None, None, None, None, None, None, None, 2018, 'G', 6.416666666666667, 212.0, '1994-11-15 00:00:00', 'University of South Carolina', 24, 64, 37, 27, 4.9, 1.0, 0.3, 0.8, 34.7, 0.0, 0.2, 20.0, 0.4, 0.5, 73.5, 0.1, 0.6, 0.7, 0.3, 0.3, 0.2, 0.1, -0.6), (279, 400.0, 'C.J. 
Williams', 'BRK', 1620564, None, None, None, None, None, None, None, 2018, 'G', 6.416666666666667, 225.0, '1990-02-06 00:00:00', 'North Carolina State University', 29, 15, 4, 11, 8.5, 2.6, 1.1, 2.3, 48.6, 0.3, 1.1, 31.3, 0.0, 0.1, 0.0, 0.1, 0.5, 0.5, 0.8, 0.3, 0.4, 0.0, -3.3), (280, 402.0, 'Abdel Nader', 'OKC', 1618520, '$1,752,950 ', None, None, None, None, None, '$1,618,520 ', 2018, 'F', 6.5, 230.0, '1993-09-25 00:00:00', 'Iowa State University', 25, 61, 38, 23, 11.4, 4.0, 1.5, 3.5, 42.3, 0.5, 1.6, 32.0, 0.4, 0.6, 75.0, 0.2, 1.7, 1.9, 0.3, 0.4, 0.3, 0.2, -2.5), (281, 403.0, 'Damyean Dotson', 'NYK', 1618520, None, None, None, None, None, 'Minimum Salary', '$1,618,520 ', 2018, 'G', 6.5, 205.0, '1994-05-06 00:00:00', 'University of Houston', 25, 73, 12, 61, 27.4, 10.7, 4.0, 9.6, 41.5, 1.7, 4.7, 36.8, 1.0, 1.3, 74.5, 0.5, 3.1, 3.6, 1.8, 1.0, 0.8, 0.1, -4.5), (282, 405.0, 'Sterling Brown', 'MIL', 1618520, None, None, None, None, None, 'MLE', '$1,618,520 ', 2018, 'G', 6.416666666666667, 225.0, '1995-02-10 00:00:00', 'Southern Methodist University', 24, 58, 45, 13, 17.8, 6.4, 2.5, 5.4, 46.5, 0.9, 2.5, 36.1, 0.5, 0.7, 69.0, 0.5, 2.7, 3.2, 1.4, 0.8, 0.4, 0.1, 0.0), (283, 406.0, 'Dillon Brooks', 'MEM', 1618520, None, None, None, None, None, 'MLE', '$1,618,520 ', 2018, 'G-F', 6.5, 220.0, '1996-01-22 00:00:00', 'University of Oregon', 23, 18, 9, 9, 18.3, 7.5, 2.7, 6.8, 40.2, 0.8, 2.2, 37.5, 1.2, 1.7, 73.3, 0.5, 1.2, 1.7, 0.9, 1.1, 0.6, 0.2, -0.9), (284, 407.0, 'Dwayne Bacon', 'CHO', 1618520, None, None, None, None, None, 'MLE', '$1,618,520 ', 2018, 'G', 6.5, 222.0, '1995-08-30 00:00:00', 'Florida State University', 23, 43, 20, 23, 17.6, 7.3, 2.8, 6.0, 47.5, 0.9, 2.0, 43.7, 0.8, 1.1, 73.9, 0.2, 1.9, 2.1, 1.1, 0.4, 0.3, 0.1, -0.5), (285, 408.0, 'Semi Ojeleye', 'BOS', 1618520, '$1,752,950 ', None, None, None, None, None, '$1,618,520 ', 2018, 'F', 6.583333333333333, 241.0, '1994-12-05 00:00:00', 'Southern Methodist University', 24, 56, 39, 17, 10.6, 3.3, 1.2, 2.8, 42.4, 0.5, 1.6, 31.5, 0.4, 0.7, 61.5, 0.4, 1.1, 1.5, 0.4, 0.3, 0.2, 0.1, 1.3), (286, 409.0, 'Ivan Rabb', 'MEM', 1618520, None, None, None, None, None, 'MLE', '$371,758 ', 2018, 'F', 6.833333333333333, 220.0, '1997-02-04 00:00:00', 'University of California', 22, 49, 20, 29, 14.7, 5.8, 2.4, 4.3, 54.7, 0.1, 0.3, 20.0, 1.0, 1.4, 71.0, 1.4, 2.8, 4.2, 1.1, 0.7, 0.3, 0.3, -1.9), (287, 410.0, \"Royce O'Neale\", 'UTA', 1618520, None, None, None, None, None, None, None, 2018, 'F', 6.5, 215.0, '1993-06-05 00:00:00', 'Baylor University', 26, 82, 50, 32, 20.4, 5.2, 2.0, 4.2, 47.5, 0.8, 2.1, 38.6, 0.4, 0.5, 76.2, 0.3, 3.2, 3.5, 1.5, 0.9, 0.7, 0.3, 2.1), (288, 411.0, 'Monte Morris', 'DEN', 1588231, '$1,663,861 ', None, None, None, None, 'Minimum Salary', '$1,588,231 ', 2018, 'G', 6.25, 175.0, '1995-06-27 00:00:00', 'Iowa State University', 24, 82, 54, 28, 24.0, 10.4, 4.2, 8.6, 49.3, 1.1, 2.8, 41.4, 0.8, 1.0, 80.2, 0.4, 1.9, 2.4, 3.6, 0.6, 0.9, 0.0, 2.2), (289, 412.0, 'Malcolm Miller', 'TOR', 1588231, None, None, None, None, None, None, '$150,000 ', 2018, 'F', 6.583333333333333, 210.0, '1993-03-06 00:00:00', 'College of the Holy Cross', 26, 10, 9, 1, 6.7, 3.5, 1.1, 2.6, 42.3, 1.0, 2.1, 47.6, 0.3, 0.4, 75.0, 0.1, 0.4, 0.5, 0.1, 0.1, 0.1, 0.1, -0.8), (290, 415.0, 'Alfonzo McKinnie', 'GSW', 1588231, None, None, None, None, None, None, None, 2018, 'F', 6.666666666666667, 215.0, '1992-09-17 00:00:00', 'University of Wisconsin-Green Bay', 26, 72, 53, 19, 13.9, 4.7, 1.9, 3.8, 48.7, 0.6, 1.6, 35.6, 0.4, 0.7, 56.3, 1.1, 2.3, 3.4, 0.4, 0.4, 0.3, 0.2, 0.2), (291, 
455.0, 'Jonathon Simmons', 'WAS', 1000000, None, None, None, None, None, None, '$1,000,000 ', 2016, 'G-F', 6.5, 195.0, '1989-09-14 00:00:00', 'University of Houston', 29, 56, 25, 31, 19.0, 6.5, 2.4, 6.3, 38.0, 0.5, 1.9, 26.9, 1.3, 1.7, 74.2, 0.5, 1.8, 2.3, 2.3, 1.2, 0.5, 0.3, -2.6), (292, 482.0, 'Shaun Livingston', 'GSW', 666667, '$666,667 ', '$666,666 ', None, None, None, None, '$2,666,666 ', 2005, 'G', 6.583333333333333, 192.0, '1985-09-11 00:00:00', None, 33, 64, 45, 19, 15.1, 4.0, 1.7, 3.3, 51.9, 0.0, 0.0, 0.0, 0.6, 0.8, 78.4, 0.7, 1.2, 1.8, 1.8, 0.6, 0.5, 0.4, 0.7), (293, 487.0, 'Troy Williams', 'HOU', 122741, '$122,741 ', '$122,741 ', '$122,741 ', None, None, None, '$490,964 ', 2017, 'F', 6.583333333333333, 218.0, '1994-12-30 00:00:00', 'Indiana University', 24, 21, 7, 14, 14.9, 5.3, 2.1, 4.7, 44.9, 0.7, 2.1, 31.8, 0.4, 0.7, 60.0, 0.6, 2.2, 2.8, 0.5, 0.4, 0.5, 0.4, -3.2), (294, 490.0, 'Demetrius Jackson', 'BOS', 92857, '$92,857 ', '$92,857 ', '$92,857 ', '$92,857 ', None, None, '$464,285 ', 2017, 'G', 6.083333333333333, 201.0, '1994-09-07 00:00:00', 'University of Notre Dame', 24, 6, 5, 1, 6.6, 3.7, 1.3, 2.5, 53.3, 0.3, 1.0, 33.3, 0.7, 0.7, 100.0, 0.3, 0.2, 0.5, 0.8, 0.2, 0.3, 0.0, -1.8)]\n" ], [ "# to df\ndf = pd.read_sql('SELECT * FROM PLAYERS', conn)", "_____no_output_____" ], [ "df = df[['salary_2019to2020','age', 'pts','reb','ast', 'plusminus']]\ndf.head()", "_____no_output_____" ], [ "df = df.rename(columns={\"salary_2019to2020\":\"y\",\"age\":\"X1\", \"pts\":\"X2\",\"reb\":\"X3\",\"ast\":\"X4\", \"plusminus\":\"X5\"})\ndf.head()", "_____no_output_____" ], [ "X = df.drop(columns=['y'])\ny = df['y']", "_____no_output_____" ], [ "print(X.shape, y.shape)\nX.shape", "(295, 5) (295,)\n" ], [ "X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)", "_____no_output_____" ], [ "model = LinearRegression(normalize=False)", "_____no_output_____" ], [ "model.fit(X_train, y_train)\ntraining_score = model.score(X_train, y_train)\ntesting_score = model.score(X_test, y_test)\n\nprint(f\"Training Score: {training_score}\")\nprint(f\"Testing Score: {testing_score}\")", "Training Score: 0.5750174983280251\nTesting Score: 0.6386285703232046\n" ], [ "model.coef_", "_____no_output_____" ], [ "score = model.score(X, y)\nprint(f\"R2 Score: {score}\")", "R2 Score: 0.5904805478788308\n" ], [ "plt.scatter(model.predict(X_train), model.predict(X_train) - y_train, c=\"blue\", label=\"Training Data\")\nplt.scatter(model.predict(X_test), model.predict(X_test) - y_test, c=\"orange\", label=\"Testing Data\")\nplt.legend()\nplt.hlines(y=0, xmin=y.min(), xmax=y.max())\nplt.title(\"Residual Plot\")", "_____no_output_____" ], [ "#initial prediction- to test\nmodel.predict(np.array([34,10.8,6.5,1.2,-1.2]).reshape(1, -1))", "_____no_output_____" ], [ "# class Player:\n# def __init__(stats, age, pts, reb, ast, plm):\n# stats.age = age\n# stats.pts = pts\n# stats.reb = reb\n# stats.ast = ast\n# stats.plm = plm\n# def __repr__(stats):\n# return f'Your player is {stats.age} years old and averages {stats.pts} points, {stats.reb} rebounds, {stats.ast} assists, and has a plus-minus of {stats.plm}.'\n \n# input_data = {}\n\n# input_data['age'] = int(input(\"Enter your age: \"))\n# input_data['pts'] = int(input(\"Enter your average points per game: \"))\n# input_data['reb '] = int(input(\"Enter your average rebounds per game: \"))\n# input_data['ast'] = int(input(\"Enter your average assists per game: \"))\n# input_data['plm'] = int(input(\"Enter your plus-minus: \"))\n \n# player = Player(age = 
input_data['age'], pts = input_data['pts'], reb = input_data['reb '], ast = input_data['ast'], plm = input_data['plm'])\n# print(player)\n\n# prediction = model.predict(np.array([input_data['age'], input_data['pts'], input_data['reb '], input_data['ast'], input_data['plm']]).reshape(1,-1))\n# print(f\"The player's salary prediction is ${prediction[0]}\")", "_____no_output_____" ], [ "class Player:\n def __init__(stats, age, pts, reb, ast, plm):\n stats.age = age\n stats.pts = pts\n stats.reb = reb\n stats.ast = ast\n stats.plm = plm\n def __repr__(stats):\n return f'Your player is {stats.age} years old and averages {stats.pts} points, {stats.reb} rebounds, {stats.ast} assists, and has a plus-minus of {stats.plm}.'\n \nclass PlayerSchema(Schema):\n age = fields.Integer()\n pts = fields.Integer()\n reb = fields.Integer()\n ast = fields.Integer()\n plm = fields.Integer()\n \n @post_load\n def create_player(stats, data, **kwargs):\n return Player(**data)\n \ninput_data = {}\n\ninput_data['age'] = int(input(\"Enter your age: \"))\ninput_data['pts'] = int(input(\"Enter your average points per game: \"))\ninput_data['reb'] = int(input(\"Enter your average rebounds per game: \"))\ninput_data['ast'] = int(input(\"Enter your average assists per game: \"))\ninput_data['plm'] = int(input(\"Enter your plus-minus: \"))\n\nschema = PlayerSchema()\nplayer = schema.load(input_data)\nprint(player)\n\n# result = schema.dump(player)\n# print(result)\n \n# player = Player(age = input_data['age'], pts = input_data['pts'], reb = input_data['reb '], ast = input_data['ast'], plm = input_data['plm'])\n# print(player)\n\nprediction = model.predict(np.array([input_data['age'], input_data['pts'], input_data['reb'], input_data['ast'], input_data['plm']]).reshape(1,-1))\nsalary = prediction.astype('int32')\nprint(f\"The player's salary prediction is ${salary[0]:,}\")", "Enter your age: 23\nEnter your average points per game: 10\nEnter your average rebounds per game: 5\nEnter your average assists per game: 3\nEnter your plus-minus: 1\nYour player is 23 years old and averages 10 points, 5 rebounds, 3 assists, and has a plus-minus of 1.\n{'ast': 3, 'plm': 1, 'pts': 10, 'age': 23, 'reb': 5}\nThe player's salary prediction is $8,710,105\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a66bc9b25afd51da5f8f82518c2c5835f675bbd
2,510
ipynb
Jupyter Notebook
DataScience/AisAudioLabeler-LoadData.ipynb
ralatsdc/AISonobuoy
37f0a6a8b0769b90b974ae94f3a28bb83f45d19e
[ "Apache-2.0" ]
null
null
null
DataScience/AisAudioLabeler-LoadData.ipynb
ralatsdc/AISonobuoy
37f0a6a8b0769b90b974ae94f3a28bb83f45d19e
[ "Apache-2.0" ]
null
null
null
DataScience/AisAudioLabeler-LoadData.ipynb
ralatsdc/AISonobuoy
37f0a6a8b0769b90b974ae94f3a28bb83f45d19e
[ "Apache-2.0" ]
null
null
null
22.612613
85
0.557769
[ [ [ "# Manage imports\nfrom pathlib import Path\nimport folium\nimport AisAudioLabeler as aal\nimport LabelerUtilities as lu", "_____no_output_____" ], [ "# Assign data home directory, and collection filename\ndata_home = Path(\"~\").expanduser() / \"Data\" / \"AISonobuoy\"\ncollection_filename = \"collection-ais.json\"", "_____no_output_____" ], [ "# Load file describing the collection\ncollection_path = Path(data_home) / collection_filename\ncollection = lu.load_json_file(collection_path)", "_____no_output_____" ], [ "# For now, assume a single source and hydrophone\nif len(collection[\"sources\"]) > 1 or len(collection[\"hydrophones\"]) > 1:\n raise Exception(\"Only one source and one hydrophone expected\")\nsource = collection[\"sources\"][0]\nhydrophone = collection[\"hydrophones\"][0]", "_____no_output_____" ], [ "# Load AIS data\nais = aal.get_ais_dataframe(data_home, source)", "_____no_output_____" ], [ "# Get hydrophone metadata\nhmd = aal.get_hmd_dataframe(data_home, hydrophone)", "_____no_output_____" ], [ "# Get ship metadata\nshp = aal.get_shp_dictionary(data_home, source)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ] ]
4a66ca6e253f0e5d7ae0441a7b3e1a291df84f49
74,602
ipynb
Jupyter Notebook
sessions/naive-bayes.ipynb
grim10/machine-learning-course
2747d290defd05c35c6b55d89ecf35decae646c8
[ "MIT" ]
1
2020-02-27T15:30:50.000Z
2020-02-27T15:30:50.000Z
sessions/naive-bayes.ipynb
grim10/machine-learning-course
2747d290defd05c35c6b55d89ecf35decae646c8
[ "MIT" ]
null
null
null
sessions/naive-bayes.ipynb
grim10/machine-learning-course
2747d290defd05c35c6b55d89ecf35decae646c8
[ "MIT" ]
null
null
null
202.722826
38,272
0.897067
[ [ [ "# Naïve Bayes classification\n\nNaive Bayes is a high bias/low variance classifier that is less likely to overfit small training datasets than a low bias/high variance classifier is (such as kNN or logistic regression). Simple algorithm and converges quickly. Strongly assumes independence (hence, naive).", "_____no_output_____" ] ], [ [ "import numpy as np, pandas as pd, matplotlib.pyplot as plt\nfrom sklearn import naive_bayes, metrics, model_selection, preprocessing\nfrom utils import plot_decision", "_____no_output_____" ] ], [ [ "## Load and prep the data", "_____no_output_____" ] ], [ [ "# load the iris data\ndf = pd.read_csv('data/iris.csv')\ndf['species_label'], _ = pd.factorize(df['species'])\ndf.head()", "_____no_output_____" ], [ "# select features\ny = df['species_label']\nX = df[['petal_length', 'petal_width']]", "_____no_output_____" ], [ "# split data randomly into 70% training and 30% test\nX_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.3, random_state=0)", "_____no_output_____" ], [ "# standardize the features\nsc = preprocessing.StandardScaler()\nsc.fit(X_train)\nX_train_std = sc.transform(X_train)\nX_test_std = sc.transform(X_test)", "_____no_output_____" ] ], [ [ "## Train the model and make predictions", "_____no_output_____" ] ], [ [ "# train the model\ngnb = naive_bayes.GaussianNB(priors=None)\ngnb.fit(X_train_std, y_train)", "_____no_output_____" ], [ "# use the trained model to make predictions with the test data\ny_pred = gnb.predict(X_test_std)", "_____no_output_____" ] ], [ [ "## Evaluate the model's performance", "_____no_output_____" ] ], [ [ "# how did our model perform?\ncount_misclassified = (y_test != y_pred).sum()\nprint('Misclassified samples: {}'.format(count_misclassified))\naccuracy = metrics.accuracy_score(y_test, y_pred)\nprint('Accuracy: {:.2f}'.format(accuracy))", "Misclassified samples: 1\nAccuracy: 0.98\n" ], [ "# visualize the model's decision regions to see how it separates the samples\nX_combined_std = np.vstack((X_train_std, X_test_std))\ny_combined = np.hstack((y_train, y_test))\nplot_decision(X=X_combined_std, y=y_combined, classifier=gnb)\nplt.xlabel('petal length (standardized)')\nplt.ylabel('petal width (standardized)')\nplt.legend(loc='upper left')\nplt.show()", "_____no_output_____" ], [ "# same thing, but this time identify the points that constituted the test data set\ntest_idx = range(len(y_train), len(y_combined))\nplot_decision(X=X_combined_std, y=y_combined, classifier=gnb, test_idx=test_idx)\nplt.xlabel('petal length (standardized)')\nplt.ylabel('petal width (standardized)')\nplt.legend(loc='upper left')\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
4a66dbfaeca003a23afc58147025ba2ab23cd63b
17,214
ipynb
Jupyter Notebook
site/zh-cn/tutorials/distribute/save_and_load.ipynb
FontTian/docs-l10n
2ed835c3106efdece4076a628d3b761965f863f0
[ "Apache-2.0" ]
null
null
null
site/zh-cn/tutorials/distribute/save_and_load.ipynb
FontTian/docs-l10n
2ed835c3106efdece4076a628d3b761965f863f0
[ "Apache-2.0" ]
null
null
null
site/zh-cn/tutorials/distribute/save_and_load.ipynb
FontTian/docs-l10n
2ed835c3106efdece4076a628d3b761965f863f0
[ "Apache-2.0" ]
null
null
null
29.937391
293
0.512432
[ [ [ "##### Copyright 2019 The TensorFlow Authors.\n", "_____no_output_____" ] ], [ [ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ] ], [ [ "# 使用分布策略保存和加载模型", "_____no_output_____" ], [ "<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td><a target=\"_blank\" href=\"https://tensorflow.google.cn/tutorials/distribute/save_and_load\" class=\"\"><img src=\"https://tensorflow.google.cn/images/tf_logo_32px.png\">在 TensorFlow.org 上查看</a></td>\n <td><a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/zh-cn/tutorials/distribute/save_and_load.ipynb\" class=\"\"><img src=\"https://tensorflow.google.cn/images/colab_logo_32px.png\" class=\"\">在 Google Colab 中运行</a></td>\n <td><a target=\"_blank\" href=\"https://github.com/tensorflow/docs-l10n/blob/master/site/zh-cn/tutorials/distribute/save_and_load.ipynb\" class=\"\"><img src=\"https://tensorflow.google.cn/images/GitHub-Mark-32px.png\" class=\"\">在 Github 上查看源代码</a></td>\n <td><a href=\"https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/zh-cn/tutorials/distribute/save_and_load.ipynb\" class=\"\"><img src=\"https://tensorflow.google.cn/images/download_logo_32px.png\" class=\"\">下载笔记本</a></td>\n</table>", "_____no_output_____" ], [ "## 概述\n\n在训练期间一般需要保存和加载模型。有两组用于保存和加载 Keras 模型的 API:高级 API 和低级 API。本教程演示了在使用 `tf.distribute.Strategy` 时如何使用 SavedModel API。要了解 SavedModel 和序列化的相关概况,请参阅[保存的模型指南](../../guide/saved_model.ipynb)和 [Keras 模型序列化指南](../../guide/keras/save_and_serialize.ipynb)。让我们从一个简单的示例开始: ", "_____no_output_____" ], [ "导入依赖项:", "_____no_output_____" ] ], [ [ "import tensorflow_datasets as tfds\n\nimport tensorflow as tf\ntfds.disable_progress_bar()", "_____no_output_____" ] ], [ [ "使用 `tf.distribute.Strategy` 准备数据和模型:", "_____no_output_____" ] ], [ [ "mirrored_strategy = tf.distribute.MirroredStrategy()\n\ndef get_data():\n datasets, ds_info = tfds.load(name='mnist', with_info=True, as_supervised=True)\n mnist_train, mnist_test = datasets['train'], datasets['test']\n\n BUFFER_SIZE = 10000\n\n BATCH_SIZE_PER_REPLICA = 64\n BATCH_SIZE = BATCH_SIZE_PER_REPLICA * mirrored_strategy.num_replicas_in_sync\n\n def scale(image, label):\n image = tf.cast(image, tf.float32)\n image /= 255\n\n return image, label\n\n train_dataset = mnist_train.map(scale).cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE)\n eval_dataset = mnist_test.map(scale).batch(BATCH_SIZE)\n\n return train_dataset, eval_dataset\n\ndef get_model():\n with mirrored_strategy.scope():\n model = tf.keras.Sequential([\n tf.keras.layers.Conv2D(32, 3, activation='relu', input_shape=(28, 28, 1)),\n tf.keras.layers.MaxPooling2D(),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(64, activation='relu'),\n tf.keras.layers.Dense(10)\n ])\n\n model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n optimizer=tf.keras.optimizers.Adam(),\n metrics=['accuracy'])\n return model", "_____no_output_____" ] ], [ [ "训练模型: ", "_____no_output_____" ] ], [ [ "model = 
get_model()\ntrain_dataset, eval_dataset = get_data()\nmodel.fit(train_dataset, epochs=2)", "_____no_output_____" ] ], [ [ "## 保存和加载模型\n\n现在,您已经有一个简单的模型可供使用,让我们了解一下如何保存/加载 API。有两组可用的 API:\n\n- 高级 Keras `model.save` 和 `tf.keras.models.load_model`\n- 低级 `tf.saved_model.save` 和 `tf.saved_model.load`\n", "_____no_output_____" ], [ "### Keras API", "_____no_output_____" ], [ "以下为使用 Keras API 保存和加载模型的示例:", "_____no_output_____" ] ], [ [ "keras_model_path = \"/tmp/keras_save\"\nmodel.save(keras_model_path) # save() should be called out of strategy scope", "_____no_output_____" ] ], [ [ "恢复无 `tf.distribute.Strategy` 的模型:", "_____no_output_____" ] ], [ [ "restored_keras_model = tf.keras.models.load_model(keras_model_path)\nrestored_keras_model.fit(train_dataset, epochs=2)", "_____no_output_____" ] ], [ [ "恢复模型后,您可以继续在它上面进行训练,甚至无需再次调用 `compile()`,因为在保存之前已经对其进行了编译。模型以 TensorFlow 的标准 `SavedModel` proto 格式保存。有关更多信息,请参阅 [`saved_model` 格式指南](../../guide/saved_model.ipynb)。\n\n现在,加载模型并使用 `tf.distribute.Strategy` 进行训练:", "_____no_output_____" ] ], [ [ "another_strategy = tf.distribute.OneDeviceStrategy(\"/cpu:0\")\nwith another_strategy.scope():\n restored_keras_model_ds = tf.keras.models.load_model(keras_model_path)\n restored_keras_model_ds.fit(train_dataset, epochs=2)", "_____no_output_____" ] ], [ [ "如您所见, `tf.distribute.Strategy` 可以按预期进行加载。此处使用的策略不必与保存前所用策略相同。 ", "_____no_output_____" ], [ "### `tf.saved_model` API", "_____no_output_____" ], [ "现在,让我们看一下较低级别的 API。保存模型与 Keras API 类似:", "_____no_output_____" ] ], [ [ "model = get_model() # get a fresh model\nsaved_model_path = \"/tmp/tf_save\"\ntf.saved_model.save(model, saved_model_path)", "_____no_output_____" ] ], [ [ "可以使用 `tf.saved_model.load()` 进行加载。但是,由于该 API 级别较低(因此用例范围更广泛),所以不会返回 Keras 模型。相反,它返回一个对象,其中包含可用于进行推断的函数。例如:", "_____no_output_____" ] ], [ [ "DEFAULT_FUNCTION_KEY = \"serving_default\"\nloaded = tf.saved_model.load(saved_model_path)\ninference_func = loaded.signatures[DEFAULT_FUNCTION_KEY]", "_____no_output_____" ] ], [ [ "加载的对象可能包含多个函数,每个函数与一个键关联。`\"serving_default\"` 是使用已保存的 Keras 模型的推断函数的默认键。要使用此函数进行推断,请运行以下代码: ", "_____no_output_____" ] ], [ [ "predict_dataset = eval_dataset.map(lambda image, label: image)\nfor batch in predict_dataset.take(1):\n print(inference_func(batch))", "_____no_output_____" ] ], [ [ "您还可以采用分布式方式加载和进行推断:", "_____no_output_____" ] ], [ [ "another_strategy = tf.distribute.MirroredStrategy()\nwith another_strategy.scope():\n loaded = tf.saved_model.load(saved_model_path)\n inference_func = loaded.signatures[DEFAULT_FUNCTION_KEY]\n\n dist_predict_dataset = another_strategy.experimental_distribute_dataset(\n predict_dataset)\n\n # Calling the function in a distributed manner\n for batch in dist_predict_dataset:\n another_strategy.run(inference_func,args=(batch,))", "_____no_output_____" ] ], [ [ "调用已恢复的函数只是基于已保存模型的前向传递(预测)。如果您想继续训练加载的函数,或者将加载的函数嵌入到更大的模型中,应如何操作? 
通常的做法是将此加载对象包装到 Keras 层以实现此目的。幸运的是,[TF Hub](https://tensorflow.google.cn/hub) 为此提供了 [hub.KerasLayer](https://github.com/tensorflow/hub/blob/master/tensorflow_hub/keras_layer.py),如下所示:", "_____no_output_____" ] ], [ [ "import tensorflow_hub as hub\n\ndef build_model(loaded):\n x = tf.keras.layers.Input(shape=(28, 28, 1), name='input_x')\n # Wrap what's loaded to a KerasLayer\n keras_layer = hub.KerasLayer(loaded, trainable=True)(x)\n model = tf.keras.Model(x, keras_layer)\n return model\n\nanother_strategy = tf.distribute.MirroredStrategy()\nwith another_strategy.scope():\n loaded = tf.saved_model.load(saved_model_path)\n model = build_model(loaded)\n\n model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n optimizer=tf.keras.optimizers.Adam(),\n metrics=['accuracy'])\n model.fit(train_dataset, epochs=2)", "_____no_output_____" ] ], [ [ "如您所见,`hub.KerasLayer` 可将从 `tf.saved_model.load()` 加载回的结果包装到可供构建其他模型的 Keras 层。这对于迁移学习非常实用。 ", "_____no_output_____" ], [ "### 我应使用哪种 API?", "_____no_output_____" ], [ "对于保存,如果您使用的是 Keras 模型,那么始终建议使用 Keras 的 `model.save()` API。如果您所保存的不是 Keras 模型,那么您只能选择使用较低级的 API。\n\n对于加载,使用哪种 API 取决于您要从加载的 API 中获得什么。如果您无法或不想获取 Keras 模型,请使用 `tf.saved_model.load()`。否则,请使用 `tf.keras.models.load_model()`。请注意,只有保存 Keras 模型后,才能恢复 Keras 模型。\n\n可以混合使用 API。您可以使用 `model.save` 保存 Keras 模型,并使用低级 API `tf.saved_model.load` 加载非 Keras 模型。 ", "_____no_output_____" ] ], [ [ "model = get_model()\n\n# Saving the model using Keras's save() API\nmodel.save(keras_model_path) \n\nanother_strategy = tf.distribute.MirroredStrategy()\n# Loading the model using lower level API\nwith another_strategy.scope():\n loaded = tf.saved_model.load(keras_model_path)", "_____no_output_____" ] ], [ [ "### 警告", "_____no_output_____" ], [ "有一种特殊情况,您的 Keras 模型没有明确定义的输入。例如,可以创建没有任何输入形状的序贯模型 (`Sequential([Dense(3), ...]`)。子类化模型在初始化后也没有明确定义的输入。在这种情况下,在保存和加载时都应坚持使用较低级别的 API,否则会出现错误。\n\n要检查您的模型是否具有明确定义的输入,只需检查 `model.inputs` 是否为 `None`。如果非 `None`,则一切正常。在 `.fit`、`.evaluate`、`.predict` 中使用模型,或调用模型 (`model(inputs)`) 时,输入形状将自动定义。\n\n以下为示例:", "_____no_output_____" ] ], [ [ "class SubclassedModel(tf.keras.Model):\n\n output_name = 'output_layer'\n\n def __init__(self):\n super(SubclassedModel, self).__init__()\n self._dense_layer = tf.keras.layers.Dense(\n 5, dtype=tf.dtypes.float32, name=self.output_name)\n\n def call(self, inputs):\n return self._dense_layer(inputs)\n\nmy_model = SubclassedModel()\n# my_model.save(keras_model_path) # ERROR! \ntf.saved_model.save(my_model, saved_model_path)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
4a66e921e7b3c9ac8b87e967259b3f92a4422827
20,694
ipynb
Jupyter Notebook
examples/MDBSI.ipynb
feeds/dit
bd303ef7c4708643082f218e153b27d352bf2810
[ "BSD-3-Clause" ]
1
2020-03-13T10:30:11.000Z
2020-03-13T10:30:11.000Z
examples/MDBSI.ipynb
feeds/dit
bd303ef7c4708643082f218e153b27d352bf2810
[ "BSD-3-Clause" ]
null
null
null
examples/MDBSI.ipynb
feeds/dit
bd303ef7c4708643082f218e153b27d352bf2810
[ "BSD-3-Clause" ]
null
null
null
24.317274
327
0.531168
[ [ [ "# Multivariate Dependencies Beyond Shannon Information\n\nThis is a companion Jupyter notebook to the work *Multivariate Dependencies Beyond Shannon Information* by Ryan G. James and James P. Crutchfield. This worksheet was written by Ryan G. James. It primarily makes use of the ``dit`` package for information theory calculations.", "_____no_output_____" ], [ "## Basic Imports\n\nWe first import basic functionality. Further functionality will be imported as needed.", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nfrom dit import ditParams, Distribution\nfrom dit.distconst import uniform\n\nditParams['repr.print'] = ditParams['print.exact'] = True", "_____no_output_____" ] ], [ [ "## Distributions\n\nHere we define the two distributions to be compared.", "_____no_output_____" ] ], [ [ "from dit.example_dists.mdbsi import dyadic, triadic\n\ndists = [('dyadic', dyadic), ('triadic', triadic)]", "_____no_output_____" ] ], [ [ "## I-Diagrams and X-Diagrams\n\nHere we construct the I- and X-Diagrams of both distributions. The I-Diagram is constructed by considering how the entropies of each variable interact. The X-Diagram is similar, but considers how the extropies of each variable interact.", "_____no_output_____" ] ], [ [ "from dit.profiles import ExtropyPartition, ShannonPartition\n\ndef print_partition(dists, partition):\n ps = [str(partition(dist)).split('\\n') for _, dist in dists ]\n print('\\t' + '\\t\\t\\t\\t'.join(name for name, _ in dists))\n for lines in zip(*ps):\n print('\\t\\t'.join(lines))", "_____no_output_____" ], [ "print_partition(dists, ShannonPartition)", "_____no_output_____" ] ], [ [ "Both I-Diagrams are the same. This implies that *no* Shannon measure (entropy, mutual information, conditional mutual information [including the transfer entropy], co-information, etc) can differentiate these patterns of dependency.", "_____no_output_____" ] ], [ [ "print_partition(dists, ExtropyPartition)", "_____no_output_____" ] ], [ [ "Similarly, the X-Diagrams are identical and so no extropy-based measure can differentiate the distributions.", "_____no_output_____" ], [ "## Measures of Mutual and Common Information\n\nWe now compute several measures of mutual and common information:", "_____no_output_____" ] ], [ [ "from prettytable import PrettyTable\n\nfrom dit.multivariate import (entropy,\n coinformation,\n total_correlation,\n dual_total_correlation,\n independent_information,\n caekl_mutual_information,\n interaction_information,\n intrinsic_total_correlation,\n gk_common_information,\n wyner_common_information,\n exact_common_information,\n functional_common_information,\n mss_common_information,\n tse_complexity,\n )\n\nfrom dit.other import (extropy,\n disequilibrium,\n perplexity,\n LMPR_complexity,\n renyi_entropy,\n tsallis_entropy,\n )", "_____no_output_____" ], [ "def print_table(title, table, dists):\n pt = PrettyTable(field_names = [''] + [name for name, _ in table])\n for name, _ in table:\n pt.float_format[name] = ' 5.{0}'.format(3)\n for name, dist in dists:\n pt.add_row([name] + [measure(dist) for _, measure in table])\n\n print(\"\\n{}\".format(title))\n print(pt.get_string())", "_____no_output_____" ] ], [ [ "### Entropies\n\nEntropies generally capture the uncertainty contained in a distribution. Here, we compute the Shannon entropy, the Renyi entropy of order 2 (also known as the collision entropy), and the Tsallis entropy of order 2. 
Though we only compute the order 2 values, any order will produce values identical for both distributions.", "_____no_output_____" ] ], [ [ "entropies = [('H', entropy),\n ('Renyi (α=2)', lambda d: renyi_entropy(d, 2)),\n ('Tsallis (q=2)', lambda d: tsallis_entropy(d, 2)),\n ]", "_____no_output_____" ], [ "print_table('Entropies', entropies, dists)", "_____no_output_____" ] ], [ [ "The entropies for both distributions are indentical. This is not surprising: they have the same probability mass function.", "_____no_output_____" ], [ "### Mutual Informations\n\nMutual informations are multivariate generalizations of the standard Shannon mutual information. By far, the most widely used (and often simply assumed to be the only) generalization is the total correlation, sometimes called the multi-information. It is defined as:\n$$\n T[\\mathbf{X}] = \\sum H[X_i] - H[\\mathbf{X}] = \\sum p(\\mathbf{x}) \\log_2 \\frac{p(\\mathbf{x})}{p(x_1)p(x_2)\\ldots p(x_n)}\n$$\n\nOther generalizations exist, though, including the co-information, the dual total correlation, and the CAEKL mutual information.", "_____no_output_____" ] ], [ [ "mutual_informations = [('I', coinformation),\n ('T', total_correlation),\n ('B', dual_total_correlation),\n ('J', caekl_mutual_information),\n ('II', interaction_information),\n ]", "_____no_output_____" ], [ "print_table('Mutual Informations', mutual_informations, dists)", "_____no_output_____" ] ], [ [ "The equivalence of all these generalizations is not surprising: Each of them can be defined as a function of the I-diagram, and so must be identical here.", "_____no_output_____" ], [ "### Common Informations\n\nCommon informations are generally defined using an auxilliary random variable which captures some amount of information shared by the variables of interest. For all but the Gács-Körner common information, that shared information is the dual total correlation.", "_____no_output_____" ] ], [ [ "common_informations = [('K', gk_common_information),\n ('C', lambda d: wyner_common_information(d, niter=1, polish=False)),\n ('G', lambda d: exact_common_information(d, niter=1, polish=False)),\n ('F', functional_common_information),\n ('M', mss_common_information),\n ]", "_____no_output_____" ], [ "print_table('Common Informations', common_informations, dists)", "_____no_output_____" ] ], [ [ "As it turns out, only the Gács-Körner common information, `K`, distinguishes the two.", "_____no_output_____" ], [ "### Other Measures\n\nHere we list a variety of other information measures.", "_____no_output_____" ] ], [ [ "other_measures = [('IMI', lambda d: intrinsic_total_correlation(d, d.rvs[:-1], d.rvs[-1])),\n ('X', extropy),\n ('R', independent_information),\n ('P', perplexity),\n ('D', disequilibrium),\n ('LMRP', LMPR_complexity),\n ('TSE', tse_complexity),\n ]", "_____no_output_____" ], [ "print_table('Other Measures', other_measures, dists)", "_____no_output_____" ] ], [ [ "Several other measures fail to differentiate our two distributions. For many of these (`X`, `P`, `D`, `LMRP`) this is because they are defined relative to the probability mass function. For the others, it is due to the equality of the I-diagrams. 
Only the intrinsic mutual information, `IMI`, can distinguish the two.", "_____no_output_____" ], [ "## Information Profiles\n\nLastly, we consider several \"profiles\" of the information.", "_____no_output_____" ] ], [ [ "from dit.profiles import *\n\ndef plot_profile(dists, profile):\n n = len(dists)\n plt.figure(figsize=(8*n, 6))\n ent = max(entropy(dist) for _, dist in dists)\n for i, (name, dist) in enumerate(dists):\n ax = plt.subplot(1, n, i+1)\n profile(dist).draw(ax=ax)\n if profile not in [EntropyTriangle, EntropyTriangle2]:\n ax.set_ylim((-0.1, ent + 0.1))\n ax.set_title(name)", "_____no_output_____" ] ], [ [ "### Complexity Profile", "_____no_output_____" ] ], [ [ "plot_profile(dists, ComplexityProfile)", "_____no_output_____" ] ], [ [ "Once again, these two profiles are identical due to the I-Diagrams being identical. The complexity profile incorrectly suggests that there is no information at the scale of 3 variables.", "_____no_output_____" ], [ "### Marginal Utility of Information", "_____no_output_____" ] ], [ [ "plot_profile(dists, MUIProfile)", "_____no_output_____" ] ], [ [ "The marginal utility of information is based on a linear programming problem with constrains related to values from the I-Diagram, and so here again the two distributions are undifferentiated.", "_____no_output_____" ], [ "### Connected Informations", "_____no_output_____" ] ], [ [ "plot_profile(dists, SchneidmanProfile)", "_____no_output_____" ] ], [ [ "The connected informations are based on differences between maximum entropy distributions with differing $k$-way marginal distributions fixed. Here, the two distributions are differentiated ", "_____no_output_____" ], [ "### Multivariate Entropy Triangle", "_____no_output_____" ] ], [ [ "plot_profile(dists, EntropyTriangle)", "_____no_output_____" ] ], [ [ "Both distributions are at an idential location in the multivariate entropy triangle.", "_____no_output_____" ], [ "## Partial Information\n\nWe next consider a variety of partial information decompositions.", "_____no_output_____" ] ], [ [ "from dit.pid.helpers import compare_measures", "_____no_output_____" ], [ "for name, dist in dists:\n compare_measures(dist, name=name)", "_____no_output_____" ] ], [ [ "Here we see that the PID determines that in dyadic distribution two random variables uniquely contribute a bit of information to the third, whereas in the triadic distribution two random variables redundantly influene the third with one bit, and synergistically with another.", "_____no_output_____" ], [ "## Multivariate Extensions", "_____no_output_____" ] ], [ [ "from itertools import product", "_____no_output_____" ], [ "outcomes_a = [\n (0,0,0,0),\n (0,2,3,2),\n (1,0,2,1),\n (1,2,1,3),\n (2,1,3,3),\n (2,3,0,1),\n (3,1,1,2),\n (3,3,2,0),\n]\noutcomes_b = [\n (0,0,0,0),\n (0,0,1,1),\n (0,1,0,1),\n (0,1,1,0),\n (1,0,0,1),\n (1,0,1,0),\n (1,1,0,0),\n (1,1,1,1),\n]\n\noutcomes = [ tuple([2*a+b for a, b in zip(a_, b_)]) for a_, b_ in product(outcomes_a, outcomes_b) ]\nquadradic = uniform(outcomes)", "_____no_output_____" ], [ "dyadic2 = uniform([(4*a+2*c+e, 4*a+2*d+f, 4*b+2*c+f, 4*b+2*d+e) for a, b, c, d, e, f in product([0,1], repeat=6)])", "_____no_output_____" ], [ "dists2 = [('dyadic2', dyadic2), ('quadradic', quadradic)]", "_____no_output_____" ], [ "print_partition(dists2, ShannonPartition)", "_____no_output_____" ], [ "print_partition(dists2, ExtropyPartition)", "_____no_output_____" ], [ "print_table('Entropies', entropies, dists2)", "_____no_output_____" ], [ "print_table('Mutual 
Informations', mutual_informations, dists2)", "_____no_output_____" ], [ "print_table('Common Informations', common_informations, dists2)", "_____no_output_____" ], [ "print_table('Other Measures', other_measures, dists2)", "_____no_output_____" ], [ "plot_profile(dists2, ComplexityProfile)", "_____no_output_____" ], [ "plot_profile(dists2, MUIProfile)", "_____no_output_____" ], [ "plot_profile(dists2, SchneidmanProfile)", "_____no_output_____" ], [ "plot_profile(dists2, EntropyTriangle)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a66f04432c992a72931f26511a8e3bfef3b9fd7
94,599
ipynb
Jupyter Notebook
Ensemble_Techniques/.ipynb_checkpoints/XGBoost-checkpoint.ipynb
Aujasvi-Moudgil/Ensemble-Learning
0ca2abd57cd2c27fbd09fdc8b59ec07567f21d2d
[ "MIT" ]
1
2020-08-06T09:57:11.000Z
2020-08-06T09:57:11.000Z
Ensemble_Techniques/.ipynb_checkpoints/XGBoost-checkpoint.ipynb
Aujasvi-Moudgil/Ensemble-Learning
0ca2abd57cd2c27fbd09fdc8b59ec07567f21d2d
[ "MIT" ]
null
null
null
Ensemble_Techniques/.ipynb_checkpoints/XGBoost-checkpoint.ipynb
Aujasvi-Moudgil/Ensemble-Learning
0ca2abd57cd2c27fbd09fdc8b59ec07567f21d2d
[ "MIT" ]
null
null
null
407.75431
87,208
0.937515
[ [ [ "!pip install xgboost ", "Requirement already satisfied: xgboost in c:\\users\\aujas\\anaconda3\\lib\\site-packages (0.81)\nRequirement already satisfied: scipy in c:\\users\\aujas\\anaconda3\\lib\\site-packages (from xgboost) (1.1.0)\nRequirement already satisfied: numpy in c:\\users\\aujas\\anaconda3\\lib\\site-packages (from xgboost) (1.14.3)\n" ], [ "import xgboost as xgb\nimport pandas as pd\nimport numpy as np\nimport statsmodels.api as sm\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import mean_squared_error\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "power_plant = pd.read_excel(\"Folds5x2_pp.xlsx\")\n\n\nX = power_plant.drop(\"PE\", axis = 1) # Drop PE from independent variables\ny = power_plant['PE'].values # Hold PE as the dependent variable. PE - Net Hourly Power ouput\n\nX_train, X_test, y_train, y_test = train_test_split(X, y,test_size = 0.3, random_state=1)", "_____no_output_____" ], [ "# Xgb takes data in matrix form both for training and testing...\n\nDM_train = xgb.DMatrix(data = X_train, \n label = y_train) \nDM_test = xgb.DMatrix(data = X_test,\n label = y_test)", "_____no_output_____" ], [ "# setting the hyper parameters ... Ref https://xgboost.readthedocs.io/en/latest/python/python_api.html\n\ngbm_param_grid = {\n 'colsample_bytree': np.linspace(0.5, 0.9, 5), # generate 5 numbers between .5 and .9 \n 'n_estimators':[10, 200],\n 'max_depth': [10, 15, 20, 25]\n}", "_____no_output_____" ], [ "gbm = xgb.XGBRegressor()", "_____no_output_____" ], [ "grid_mse = GridSearchCV(estimator = gbm, param_grid = gbm_param_grid, scoring = 'neg_mean_squared_error', cv = 5, verbose = 1)", "_____no_output_____" ], [ "grid_mse.fit(X_train, y_train)\nprint(\"Best parameters found: \",grid_mse.best_params_)\nprint(\"Lowest RMSE found: \", np.sqrt(np.abs(grid_mse.best_score_)))", "Fitting 5 folds for each of 40 candidates, totalling 200 fits\n" ], [ "pred = grid_mse.predict(X_test)\nprint(\"Root mean square error for test dataset: {}\".format(np.round(np.sqrt(mean_squared_error(y_test, pred)), 2)))", "Root mean square error for test dataset: 2.88\n" ], [ "test = pd.DataFrame({\"prediction\": pred, \"observed\": y_test.flatten()})\nlowess = sm.nonparametric.lowess\nz = lowess(pred.flatten(), y_test.flatten())\ntest.plot(figsize = [14,8],\n x =\"prediction\", y = \"observed\", kind = \"scatter\", color = 'darkred')\nplt.title(\"Extreme Gradient Boosting: Prediction Vs Test Data\", fontsize = 18, color = \"darkgreen\")\nplt.xlabel(\"Predicted Power Output\", fontsize = 18) \nplt.ylabel(\"Observed Power Output\", fontsize = 18)\nplt.plot(z[:,0], z[:,1], color = \"blue\", lw= 3)\nplt.show()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a66f73bbfff109603e1af90dfdcc0833e01870b
595,483
ipynb
Jupyter Notebook
Trading_Strategies/Technical_Trading/Genetic Optimization.ipynb
mikimaus78/ml_monorepo
b2c2627ff0e86e27f6829170d0dac168d8e5783b
[ "BSD-3-Clause" ]
51
2019-02-01T19:43:37.000Z
2022-03-16T09:07:03.000Z
Trading_Strategies/Technical_Trading/Genetic Optimization.ipynb
mikimaus78/ml_monorepo
b2c2627ff0e86e27f6829170d0dac168d8e5783b
[ "BSD-3-Clause" ]
2
2019-02-23T18:54:22.000Z
2019-11-09T01:30:32.000Z
Trading_Strategies/Technical_Trading/Genetic Optimization.ipynb
mikimaus78/ml_monorepo
b2c2627ff0e86e27f6829170d0dac168d8e5783b
[ "BSD-3-Clause" ]
35
2019-02-08T02:00:31.000Z
2022-03-01T23:17:00.000Z
615.80455
226,681
0.907556
[ [ [ "%run technical_trading.py", "/Applications/anaconda/lib/python3.5/site-packages/pandas/io/data.py:33: FutureWarning: \nThe pandas.io.data module is moved to a separate package (pandas-datareader) and will be removed from pandas in a future version.\nAfter installing the pandas-datareader package (https://github.com/pydata/pandas-datareader), you can change the import ``from pandas.io import data, wb`` to ``from pandas_datareader import data, wb``.\n FutureWarning)\n/Applications/anaconda/lib/python3.5/site-packages/sklearn/utils/fixes.py:64: DeprecationWarning: inspect.getargspec() is deprecated, use inspect.signature() instead\n if 'order' in inspect.getargspec(np.copy)[0]:\n" ], [ "#%%\ndata = pd.read_csv('../../data/hs300.csv',index_col = 'date',parse_dates = 'date')\ndata.vol = data.vol.astype(float)\n#start = pd.Timestamp('2005-09-01')\n#end = pd.Timestamp('2012-03-15')\n#data = data[start:end]\n#%%\nchaikin = CHAIKINAD(data, m = 14, n = 16)\nkdj = KDJ(data)\nadx = ADX(data)\nemv = EMV(data, n = 20, m = 23)\ncci = CCI(data, n=20, m = 8)\nbbands = BBANDS(data, n =20, m=2)\naroon = AROON(data)\ncmo = CMO(data)\n\n#%%\nsignal = pd.DataFrame(index=data.index)\n#signal['kdj'] = kdj['2']\nsignal['chaikin'] = chaikin['3']\nsignal['emv'] = emv['2']\nsignal['adx'] = adx['1']\nsignal['cci'] = cci['2']\nsignal['aroon'] = aroon['2']\nsignal['cmo'] = cmo['2']\nsignal['bbands'] = bbands['1']\nsignal = signal.fillna(0)\n\nreturns_c = Backtest(data, signal.mode(axis=1).ix[:,0])\n(1+returns_c).cumprod().plot()\n#%%\noos_date = pd.Timestamp('2012-03-15')\n#pf.create_returns_tear_sheet(returns, live_start_date=oos_date)\npf.create_full_tear_sheet(returns_c)\n#%%", "/Users/jianboxue/Documents/Research_Projects/Trading_Strategies/Technical_Trading/technical_trading.py:93: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n signal['2'][signal['2'] > 0] = 1\n/Users/jianboxue/Documents/Research_Projects/Trading_Strategies/Technical_Trading/technical_trading.py:94: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n signal['2'][signal['2'] < 0] = -1\n" ], [ "%matplotlib inline\n(1+returns_c).cumprod().plot()", "_____no_output_____" ], [ "returns = pd.DataFrame(index=data.index)\n#signal['kdj'] = kdj['2']\nreturns['chaikin'] = np.array(Backtest(data, chaikin['3']))\nreturns['emv'] = np.array(Backtest(data, emv['2']))\nreturns['adx'] = np.array(Backtest(data, adx['1']))\nreturns['cci'] = np.array(Backtest(data, cci['2']))\nreturns['aroon'] = np.array(Backtest(data, aroon['2']))\nreturns['cmo'] = np.array(Backtest(data, cmo['2']))\nreturns['bbands'] = np.array(Backtest(data, bbands['1']))\nreturns = returns.fillna(0)", "_____no_output_____" ], [ "(1+returns['chaikin']).cumprod().plot()", "_____no_output_____" ], [ "nav = pd.DataFrame()\nnav['combined'] = (1+returns_c).cumprod()\nema5 = talib.EMA(np.array(nav['combined']), 5)\nema20 = talib.EMA(np.array(nav['combined']), 20)\nsignal5 = (nav['combined'] > ema5) * 1 + (nav['combined']<ema5) *0\nsignal20 = (nav['combined'] > ema20) * 1 + (nav['combined']<ema20) * 0\nsignal5_20 = (ema5 > ema20) * 1 + (ema20 < ema5)*0\nreturn_ema5 = returns_c * signal5.shift(1)\nreturn_ema20 = returns_c * signal20.shift(1)\nnav['ema5'] = 
(1+return_ema5).cumprod()\nnav['ema20'] = (1+return_ema20).cumprod()\n#nav['ema5_20'] = (1+retrun_ema5_20).cumprod()\nnav.plot()", "_____no_output_____" ], [ "(1+returns.sum(1)/4).cumprod().plot()\nret_target = returns.sum(1) / 4\nret_target.index = data.index.tz_localize('UTC')\npf.create_full_tear_sheet(ret_target)", "Entire data start date: 2005-01-04\nEntire data end date: 2015-10-30\n\n\nBacktest Months: 125\n Backtest\nannual_return 0.58\nannual_volatility 0.37\nsharpe_ratio 1.56\ncalmar_ratio 2.04\nstability 0.84\nmax_drawdown -0.29\nomega_ratio 1.37\nsortino_ratio 2.32\nskewness -0.33\nkurtosis 6.54\nalpha NaN\nbeta NaN\n" ], [ "%run ../Strategy_Evalution_Tools/turtle_evalution.py", "_____no_output_____" ], [ "# This file is part of DEAP.\n#\n# DEAP is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as\n# published by the Free Software Foundation, either version 3 of\n# the License, or (at your option) any later version.\n#\n# DEAP is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.\n\nimport array\nimport random\n\nimport numpy\n\nfrom deap import algorithms\nfrom deap import base\nfrom deap import creator\nfrom deap import tools\n\n### insample vs. oos\nreturns_is = returns.ix[:, :]\nreturns_oos = returns.ix[1001::, :]\ncreator.create(\"FitnessMax\", base.Fitness, weights=(1.0,))\ncreator.create(\"Individual\", array.array, typecode='b', fitness=creator.FitnessMax)\n\ntoolbox = base.Toolbox()\n\n# Attribute generator\ntoolbox.register(\"attr_bool\", random.randint, 0, 1)\n\n# Structure initializers\ntoolbox.register(\"individual\", tools.initRepeat, creator.Individual, toolbox.attr_bool, 7)\ntoolbox.register(\"population\", tools.initRepeat, list, toolbox.individual)\n\ndef evalOneMax(individual):\n print(individual)\n for i in range(7) :\n if i == 0:\n rets = returns_is.ix[:,i] * individual[i]\n else :\n rets = rets + returns_is.ix[:,i] * individual[i]\n rets = rets.fillna(0)\n \n sharpe, rsharpe = Sharpe(rets)\n rrr = RRR(rets)\n if np.isnan(rsharpe) :\n rsharpe = 0\n print(rsharpe)\n return rsharpe,;\n\ntoolbox.register(\"evaluate\", evalOneMax)\ntoolbox.register(\"mate\", tools.cxTwoPoint)\ntoolbox.register(\"mutate\", tools.mutFlipBit, indpb=0.05)\ntoolbox.register(\"select\", tools.selTournament, tournsize=3)\n\ndef main():\n random.seed(64)\n \n pop = toolbox.population(n=128)\n hof = tools.HallOfFame(2)\n stats = tools.Statistics(lambda ind: ind.fitness.values)\n stats.register(\"avg\", numpy.mean)\n stats.register(\"std\", numpy.std)\n stats.register(\"min\", numpy.min)\n stats.register(\"max\", numpy.max)\n \n pop, log = algorithms.eaSimple(pop, toolbox, cxpb=0.5, mutpb=0.2, ngen=10, \n stats=stats, halloffame=hof, verbose=True)\n #print(log)\n return pop, log, hof\n\nif __name__ == \"__main__\":\n pop, log, hof = main()", "array('b', [1, 0, 1, 0, 0, 1, 0])\n-0.637074681163\narray('b', [0, 0, 1, 1, 0, 0, 0])\n-1.63973844874\narray('b', [1, 0, 0, 0, 0, 0, 1])\n-1.30562645871\narray('b', [0, 0, 0, 0, 1, 1, 0])\n-2.80306022545\narray('b', [1, 0, 1, 1, 1, 1, 1])\n-0.381502288899\narray('b', [0, 0, 0, 0, 1, 0, 0])\n5.08887558481\narray('b', [0, 0, 1, 0, 0, 0, 
1])\n-1.56279755704\narray('b', [0, 0, 1, 0, 0, 0, 0])\n-4.73581282317\narray('b', [1, 1, 1, 1, 1, 0, 1])\n-0.392075865198\narray('b', [0, 0, 1, 0, 0, 0, 0])\n-4.73581282317\narray('b', [0, 0, 0, 1, 1, 1, 1])\n-1.08237327535\narray('b', [0, 1, 1, 0, 0, 1, 0])\n-0.77703640699\narray('b', [0, 1, 1, 0, 1, 1, 1])\n-0.545074731393\narray('b', [0, 1, 1, 0, 0, 1, 0])\n-0.77703640699\narray('b', [1, 0, 0, 1, 1, 1, 0])\n-0.576871676581\narray('b', [1, 1, 1, 0, 1, 1, 0])\n-0.406663918003\narray('b', [0, 1, 1, 1, 0, 0, 0])\n-0.957054698972\narray('b', [0, 0, 1, 1, 0, 1, 0])\n-0.835376570817\narray('b', [0, 1, 1, 0, 0, 0, 1])\n-0.904010932485\narray('b', [0, 0, 1, 0, 0, 1, 0])\n-1.24164691284\narray('b', [0, 1, 1, 1, 0, 1, 0])\n-0.614121248113\narray('b', [1, 1, 1, 1, 1, 0, 0])\n-0.458687259531\narray('b', [0, 1, 1, 0, 1, 0, 0])\n-0.925119325855\narray('b', [0, 1, 1, 0, 0, 1, 1])\n-0.59276782578\narray('b', [1, 1, 0, 1, 1, 1, 1])\n-0.405606658467\narray('b', [0, 0, 1, 1, 1, 1, 0])\n-0.669113365103\narray('b', [0, 1, 0, 1, 1, 0, 0])\n-1.47523587617\narray('b', [1, 1, 1, 1, 1, 0, 1])\n-0.392075865198\narray('b', [1, 1, 0, 0, 0, 0, 1])\n-0.859448834376\narray('b', [0, 1, 0, 1, 0, 0, 1])\n-1.47751867781\narray('b', [0, 0, 1, 1, 0, 1, 0])\n-0.835376570817\narray('b', [1, 0, 1, 0, 0, 1, 1])\n-0.481328585654\narray('b', [1, 1, 0, 1, 1, 1, 1])\n-0.405606658467\narray('b', [1, 1, 0, 1, 1, 1, 0])\n-0.468188501307\narray('b', [1, 1, 0, 0, 0, 0, 0])\n-1.33668626278\narray('b', [0, 1, 1, 0, 0, 0, 1])\n-0.904010932485\narray('b', [1, 0, 0, 0, 0, 0, 1])\n-1.30562645871\narray('b', [0, 0, 0, 1, 0, 1, 0])\n-2.4395721301\narray('b', [1, 0, 0, 0, 1, 1, 0])\n-0.722750296198\narray('b', [0, 1, 0, 1, 0, 1, 0])\n-1.12860565729\narray('b', [1, 1, 1, 1, 0, 0, 0])\n-0.576369203497\narray('b', [0, 1, 0, 0, 1, 1, 0])\n-1.11225583542\narray('b', [0, 0, 1, 0, 0, 0, 1])\n-1.56279755704\narray('b', [0, 1, 1, 0, 1, 1, 1])\n-0.545074731393\narray('b', [0, 1, 0, 1, 1, 0, 0])\n-1.47523587617\narray('b', [0, 1, 0, 1, 0, 0, 1])\n-1.47751867781\narray('b', [1, 1, 1, 0, 1, 1, 0])\n-0.406663918003\narray('b', [1, 0, 0, 1, 0, 0, 0])\n-1.43053445702\narray('b', [0, 1, 1, 0, 1, 1, 0])\n-0.6113051397\narray('b', [1, 1, 1, 1, 0, 1, 0])\n-0.422696146764\narray('b', [0, 1, 0, 1, 0, 1, 1])\n-0.804967290822\narray('b', [0, 0, 0, 0, 1, 0, 0])\n5.08887558481\narray('b', [0, 1, 0, 1, 0, 0, 1])\n-1.47751867781\narray('b', [0, 1, 1, 1, 1, 1, 0])\n-0.531467715143\narray('b', [0, 1, 1, 1, 0, 1, 0])\n-0.614121248113\narray('b', [1, 1, 1, 0, 1, 0, 0])\n-0.544474412631\narray('b', [0, 1, 1, 1, 1, 0, 1])\n-0.616448526846\narray('b', [1, 1, 0, 0, 0, 1, 0])\n-0.765867600366\narray('b', [0, 1, 0, 0, 0, 0, 1])\n-2.90715013734\narray('b', [0, 1, 0, 1, 1, 0, 1])\n-1.04729702573\narray('b', [1, 0, 1, 0, 0, 1, 0])\n-0.637074681163\narray('b', [0, 0, 1, 0, 1, 0, 1])\n-1.04720243545\narray('b', [0, 1, 1, 1, 0, 1, 0])\n-0.614121248113\narray('b', [1, 0, 0, 0, 1, 1, 1])\n-0.560844047477\narray('b', [0, 1, 1, 1, 0, 0, 0])\n-0.957054698972\narray('b', [1, 1, 1, 0, 0, 0, 1])\n-0.547494046885\narray('b', [0, 0, 1, 0, 1, 0, 0])\n-1.57462431028\narray('b', [0, 1, 0, 1, 1, 1, 0])\n-0.824409551894\narray('b', [1, 1, 0, 1, 0, 1, 0])\n-0.589418260067\narray('b', [0, 1, 1, 0, 1, 0, 1])\n-0.703131952698\narray('b', [0, 1, 0, 1, 1, 1, 1])\n-0.720101352263\narray('b', [1, 0, 1, 1, 1, 0, 0])\n-0.560306044158\narray('b', [0, 0, 1, 1, 0, 1, 0])\n-0.835376570817\narray('b', [1, 1, 0, 0, 1, 0, 0])\n-0.827603198305\narray('b', [0, 1, 0, 0, 0, 1, 1])\n-1.07650403613\narray('b', [0, 0, 1, 0, 1, 
1, 0])\n-0.847235134999\narray('b', [0, 1, 1, 1, 0, 0, 1])\n-0.710052613434\narray('b', [1, 1, 0, 1, 0, 1, 0])\n-0.589418260067\narray('b', [1, 1, 0, 0, 1, 1, 0])\n-0.555404399017\narray('b', [0, 1, 1, 1, 0, 1, 1])\n-0.518233400067\narray('b', [1, 1, 1, 0, 0, 1, 1])\n-0.404455501696\narray('b', [0, 0, 0, 1, 0, 1, 0])\n-2.4395721301\narray('b', [0, 0, 0, 1, 1, 0, 0])\n-4.04561096905\narray('b', [1, 0, 0, 0, 0, 0, 1])\n-1.30562645871\narray('b', [1, 1, 0, 0, 1, 1, 1])\n-0.451451802281\narray('b', [1, 1, 1, 1, 0, 0, 1])\n-0.457466623298\narray('b', [1, 1, 0, 1, 1, 0, 1])\n-0.51820476961\narray('b', [0, 1, 0, 0, 1, 0, 0])\n-2.83453206599\narray('b', [1, 0, 0, 0, 1, 1, 0])\n-0.722750296198\narray('b', [1, 1, 0, 0, 1, 0, 0])\n-0.827603198305\narray('b', [1, 1, 1, 0, 1, 1, 1])\n-0.354630090452\narray('b', [0, 0, 1, 1, 0, 0, 1])\n-1.02013729713\narray('b', [0, 0, 1, 1, 1, 0, 1])\n-0.821863068661\narray('b', [0, 1, 1, 1, 1, 0, 1])\n-0.616448526846\narray('b', [0, 1, 0, 0, 1, 0, 0])\n-2.83453206599\narray('b', [0, 1, 0, 0, 1, 1, 1])\n-0.836799389917\narray('b', [0, 0, 1, 1, 1, 0, 0])\n-1.03118719406\narray('b', [1, 0, 0, 1, 1, 1, 1])\n-0.483944319054\narray('b', [0, 0, 1, 0, 0, 0, 1])\n-1.56279755704\narray('b', [0, 0, 1, 0, 0, 0, 1])\n-1.56279755704\narray('b', [1, 0, 1, 1, 1, 1, 0])\n-0.419432080402\narray('b', [1, 1, 1, 0, 1, 0, 0])\n-0.544474412631\narray('b', [0, 1, 1, 1, 0, 0, 0])\n-0.957054698972\narray('b', [1, 1, 1, 1, 1, 1, 1])\n-0.336956177194\narray('b', [0, 0, 1, 1, 0, 1, 0])\n-0.835376570817\narray('b', [0, 1, 1, 1, 0, 1, 0])\n-0.614121248113\narray('b', [0, 0, 0, 1, 1, 0, 1])\n-2.03990832058\narray('b', [0, 0, 0, 1, 0, 1, 1])\n-1.30815298539\narray('b', [0, 1, 1, 1, 1, 1, 0])\n-0.531467715143\narray('b', [0, 0, 0, 0, 0, 1, 0])\n6.56891122129\narray('b', [0, 1, 1, 1, 0, 1, 1])\n-0.518233400067\narray('b', [1, 1, 1, 0, 1, 1, 1])\n-0.354630090452\narray('b', [1, 0, 0, 1, 1, 0, 0])\n-0.87555254035\narray('b', [0, 0, 1, 1, 1, 1, 0])\n-0.669113365103\narray('b', [1, 1, 0, 1, 1, 1, 0])\n-0.468188501307\narray('b', [0, 1, 0, 1, 1, 1, 1])\n-0.720101352263\narray('b', [0, 1, 0, 0, 0, 0, 1])\n-2.90715013734\narray('b', [0, 0, 0, 0, 0, 1, 0])\n6.56891122129\narray('b', [1, 0, 0, 1, 1, 1, 1])\n-0.483944319054\narray('b', [0, 1, 0, 1, 0, 1, 0])\n-1.12860565729\narray('b', [0, 0, 0, 1, 1, 1, 1])\n-1.08237327535\narray('b', [0, 1, 1, 0, 0, 0, 1])\n-0.904010932485\narray('b', [1, 1, 0, 1, 1, 0, 0])\n-0.647820251989\narray('b', [1, 1, 0, 0, 0, 0, 1])\n-0.859448834376\narray('b', [1, 0, 0, 1, 1, 1, 1])\n-0.483944319054\narray('b', [0, 0, 0, 1, 0, 0, 0])\n5.06787161961\narray('b', [1, 0, 0, 0, 0, 1, 1])\n-0.728519733099\narray('b', [0, 1, 1, 1, 1, 0, 0])\n-0.725761140793\ngen\tnevals\tavg \tstd \tmin \tmax \n0 \t128 \t-0.752444\t1.51845\t-4.73581\t6.56891\narray('b', [1, 0, 0, 0, 0, 1, 0])\n-1.14055064017\narray('b', [1, 1, 0, 0, 1, 1, 0])\n-0.555404399017\narray('b', [1, 1, 0, 1, 0, 1, 0])\n-0.589418260067\narray('b', [1, 1, 1, 0, 1, 1, 1])\n-0.354630090452\narray('b', [1, 0, 0, 0, 0, 1, 0])\n-1.14055064017\narray('b', [1, 0, 1, 1, 1, 1, 1])\n-0.381502288899\narray('b', [0, 1, 1, 0, 0, 1, 0])\n-0.77703640699\narray('b', [1, 1, 0, 1, 1, 1, 0])\n-0.468188501307\narray('b', [1, 0, 0, 1, 1, 0, 0])\n-0.87555254035\narray('b', [0, 0, 0, 1, 0, 0, 0])\n5.06787161961\narray('b', [1, 0, 0, 1, 1, 1, 1])\n-0.483944319054\narray('b', [0, 0, 0, 0, 0, 0, 0])\n" ], [ "pop", "_____no_output_____" ], [ "hof.items", "_____no_output_____" ], [ "import operator\n\nfrom deap import base\nfrom deap import creator\nfrom deap 
import gp\nfrom deap import tools\n\npset = gp.PrimitiveSet(\"MAIN\", arity=1)\npset.addPrimitive(operator.add, 2)\npset.addPrimitive(operator.sub, 2)\npset.addPrimitive(operator.mul, 2)\n\ncreator.create(\"FitnessMin\", base.Fitness, weights=(-1.0,))\ncreator.create(\"Individual\", gp.PrimitiveTree, fitness=creator.FitnessMin,\n pset=pset)\n\ntoolbox = base.Toolbox()\ntoolbox.register(\"expr\", gp.genHalfAndHalf, pset=pset, min_=1, max_=2)\ntoolbox.register(\"individual\", tools.initIterate, creator.Individual,\n toolbox.expr)", "_____no_output_____" ], [ "### insample and out-of-sample test\ndata = pd.read_csv('../../data/hs300.csv',index_col = 'date',parse_dates = 'date')\ndata.vol = data.vol.astype(float)", "_____no_output_____" ], [ "a.", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a66fc6e1fad36e6accfc60f8249a48f472e8acd
330,823
ipynb
Jupyter Notebook
startup-profit-prediction.ipynb
join2AJ/Python_Projects
c329658c7141692b3bc81f814c5cd1fa4e425c24
[ "MIT" ]
null
null
null
startup-profit-prediction.ipynb
join2AJ/Python_Projects
c329658c7141692b3bc81f814c5cd1fa4e425c24
[ "MIT" ]
null
null
null
startup-profit-prediction.ipynb
join2AJ/Python_Projects
c329658c7141692b3bc81f814c5cd1fa4e425c24
[ "MIT" ]
null
null
null
137.900375
135,424
0.875734
[ [ [ "<img src=https://a-static.projektn.sk/2020/11/Startup.jpg>", "_____no_output_____" ], [ "# Startup Profit Prediction", "_____no_output_____" ], [ "# 1. Reading and Understanding the Data", "_____no_output_____" ] ], [ [ "#basic libraries and visualization\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n#statmodels\nimport statsmodels.api as sm\nfrom statsmodels.stats.outliers_influence import variance_inflation_factor\n#sklearn-dataprocessing\nfrom sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV\nfrom sklearn.feature_selection import f_regression\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.preprocessing import PolynomialFeatures, StandardScaler, OneHotEncoder\nfrom sklearn.metrics import r2_score, mean_squared_error\n#sklearn-models\nfrom sklearn.linear_model import LinearRegression, Ridge\nfrom sklearn.svm import SVR\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.ensemble import RandomForestRegressor", "_____no_output_____" ], [ "import warnings\nwarnings.filterwarnings('ignore')", "_____no_output_____" ], [ "sns.set_theme(style='darkgrid', palette='Accent')\npd.options.display.float_format = '{:,.2f}'.format", "_____no_output_____" ] ], [ [ "**About the dataset:**\n\nThis dataset has data collected from New York, California and Florida about 50 business Startups. \nThe variables used in the dataset are Profit, R&D spending(research and development), Administration Spending, and Marketing Spending.\n", "_____no_output_____" ] ], [ [ "import os\nfor dirname, _, filenames in os.walk('/kaggle/input'):\n for filename in filenames:\n print(os.path.join(dirname, filename))", "/kaggle/input/d/farhanmd29/50-startups/50_Startups.csv\n" ], [ "startups = pd.read_csv(\"/kaggle/input/d/farhanmd29/50-startups/50_Startups.csv\")\nstartups.head()", "_____no_output_____" ], [ "startups.shape", "_____no_output_____" ], [ "startups.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 50 entries, 0 to 49\nData columns (total 5 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 R&D Spend 50 non-null float64\n 1 Administration 50 non-null float64\n 2 Marketing Spend 50 non-null float64\n 3 State 50 non-null object \n 4 Profit 50 non-null float64\ndtypes: float64(4), object(1)\nmemory usage: 2.1+ KB\n" ], [ "startups.isnull().sum()", "_____no_output_____" ], [ "startups.duplicated().any()", "_____no_output_____" ], [ "startups.describe()", "_____no_output_____" ] ], [ [ "# 2. EDA", "_____no_output_____" ], [ "## Checking for outliers", "_____no_output_____" ] ], [ [ "startups.columns.values", "_____no_output_____" ], [ "sns.boxplot(data=startups)", "_____no_output_____" ], [ "# We can see that in the Profit column we have an outlier. Since we have a small dataset, this could be a problem\n# in predicting the profit, hence, we are going to remove this outlier.\nQ3, Q1 = np.percentile(startups[\"Profit\"], [75 ,25])\nIQR = Q3 - Q1\nstartups = startups[~(startups.Profit< (Q1 - 1.5*IQR))]", "_____no_output_____" ] ], [ [ "## Visualising Numerical Variables", "_____no_output_____" ] ], [ [ "sns.pairplot(startups[['R&D Spend', 'Administration', 'Marketing Spend', 'Profit']], kind=\"reg\", diag_kind=\"kde\")\nplt.show()", "_____no_output_____" ] ], [ [ "**Insights:**\n\n1. We can see normal distributions of numerical variables. \n\n2. 
R&D Spend and Marketing Spend are in linear relation with target variable.", "_____no_output_____" ], [ "### Profit Distribution", "_____no_output_____" ] ], [ [ "sns.distplot(startups[\"Profit\"], bins=30)\nplt.show()", "_____no_output_____" ] ], [ [ "### R&D Spend vs. Profit Correlation", "_____no_output_____" ] ], [ [ "sns.jointplot(x=startups[\"Profit\"], y=startups[\"R&D Spend\"], kind=\"reg\")\nplt.show()", "_____no_output_____" ] ], [ [ "## Visualising Categorical Variables", "_____no_output_____" ] ], [ [ "g=sns.FacetGrid(data=startups, col=\"State\", height=5, aspect=0.7)\ng.map_dataframe(sns.barplot, palette=\"Accent\")\ng.set_xticklabels(rotation=45)\nplt.show()\n", "_____no_output_____" ] ], [ [ "**Insights:**\n1. Profits and Marketing Spend are higher at Florida than other states.\n2. R&D Spend and Administration are same for all of the states.", "_____no_output_____" ], [ "# 3. Data Preparation", "_____no_output_____" ] ], [ [ "startups_prepared = startups.copy()", "_____no_output_____" ] ], [ [ "## Checking for multicollinearity", "_____no_output_____" ] ], [ [ "numerical = startups_prepared.drop(columns=[\"State\", \"Profit\"])\nvif = pd.DataFrame()\nvif[\"Features\"] = numerical.columns\nvif[\"VIF\"] = [variance_inflation_factor(numerical.values, i) for i in range(numerical.shape[1])]\nvif[\"VIF\"] = round(vif[\"VIF\"], 2)\nvif = vif.sort_values(by = \"VIF\", ascending = False)\nvif", "_____no_output_____" ] ], [ [ "**Insights:**\n1. VIF scores are higher for R&D and Marketing Spend.\n2. Since Administration is not so correlated with Profit as other variables, we will consider dropping this variable, which will drive VIF factor down.\n", "_____no_output_____" ], [ "## Creating dummy variables", "_____no_output_____" ] ], [ [ "startups_prepared = pd.get_dummies(startups_prepared, drop_first=True)\nstartups_prepared.rename(columns={\"R&D Spend\":\"R&D\", \"Marketing Spend\":\"Marketing\", \n \"State_Florida\":\"Florida\", \"State_New York\":\"New York\"}, inplace=True)", "_____no_output_____" ], [ "startups_prepared.head()", "_____no_output_____" ] ], [ [ "## Defining input and target variables", "_____no_output_____" ] ], [ [ "X = startups_prepared.drop(columns=\"Profit\")\ny = startups_prepared.Profit", "_____no_output_____" ] ], [ [ "## Feature selection", "_____no_output_____" ] ], [ [ "data = f_regression(X[[\"R&D\", \"Administration\", \"Marketing\"]], y)\nf_df = pd.DataFrame(data, index=[[\"F_statistic\", \"p_value\"]], columns=X[[\"R&D\", \"Administration\", \"Marketing\"]].columns).T\nf_df", "_____no_output_____" ] ], [ [ "**Insights:**\n1. R&D and Marketing has nearly 0 p-value what implies statistical significance.\n2. On the other hand Administration seems to have no effect in predicting the Profit, as we previously seen from correlation as well.\n3. 
We are going to drop Administration column as it has no statistical significance in our model.", "_____no_output_____" ] ], [ [ "X = X.drop(columns=\"Administration\")", "_____no_output_____" ] ], [ [ "## Splitting the Data into Training and Testing Sets", "_____no_output_____" ] ], [ [ "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=6)", "_____no_output_____" ], [ "#Checking if the split has approximately equal distributions of values\nprint(f\"Train Florida: {X_train['Florida'].mean()}\")\nprint(f\"Test Florida: {X_test['Florida'].mean()}\")\nprint(f\"Train Marketing: {X_train['Marketing'].mean()}\")\nprint(f\"Test Marketing: {X_test['Marketing'].mean()}\")", "Train Florida: 0.3125\nTest Florida: 0.35294117647058826\nTrain Marketing: 235405.88625\nTest Marketing: 174887.8511764706\n" ] ], [ [ "## Scaling the Features", "_____no_output_____" ] ], [ [ "#scaling inputs\nsc_x = StandardScaler()\nX_train = sc_x.fit_transform(X_train)\nX_test = sc_x.transform(X_test)\n#scaling target variable\nsc_y = StandardScaler()\ny_train = sc_y.fit_transform(y_train.values.reshape(-1, 1))\ny_test = sc_y.transform(y_test.values.reshape(-1, 1))", "_____no_output_____" ], [ "y_train = y_train.reshape(32)\ny_test = y_test.reshape(17)", "_____no_output_____" ] ], [ [ "# 4. Model Selection and Evaluation", "_____no_output_____" ], [ "## Multiple Linear regression", "_____no_output_____" ] ], [ [ "Rsqr_test = []\norder = range(1,4)\nfor n in order:\n pr = PolynomialFeatures(degree=n)\n X_train_poly = pr.fit_transform(X_train)\n X_test_poly = pr.fit_transform(X_test)\n lr = LinearRegression()\n lr.fit(X_train_poly, y_train)\n Rsqr_test.append(lr.score(X_test_poly, y_test))\nRsqr_test", "_____no_output_____" ], [ "plt.plot(Rsqr_test)\nplt.show()", "_____no_output_____" ] ], [ [ "## Support Vector regression", "_____no_output_____" ] ], [ [ "svr = SVR()\nsvr.fit(X_train, y_train)\nsvr.score(X_test, y_test)", "_____no_output_____" ] ], [ [ "## Decision Tree regression", "_____no_output_____" ] ], [ [ "dt = DecisionTreeRegressor()\ndt.fit(X_train, y_train)\ndt.score(X_test, y_test)", "_____no_output_____" ] ], [ [ "## Random Forest regression", "_____no_output_____" ] ], [ [ "rf = RandomForestRegressor()\nrf.fit(X_train, y_train)\nrf.score(X_test, y_test)", "_____no_output_____" ] ], [ [ "#### The best performing model is Multiple Linear Regression.", "_____no_output_____" ] ], [ [ "lr = LinearRegression()\nlr.fit(X_train, y_train)\nr2_score = lr.score(X_test, y_test)", "_____no_output_____" ], [ "# Adjusted R-square of the model\nn = X_test.shape[0]\np = X_test.shape[1]\n\nadjusted_r2 = 1-(1-r2_score)*(n-1)/(n-p-1)\nadjusted_r2", "_____no_output_____" ] ], [ [ "# 5. 
Residuals analysis", "_____no_output_____" ] ], [ [ "y_test_hat = lr.predict(X_test)", "_____no_output_____" ], [ "plt.scatter(x=y_test, y=y_test_hat, alpha=0.8)\nplt.plot(y_test, y_test, color='darkgreen')\nplt.show()", "_____no_output_____" ], [ "residuals = y_test - y_test_hat", "_____no_output_____" ] ], [ [ "The errors should not follow any pattern and equally distributed.", "_____no_output_____" ] ], [ [ "plt.scatter(y=residuals, x=y_test_hat, alpha=0.8)\nplt.show()", "_____no_output_____" ], [ "lr.coef_", "_____no_output_____" ], [ "lr.intercept_", "_____no_output_____" ] ], [ [ "## Our preferred model now has an equation that looks like this:\n\n## $$ PROFIT = -3.77 + 0.96 * R&D Spend + 0.01 * Marketing Spend + (-0.01) * Florida + (-0.06) * NewYork $$", "_____no_output_____" ], [ "Note: Since we scaled the data for modeling, if we want to predict profits, we have to perform inverse_transform() on the predicted values.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ] ]
4a673209010efeea1950bf516e1c75008ccc92fa
92,034
ipynb
Jupyter Notebook
data/opening_wkd_data/.ipynb_checkpoints/wkd_scrape-checkpoint.ipynb
lucyrichmond/butlerfinalproject
4e6a96ede438de6ca55d3186df7ebe21445c8a1d
[ "CC-BY-3.0" ]
null
null
null
data/opening_wkd_data/.ipynb_checkpoints/wkd_scrape-checkpoint.ipynb
lucyrichmond/butlerfinalproject
4e6a96ede438de6ca55d3186df7ebe21445c8a1d
[ "CC-BY-3.0" ]
null
null
null
data/opening_wkd_data/.ipynb_checkpoints/wkd_scrape-checkpoint.ipynb
lucyrichmond/butlerfinalproject
4e6a96ede438de6ca55d3186df7ebe21445c8a1d
[ "CC-BY-3.0" ]
null
null
null
32.22479
1,308
0.342384
[ [ [ "import os\nimport requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nfrom splinter import Browser\nimport splinter\nimport numpy as np", "_____no_output_____" ], [ "# create variables to store scraped info\nmovie_titles = []\nopening_amts = []\ntotal_gross = []\nper_of_total = []\nnum_of_theaters = []\nopen_date = []", "_____no_output_____" ], [ "# loop through pages to get data from all 1000 movies \n\npages = np.arange(0, 801, 200)\n\nfor page in pages:\n page = requests.get('https://www.boxofficemojo.com/chart/top_opening_weekend/?offset=' + str(page))\n \n # Create BeautifulSoup object; parse with 'lxml'\n soup = BeautifulSoup(page.text, 'lxml')\n \n titles = soup.find_all('td', class_ = 'a-text-left mojo-field-type-release mojo-cell-wide')\n for movie in titles:\n movie_titles.append(movie.select('a')[0].string)\n\n opening = soup.find_all('td', class_ = 'a-text-right mojo-field-type-money')\n for amt in opening:\n opening_amts.append(amt.string)\n\n total = soup.find_all('td', class_ = 'a-text-right mojo-field-type-money mojo-estimatable')\n for money in total:\n total_gross.append(money.string)\n\n percent_of_total = soup.find_all('td', class_ = 'a-text-right mojo-field-type-percent')\n for percent in percent_of_total:\n per_of_total.append(percent.string)\n\n theaters = soup.find_all('td', class_ = 'a-text-right mojo-field-type-positive_integer mojo-estimatable')\n for thr in theaters:\n num_of_theaters.append(thr.string)\n\n date = soup.find_all('td', class_ = 'a-text-left mojo-field-type-date a-nowrap')\n for day in date:\n open_date.append(day.string)", "_____no_output_____" ], [ "gross_df = pd.DataFrame({\"Gross & Average\": total_gross})\n\ngross_df", "_____no_output_____" ], [ "# strip punctuation and turn into integer\ngross_df['Gross & Average'] = gross_df['Gross & Average'].map(lambda x: x.lstrip('$')).str.replace(',', '').astype(int)\n\n\n# separate total gross from avg. 
per theater\navg_thr = gross_df[gross_df['Gross & Average'] < 100000].dropna().reset_index()['Gross & Average']\ntotal_grss = gross_df[gross_df['Gross & Average'] > 100000].dropna().reset_index()['Gross & Average']\n\n\n\ngross_df['Total Gross'] = total_grss\ngross_df['Average per Theater'] = avg_thr\n\ngross_df = gross_df.drop(columns=['Gross & Average']).dropna()\ngross_df['Movie Title'] = movie_titles\n\ngross_df", "_____no_output_____" ], [ "# create DataFrame\nopening_df = pd.DataFrame({\"Movie Title\": movie_titles, \"Opening\": opening_amts, \"Total Gross\": total_grss, \"% of Total\": per_of_total, \"Theaters\": num_of_theaters, \"Average per Theater\": avg_thr, \"Date\": open_date})\n#pd.options.display.float_format = '{:,}'.format\n\n#df['new_column_name'] = df['column_name'].map('{:,.2f}'.format)\n\n#opening_df['Total Gross'] = '$' + (opening_df['Total Gross'].astype(float)).map('{:,.2f}'.format).astype(str)\n#opening_df['Average per Theater'] = '$' + (opening_df['Average per Theater'].astype(float)).map('{:,.2f}'.format).astype(str)\n\nopening_df\n", "_____no_output_____" ], [ "# strip punctuation and turn into integer\nopening_df['Opening'] = opening_df['Opening'].map(lambda x: x.lstrip('$')).str.replace(',', '').astype(int)\nopening_df['Theaters'] = opening_df['Theaters'].str.replace(',', '').astype(int)\n\nopening_df", "_____no_output_____" ], [ "import datetime as dt \nopening_df['Date'] = pd.to_datetime(opening_df['Date'])\nopening_df", "_____no_output_____" ], [ "#month = opening_df['Date'].dt.month\nmonth = opening_df['Date']\n\nopening_df['Season Number'] = (month.dt.month%12 + 3)//3\nopening_df", "_____no_output_____" ], [ "year = opening_df['Date'].dt.year\nopening_df['Release Year'] = year\nopening_df", "_____no_output_____" ], [ "opening_df[\"Movie & Year\"] = opening_df[\"Movie Title\"] + \" \" + \"(\" + opening_df[\"Release Year\"].astype(str) + \")\"\nopening_df.head()", "_____no_output_____" ], [ "seasons = opening_df['Season Number'].astype(int)\n\nseason_name = []\n\nfor s in seasons:\n if (s == 1):\n season_name.append('Winter')\n elif (s == 2):\n season_name.append('Spring')\n elif (s == 3):\n season_name.append('Summer')\n elif (s == 4):\n season_name.append('Fall')\n\nseason_name", "_____no_output_____" ], [ "opening_df['Season Name'] = season_name\n\nopening_df", "_____no_output_____" ], [ "# export to csv\nopening_df.to_csv('data/opening.csv')", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a673525bbab9a7e09476e40d40d8e07ae3d01d0
44,656
ipynb
Jupyter Notebook
Module 2/Ex2.3 UCB.ipynb
tycho01/Reinforcement-Learning-Explained
ec59f2c977999687adc01faeba8cadae30a5a7ba
[ "Unlicense" ]
null
null
null
Module 2/Ex2.3 UCB.ipynb
tycho01/Reinforcement-Learning-Explained
ec59f2c977999687adc01faeba8cadae30a5a7ba
[ "Unlicense" ]
null
null
null
Module 2/Ex2.3 UCB.ipynb
tycho01/Reinforcement-Learning-Explained
ec59f2c977999687adc01faeba8cadae30a5a7ba
[ "Unlicense" ]
null
null
null
169.151515
23,444
0.891571
[ [ [ "# DAT257x: Reinforcement Learning Explained\n\n## Lab 2: Bandits\n\n### Exercise 2.3: UCB", "_____no_output_____" ] ], [ [ "import numpy as np\nimport sys\n\nif \"../\" not in sys.path:\n sys.path.append(\"../\") \n\nfrom lib.envs.bandit import BanditEnv\nfrom lib.simulation import Experiment", "_____no_output_____" ], [ "#Policy interface\nclass Policy:\n #num_actions: (int) Number of arms [indexed by 0 ... num_actions-1]\n def __init__(self, num_actions):\n self.num_actions = num_actions\n \n def act(self):\n pass\n \n def feedback(self, action, reward):\n pass", "_____no_output_____" ], [ "#Greedy policy\nclass Greedy(Policy):\n def __init__(self, num_actions):\n Policy.__init__(self, num_actions)\n self.name = \"Greedy\"\n self.total_rewards = np.zeros(num_actions, dtype = np.longdouble)\n self.total_counts = np.zeros(num_actions, dtype = np.longdouble)\n \n def act(self):\n current_averages = np.divide(self.total_rewards, self.total_counts, where = self.total_counts > 0)\n current_averages[self.total_counts <= 0] = 0.5 #Correctly handles Bernoulli rewards; over-estimates otherwise\n current_action = np.argmax(current_averages)\n return current_action\n \n def feedback(self, action, reward):\n self.total_rewards[action] += reward\n self.total_counts[action] += 1", "_____no_output_____" ], [ "#Epsilon Greedy policy\nclass EpsilonGreedy(Greedy):\n def __init__(self, num_actions, epsilon):\n Greedy.__init__(self, num_actions)\n if (epsilon is None or epsilon < 0 or epsilon > 1):\n print(\"EpsilonGreedy: Invalid value of epsilon\", flush = True)\n sys.exit(0)\n \n self.epsilon = epsilon\n self.name = \"Epsilon Greedy\"\n \n def act(self):\n choice = None\n if self.epsilon == 0:\n choice = 0\n elif self.epsilon == 1:\n choice = 1\n else:\n choice = np.random.binomial(1, self.epsilon)\n \n if choice == 1:\n return np.random.choice(self.num_actions)\n else:\n current_averages = np.divide(self.total_rewards, self.total_counts, where = self.total_counts > 0)\n current_averages[self.total_counts <= 0] = 0.5 #Correctly handles Bernoulli rewards; over-estimates otherwise\n current_action = np.argmax(current_averages)\n return current_action\n ", "_____no_output_____" ] ], [ [ "Now let's implement a UCB algorithm. \n\n", "_____no_output_____" ] ], [ [ "import math\n#UCB policy\nclass UCB(Greedy):\n def __init__(self, num_actions):\n Greedy.__init__(self, num_actions)\n self.name = \"UCB\"\n self.round = 0\n \n def act(self):\n current_action = None\n self.round += 1\n if self.round <= self.num_actions:\n \"\"\"The first k rounds, where k is the number of arms/actions, play each arm/action once\"\"\"\n current_action = self.round % self.num_actions\n else:\n \"\"\"At round t, play the arms with maximum average and exploration bonus\"\"\"\n current_averages = np.divide(self.total_rewards, self.total_counts)\n f = lambda idx, avg: avg + math.sqrt(2 * math.log(self.round) / self.total_counts[idx])\n scores = np.fromiter((f(i, v) for i, v in enumerate(current_averages)), dtype = np.longdouble)\n current_action = np.argmax(scores)\n return current_action\n", "_____no_output_____" ] ], [ [ "Now let's prepare the simulation. 
", "_____no_output_____" ] ], [ [ "evaluation_seed = 1239\nnum_actions = 10\ntrials = 10000\n# distribution = \"bernoulli\"\ndistribution = \"normal\"\n", "_____no_output_____" ] ], [ [ "What do you think the regret graph would look like?", "_____no_output_____" ] ], [ [ "env = BanditEnv(num_actions, distribution, evaluation_seed)\nagent = UCB(num_actions)\nexperiment = Experiment(env, agent)\nexperiment.run_bandit(trials)", "Distribution: normal (array([ 1.67210906, 0.04144904, -2.26004314, 0.55185287, -0.99557374,\n -0.2838564 , -0.50553487, -0.05963477, -0.54748047, 0.61487342]), array([0.16003572, 0.39623439, 0.70679209, 0.13345484, 0.97314585,\n 0.28711056, 0.11221114, 0.52693607, 0.53345874, 0.62434873]))\nOptimal arm: 0\n--------------------------------------------------\nPolicy: UCB \nAverage Reward: 1.6593342407230154 \nAverage Regret: 0.009883337889958318\nArm pulls: [9.935e+03 7.000e+00 1.000e+00 1.500e+01 3.000e+00 4.000e+00 4.000e+00\n 7.000e+00 4.000e+00 2.000e+01]\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a675de3568b69c7e9601f7c023eb7b906aa0f0f
53,171
ipynb
Jupyter Notebook
2021_12_06_generate_data.ipynb
TwQin0403/tbrain_2021_winter_esun_credit_card_prediction
fcfd2b14913b4d5b942920506337a81c2d85ef19
[ "MIT" ]
null
null
null
2021_12_06_generate_data.ipynb
TwQin0403/tbrain_2021_winter_esun_credit_card_prediction
fcfd2b14913b4d5b942920506337a81c2d85ef19
[ "MIT" ]
null
null
null
2021_12_06_generate_data.ipynb
TwQin0403/tbrain_2021_winter_esun_credit_card_prediction
fcfd2b14913b4d5b942920506337a81c2d85ef19
[ "MIT" ]
null
null
null
29.071077
85
0.437137
[ [ [ "import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nfrom collections import Counter\nimport math\n\nimport sys\n\nsys.path.insert(0, './code')\nimport dataloader\nimport plot", "_____no_output_____" ], [ "import importlib\n\nimportlib.reload(dataloader)\nimportlib.reload(plot)\n\nfeat_handler = dataloader.AmtFeatLoader()\n# time_feat_handler = dataloader.TimeFeatLoader()\nloader = dataloader.DataLoader()\nlabel_handler = dataloader.LabelLoader()", "_____no_output_____" ], [ "# test_data = loader.load_result('2021_11_03_test_data.joblib')\ndata = loader.load_data('train.csv', data_type='csv')", "_____no_output_____" ], [ "feats = []\nfor i in range(50):\n print(\"Round {}\".format(i + 1))\n profile_nums = [j + 10000000 for j in range(i * 10000, (i + 1) * 10000)]\n use_data = data[data['chid'].isin(profile_nums)].copy()\n print(use_data.shape)\n feat_handler.update_data(use_data)\n feat_handler.fit()\n feats.append(feat_handler.feats)\n # loader.save_data(feat_handler.feats,\n # '2021_12_06_profile_amt_{}.joblib'.format(i),\n # '{}_{}'.format(i * 100000, (i + 1) * 100000))", "Round 1\n(650862, 53)\n{'chid': 10000000}\n{'chid': 10000313}\n{'chid': 10000626}\n{'chid': 10000939}\n{'chid': 10001252}\n{'chid': 10001565}\n{'chid': 10001878}\n{'chid': 10002191}\n{'chid': 10002504}\n{'chid': 10002817}\n{'chid': 10003130}\n{'chid': 10003443}\n{'chid': 10003756}\n{'chid': 10004069}\n{'chid': 10004382}\n{'chid': 10004695}\n{'chid': 10005008}\n{'chid': 10005321}\n{'chid': 10005634}\n{'chid': 10005947}\n{'chid': 10006260}\n{'chid': 10006573}\n{'chid': 10006886}\n{'chid': 10007199}\n{'chid': 10007512}\n{'chid': 10007825}\n{'chid': 10008138}\n{'chid': 10008451}\n{'chid': 10008764}\n{'chid': 10009077}\n{'chid': 10009390}\n{'chid': 10009703}\nRound 2\n(662138, 53)\n{'chid': 10010000}\n{'chid': 10010313}\n{'chid': 10010626}\n{'chid': 10010939}\n{'chid': 10011252}\n{'chid': 10011565}\n{'chid': 10011878}\n{'chid': 10012191}\n{'chid': 10012504}\n{'chid': 10012817}\n{'chid': 10013130}\n{'chid': 10013443}\n{'chid': 10013756}\n{'chid': 10014069}\n{'chid': 10014382}\n{'chid': 10014695}\n{'chid': 10015008}\n{'chid': 10015321}\n{'chid': 10015634}\n{'chid': 10015947}\n{'chid': 10016260}\n{'chid': 10016573}\n{'chid': 10016886}\n{'chid': 10017199}\n{'chid': 10017512}\n{'chid': 10017825}\n{'chid': 10018138}\n{'chid': 10018451}\n{'chid': 10018764}\n{'chid': 10019077}\n{'chid': 10019390}\n{'chid': 10019703}\nRound 3\n(656647, 53)\n{'chid': 10020000}\n{'chid': 10020313}\n{'chid': 10020626}\n{'chid': 10020939}\n{'chid': 10021252}\n{'chid': 10021565}\n{'chid': 10021878}\n{'chid': 10022191}\n{'chid': 10022504}\n{'chid': 10022817}\n{'chid': 10023130}\n{'chid': 10023443}\n{'chid': 10023756}\n{'chid': 10024069}\n{'chid': 10024382}\n{'chid': 10024695}\n{'chid': 10025008}\n{'chid': 10025321}\n{'chid': 10025634}\n{'chid': 10025947}\n{'chid': 10026260}\n{'chid': 10026573}\n{'chid': 10026886}\n{'chid': 10027199}\n{'chid': 10027512}\n{'chid': 10027825}\n{'chid': 10028138}\n{'chid': 10028451}\n{'chid': 10028764}\n{'chid': 10029077}\n{'chid': 10029390}\n{'chid': 10029703}\nRound 4\n(660970, 53)\n{'chid': 10030000}\n{'chid': 10030313}\n{'chid': 10030626}\n{'chid': 10030939}\n{'chid': 10031252}\n{'chid': 10031565}\n{'chid': 10031878}\n{'chid': 10032191}\n{'chid': 10032504}\n{'chid': 10032817}\n{'chid': 10033130}\n{'chid': 10033443}\n{'chid': 10033756}\n{'chid': 10034069}\n{'chid': 10034382}\n{'chid': 10034695}\n{'chid': 10035008}\n{'chid': 10035321}\n{'chid': 10035634}\n{'chid': 
10035947}\n{'chid': 10036260}\n{'chid': 10036573}\n{'chid': 10036886}\n{'chid': 10037199}\n{'chid': 10037512}\n{'chid': 10037825}\n{'chid': 10038138}\n{'chid': 10038451}\n{'chid': 10038764}\n{'chid': 10039077}\n{'chid': 10039390}\n{'chid': 10039703}\nRound 5\n(657081, 53)\n{'chid': 10040000}\n{'chid': 10040313}\n{'chid': 10040626}\n{'chid': 10040939}\n{'chid': 10041252}\n{'chid': 10041565}\n{'chid': 10041878}\n{'chid': 10042191}\n{'chid': 10042504}\n{'chid': 10042817}\n{'chid': 10043130}\n{'chid': 10043443}\n{'chid': 10043756}\n{'chid': 10044069}\n{'chid': 10044382}\n{'chid': 10044695}\n{'chid': 10045008}\n{'chid': 10045321}\n{'chid': 10045634}\n{'chid': 10045947}\n{'chid': 10046260}\n{'chid': 10046573}\n{'chid': 10046886}\n{'chid': 10047199}\n{'chid': 10047512}\n{'chid': 10047825}\n{'chid': 10048138}\n{'chid': 10048451}\n{'chid': 10048764}\n{'chid': 10049077}\n{'chid': 10049390}\n{'chid': 10049703}\nRound 6\n(653039, 53)\n{'chid': 10050000}\n{'chid': 10050313}\n{'chid': 10050626}\n{'chid': 10050939}\n{'chid': 10051252}\n{'chid': 10051565}\n{'chid': 10051878}\n{'chid': 10052191}\n{'chid': 10052504}\n{'chid': 10052817}\n{'chid': 10053130}\n{'chid': 10053443}\n{'chid': 10053756}\n{'chid': 10054069}\n{'chid': 10054382}\n{'chid': 10054695}\n{'chid': 10055008}\n{'chid': 10055321}\n{'chid': 10055634}\n{'chid': 10055947}\n{'chid': 10056260}\n{'chid': 10056573}\n{'chid': 10056886}\n{'chid': 10057199}\n{'chid': 10057512}\n{'chid': 10057825}\n{'chid': 10058138}\n{'chid': 10058451}\n{'chid': 10058764}\n{'chid': 10059077}\n{'chid': 10059390}\n{'chid': 10059703}\nRound 7\n(649114, 53)\n{'chid': 10060000}\n{'chid': 10060313}\n{'chid': 10060626}\n{'chid': 10060939}\n{'chid': 10061252}\n{'chid': 10061565}\n{'chid': 10061878}\n{'chid': 10062191}\n{'chid': 10062504}\n{'chid': 10062817}\n{'chid': 10063130}\n{'chid': 10063443}\n{'chid': 10063756}\n{'chid': 10064069}\n{'chid': 10064382}\n{'chid': 10064695}\n{'chid': 10065008}\n{'chid': 10065321}\n{'chid': 10065634}\n{'chid': 10065947}\n{'chid': 10066260}\n{'chid': 10066573}\n{'chid': 10066886}\n{'chid': 10067199}\n{'chid': 10067512}\n{'chid': 10067825}\n{'chid': 10068138}\n{'chid': 10068451}\n{'chid': 10068764}\n{'chid': 10069077}\n{'chid': 10069390}\n{'chid': 10069703}\nRound 8\n(662248, 53)\n{'chid': 10070000}\n{'chid': 10070313}\n{'chid': 10070626}\n{'chid': 10070939}\n{'chid': 10071252}\n{'chid': 10071565}\n{'chid': 10071878}\n{'chid': 10072191}\n{'chid': 10072504}\n{'chid': 10072817}\n{'chid': 10073130}\n{'chid': 10073443}\n{'chid': 10073756}\n{'chid': 10074069}\n{'chid': 10074382}\n{'chid': 10074695}\n{'chid': 10075008}\n{'chid': 10075321}\n{'chid': 10075634}\n{'chid': 10075947}\n{'chid': 10076260}\n{'chid': 10076573}\n{'chid': 10076886}\n{'chid': 10077199}\n{'chid': 10077512}\n{'chid': 10077825}\n{'chid': 10078138}\n{'chid': 10078451}\n{'chid': 10078764}\n{'chid': 10079077}\n{'chid': 10079390}\n{'chid': 10079703}\nRound 9\n(658905, 53)\n{'chid': 10080000}\n{'chid': 10080313}\n{'chid': 10080626}\n{'chid': 10080939}\n{'chid': 10081252}\n{'chid': 10081565}\n{'chid': 10081878}\n{'chid': 10082191}\n{'chid': 10082504}\n{'chid': 10082817}\n{'chid': 10083130}\n{'chid': 10083443}\n{'chid': 10083756}\n{'chid': 10084069}\n{'chid': 10084382}\n{'chid': 10084695}\n{'chid': 10085008}\n{'chid': 10085321}\n{'chid': 10085634}\n{'chid': 10085947}\n{'chid': 10086260}\n{'chid': 10086573}\n{'chid': 10086886}\n{'chid': 10087199}\n{'chid': 10087512}\n{'chid': 10087825}\n{'chid': 10088138}\n{'chid': 10088451}\n{'chid': 10088764}\n{'chid': 10089077}\n{'chid': 10089390}\n{'chid': 
10089703}\nRound 10\n(655717, 53)\n{'chid': 10090000}\n{'chid': 10090313}\n{'chid': 10090626}\n{'chid': 10090939}\n{'chid': 10091252}\n{'chid': 10091565}\n{'chid': 10091878}\n{'chid': 10092191}\n{'chid': 10092504}\n{'chid': 10092817}\n{'chid': 10093130}\n{'chid': 10093443}\n{'chid': 10093756}\n{'chid': 10094069}\n{'chid': 10094382}\n{'chid': 10094695}\n{'chid': 10095008}\n{'chid': 10095321}\n{'chid': 10095634}\n{'chid': 10095947}\n{'chid': 10096260}\n{'chid': 10096573}\n{'chid': 10096886}\n{'chid': 10097199}\n{'chid': 10097512}\n{'chid': 10097825}\n{'chid': 10098138}\n{'chid': 10098451}\n{'chid': 10098764}\n{'chid': 10099077}\n{'chid': 10099390}\n{'chid': 10099703}\nRound 11\n(658147, 53)\n{'chid': 10100000}\n{'chid': 10100313}\n{'chid': 10100626}\n{'chid': 10100939}\n{'chid': 10101252}\n{'chid': 10101565}\n{'chid': 10101878}\n{'chid': 10102191}\n{'chid': 10102504}\n{'chid': 10102817}\n{'chid': 10103130}\n{'chid': 10103443}\n{'chid': 10103756}\n{'chid': 10104069}\n{'chid': 10104382}\n{'chid': 10104695}\n{'chid': 10105008}\n{'chid': 10105321}\n{'chid': 10105634}\n{'chid': 10105947}\n{'chid': 10106260}\n{'chid': 10106573}\n{'chid': 10106886}\n{'chid': 10107199}\n{'chid': 10107512}\n{'chid': 10107825}\n{'chid': 10108138}\n{'chid': 10108451}\n{'chid': 10108764}\n{'chid': 10109077}\n{'chid': 10109390}\n{'chid': 10109703}\nRound 12\n(651421, 53)\n{'chid': 10110000}\n{'chid': 10110313}\n{'chid': 10110626}\n{'chid': 10110939}\n{'chid': 10111252}\n{'chid': 10111565}\n{'chid': 10111878}\n{'chid': 10112191}\n{'chid': 10112504}\n{'chid': 10112817}\n{'chid': 10113130}\n{'chid': 10113443}\n{'chid': 10113756}\n{'chid': 10114069}\n{'chid': 10114382}\n{'chid': 10114695}\n{'chid': 10115008}\n{'chid': 10115321}\n{'chid': 10115634}\n{'chid': 10115947}\n{'chid': 10116260}\n{'chid': 10116573}\n{'chid': 10116886}\n{'chid': 10117199}\n{'chid': 10117512}\n{'chid': 10117825}\n{'chid': 10118138}\n{'chid': 10118451}\n{'chid': 10118764}\n{'chid': 10119077}\n{'chid': 10119390}\n{'chid': 10119703}\nRound 13\n(666887, 53)\n{'chid': 10120000}\n{'chid': 10120313}\n{'chid': 10120626}\n{'chid': 10120939}\n{'chid': 10121252}\n{'chid': 10121565}\n{'chid': 10121878}\n{'chid': 10122191}\n{'chid': 10122504}\n{'chid': 10122817}\n{'chid': 10123130}\n{'chid': 10123443}\n{'chid': 10123756}\n{'chid': 10124069}\n{'chid': 10124382}\n{'chid': 10124695}\n{'chid': 10125008}\n{'chid': 10125321}\n{'chid': 10125634}\n{'chid': 10125947}\n{'chid': 10126260}\n{'chid': 10126573}\n{'chid': 10126886}\n{'chid': 10127199}\n{'chid': 10127512}\n{'chid': 10127825}\n{'chid': 10128138}\n{'chid': 10128451}\n{'chid': 10128764}\n{'chid': 10129077}\n{'chid': 10129390}\n{'chid': 10129703}\nRound 14\n(666199, 53)\n{'chid': 10130000}\n{'chid': 10130313}\n{'chid': 10130626}\n{'chid': 10130939}\n{'chid': 10131252}\n{'chid': 10131565}\n{'chid': 10131878}\n{'chid': 10132191}\n{'chid': 10132504}\n{'chid': 10132817}\n{'chid': 10133130}\n{'chid': 10133443}\n{'chid': 10133756}\n{'chid': 10134069}\n{'chid': 10134382}\n{'chid': 10134695}\n{'chid': 10135008}\n{'chid': 10135321}\n{'chid': 10135634}\n{'chid': 10135947}\n{'chid': 10136260}\n{'chid': 10136573}\n{'chid': 10136886}\n{'chid': 10137199}\n{'chid': 10137512}\n{'chid': 10137825}\n{'chid': 10138138}\n{'chid': 10138451}\n{'chid': 10138764}\n{'chid': 10139077}\n{'chid': 10139390}\n{'chid': 10139703}\nRound 15\n(666683, 53)\n{'chid': 10140000}\n{'chid': 10140313}\n{'chid': 10140626}\n{'chid': 10140939}\n{'chid': 10141252}\n{'chid': 10141565}\n{'chid': 10141878}\n{'chid': 10142191}\n{'chid': 10142504}\n{'chid': 
10142817}\n{'chid': 10143130}\n{'chid': 10143443}\n{'chid': 10143756}\n{'chid': 10144069}\n{'chid': 10144382}\n{'chid': 10144695}\n{'chid': 10145008}\n{'chid': 10145321}\n{'chid': 10145634}\n{'chid': 10145947}\n{'chid': 10146260}\n{'chid': 10146573}\n{'chid': 10146886}\n{'chid': 10147199}\n{'chid': 10147512}\n{'chid': 10147825}\n{'chid': 10148138}\n{'chid': 10148451}\n{'chid': 10148764}\n{'chid': 10149077}\n{'chid': 10149390}\n{'chid': 10149703}\nRound 16\n(654408, 53)\n{'chid': 10150000}\n{'chid': 10150313}\n{'chid': 10150626}\n{'chid': 10150939}\n{'chid': 10151252}\n{'chid': 10151565}\n{'chid': 10151878}\n{'chid': 10152191}\n{'chid': 10152504}\n{'chid': 10152817}\n{'chid': 10153130}\n{'chid': 10153443}\n{'chid': 10153756}\n{'chid': 10154069}\n{'chid': 10154382}\n{'chid': 10154695}\n{'chid': 10155008}\n{'chid': 10155321}\n{'chid': 10155634}\n{'chid': 10155947}\n{'chid': 10156260}\n{'chid': 10156573}\n{'chid': 10156886}\n{'chid': 10157199}\n{'chid': 10157512}\n{'chid': 10157825}\n{'chid': 10158138}\n{'chid': 10158451}\n{'chid': 10158764}\n{'chid': 10159077}\n{'chid': 10159390}\n{'chid': 10159703}\nRound 17\n(653969, 53)\n{'chid': 10160000}\n{'chid': 10160313}\n{'chid': 10160626}\n{'chid': 10160939}\n{'chid': 10161252}\n{'chid': 10161565}\n{'chid': 10161878}\n{'chid': 10162191}\n{'chid': 10162504}\n{'chid': 10162817}\n{'chid': 10163130}\n{'chid': 10163443}\n{'chid': 10163756}\n{'chid': 10164069}\n{'chid': 10164382}\n{'chid': 10164695}\n{'chid': 10165008}\n{'chid': 10165321}\n{'chid': 10165634}\n{'chid': 10165947}\n{'chid': 10166260}\n{'chid': 10166573}\n{'chid': 10166886}\n{'chid': 10167199}\n{'chid': 10167512}\n{'chid': 10167825}\n{'chid': 10168138}\n{'chid': 10168451}\n{'chid': 10168764}\n{'chid': 10169077}\n{'chid': 10169390}\n{'chid': 10169703}\nRound 18\n(660138, 53)\n{'chid': 10170000}\n{'chid': 10170313}\n{'chid': 10170626}\n{'chid': 10170939}\n{'chid': 10171252}\n{'chid': 10171565}\n{'chid': 10171878}\n{'chid': 10172191}\n{'chid': 10172504}\n{'chid': 10172817}\n{'chid': 10173130}\n{'chid': 10173443}\n{'chid': 10173756}\n{'chid': 10174069}\n{'chid': 10174382}\n{'chid': 10174695}\n{'chid': 10175008}\n{'chid': 10175321}\n{'chid': 10175634}\n{'chid': 10175947}\n{'chid': 10176260}\n{'chid': 10176573}\n{'chid': 10176886}\n{'chid': 10177199}\n{'chid': 10177512}\n{'chid': 10177825}\n{'chid': 10178138}\n{'chid': 10178451}\n{'chid': 10178764}\n{'chid': 10179077}\n{'chid': 10179390}\n{'chid': 10179703}\nRound 19\n(667952, 53)\n{'chid': 10180000}\n{'chid': 10180313}\n{'chid': 10180626}\n{'chid': 10180939}\n{'chid': 10181252}\n{'chid': 10181565}\n{'chid': 10181878}\n{'chid': 10182191}\n{'chid': 10182504}\n{'chid': 10182817}\n{'chid': 10183130}\n{'chid': 10183443}\n{'chid': 10183756}\n{'chid': 10184069}\n{'chid': 10184382}\n{'chid': 10184695}\n{'chid': 10185008}\n{'chid': 10185321}\n{'chid': 10185634}\n{'chid': 10185947}\n{'chid': 10186260}\n{'chid': 10186573}\n{'chid': 10186886}\n{'chid': 10187199}\n{'chid': 10187512}\n{'chid': 10187825}\n{'chid': 10188138}\n{'chid': 10188451}\n{'chid': 10188764}\n{'chid': 10189077}\n{'chid': 10189390}\n{'chid': 10189703}\nRound 20\n(656042, 53)\n{'chid': 10190000}\n{'chid': 10190313}\n{'chid': 10190626}\n{'chid': 10190939}\n{'chid': 10191252}\n{'chid': 10191565}\n{'chid': 10191878}\n{'chid': 10192191}\n{'chid': 10192504}\n{'chid': 10192817}\n{'chid': 10193130}\n{'chid': 10193443}\n{'chid': 10193756}\n{'chid': 10194069}\n{'chid': 10194382}\n{'chid': 10194695}\n{'chid': 10195008}\n{'chid': 10195321}\n{'chid': 10195634}\n{'chid': 10195947}\n{'chid': 
10196260}\n{'chid': 10196573}\n{'chid': 10196886}\n{'chid': 10197199}\n{'chid': 10197512}\n{'chid': 10197825}\n{'chid': 10198138}\n{'chid': 10198451}\n{'chid': 10198764}\n{'chid': 10199077}\n{'chid': 10199390}\n{'chid': 10199703}\nRound 21\n(657131, 53)\n{'chid': 10200000}\n{'chid': 10200313}\n{'chid': 10200626}\n{'chid': 10200939}\n{'chid': 10201252}\n{'chid': 10201565}\n{'chid': 10201878}\n{'chid': 10202191}\n{'chid': 10202504}\n{'chid': 10202817}\n{'chid': 10203130}\n{'chid': 10203443}\n{'chid': 10203756}\n{'chid': 10204069}\n{'chid': 10204382}\n{'chid': 10204695}\n{'chid': 10205008}\n{'chid': 10205321}\n{'chid': 10205634}\n{'chid': 10205947}\n{'chid': 10206260}\n{'chid': 10206573}\n{'chid': 10206886}\n{'chid': 10207199}\n{'chid': 10207512}\n{'chid': 10207825}\n{'chid': 10208138}\n{'chid': 10208451}\n{'chid': 10208764}\n{'chid': 10209077}\n{'chid': 10209390}\n{'chid': 10209703}\nRound 22\n(660988, 53)\n{'chid': 10210000}\n{'chid': 10210313}\n{'chid': 10210626}\n{'chid': 10210939}\n{'chid': 10211252}\n{'chid': 10211565}\n{'chid': 10211878}\n{'chid': 10212191}\n{'chid': 10212504}\n{'chid': 10212817}\n{'chid': 10213130}\n{'chid': 10213443}\n{'chid': 10213756}\n{'chid': 10214069}\n{'chid': 10214382}\n{'chid': 10214695}\n{'chid': 10215008}\n{'chid': 10215321}\n{'chid': 10215634}\n{'chid': 10215947}\n{'chid': 10216260}\n{'chid': 10216573}\n{'chid': 10216886}\n{'chid': 10217199}\n{'chid': 10217512}\n{'chid': 10217825}\n{'chid': 10218138}\n{'chid': 10218451}\n{'chid': 10218764}\n{'chid': 10219077}\n{'chid': 10219390}\n{'chid': 10219703}\nRound 23\n(660426, 53)\n{'chid': 10220000}\n{'chid': 10220313}\n{'chid': 10220626}\n{'chid': 10220939}\n{'chid': 10221252}\n{'chid': 10221565}\n{'chid': 10221878}\n{'chid': 10222191}\n{'chid': 10222504}\n{'chid': 10222817}\n{'chid': 10223130}\n{'chid': 10223443}\n{'chid': 10223756}\n{'chid': 10224069}\n{'chid': 10224382}\n{'chid': 10224695}\n{'chid': 10225008}\n{'chid': 10225321}\n{'chid': 10225634}\n{'chid': 10225947}\n{'chid': 10226260}\n{'chid': 10226573}\n{'chid': 10226886}\n{'chid': 10227199}\n{'chid': 10227512}\n{'chid': 10227825}\n{'chid': 10228138}\n{'chid': 10228451}\n{'chid': 10228764}\n{'chid': 10229077}\n{'chid': 10229390}\n{'chid': 10229703}\nRound 24\n(656002, 53)\n{'chid': 10230000}\n{'chid': 10230313}\n{'chid': 10230626}\n{'chid': 10230939}\n{'chid': 10231252}\n{'chid': 10231565}\n{'chid': 10231878}\n{'chid': 10232191}\n{'chid': 10232504}\n{'chid': 10232817}\n{'chid': 10233130}\n{'chid': 10233443}\n{'chid': 10233756}\n{'chid': 10234069}\n{'chid': 10234382}\n{'chid': 10234695}\n{'chid': 10235008}\n{'chid': 10235321}\n{'chid': 10235634}\n{'chid': 10235947}\n{'chid': 10236260}\n{'chid': 10236573}\n{'chid': 10236886}\n{'chid': 10237199}\n{'chid': 10237512}\n{'chid': 10237825}\n{'chid': 10238138}\n{'chid': 10238451}\n{'chid': 10238764}\n{'chid': 10239077}\n{'chid': 10239390}\n{'chid': 10239703}\nRound 25\n(664296, 53)\n{'chid': 10240000}\n{'chid': 10240313}\n{'chid': 10240626}\n{'chid': 10240939}\n{'chid': 10241252}\n{'chid': 10241565}\n{'chid': 10241878}\n{'chid': 10242191}\n{'chid': 10242504}\n{'chid': 10242817}\n{'chid': 10243130}\n{'chid': 10243443}\n{'chid': 10243756}\n{'chid': 10244069}\n{'chid': 10244382}\n{'chid': 10244695}\n{'chid': 10245008}\n{'chid': 10245321}\n{'chid': 10245634}\n{'chid': 10245947}\n{'chid': 10246260}\n{'chid': 10246573}\n{'chid': 10246886}\n{'chid': 10247199}\n{'chid': 10247512}\n{'chid': 10247825}\n{'chid': 10248138}\n{'chid': 10248451}\n{'chid': 10248764}\n{'chid': 10249077}\n{'chid': 10249390}\n{'chid': 
10249703}\nRound 26\n(657208, 53)\n{'chid': 10250000}\n{'chid': 10250313}\n{'chid': 10250626}\n{'chid': 10250939}\n{'chid': 10251252}\n{'chid': 10251565}\n{'chid': 10251878}\n{'chid': 10252191}\n{'chid': 10252504}\n{'chid': 10252817}\n{'chid': 10253130}\n{'chid': 10253443}\n{'chid': 10253756}\n{'chid': 10254069}\n{'chid': 10254382}\n{'chid': 10254695}\n{'chid': 10255008}\n{'chid': 10255321}\n{'chid': 10255634}\n{'chid': 10255947}\n{'chid': 10256260}\n{'chid': 10256573}\n{'chid': 10256886}\n{'chid': 10257199}\n{'chid': 10257512}\n{'chid': 10257825}\n{'chid': 10258138}\n{'chid': 10258451}\n{'chid': 10258764}\n{'chid': 10259077}\n{'chid': 10259390}\n{'chid': 10259703}\nRound 27\n(652902, 53)\n{'chid': 10260000}\n{'chid': 10260313}\n{'chid': 10260626}\n{'chid': 10260939}\n{'chid': 10261252}\n{'chid': 10261565}\n{'chid': 10261878}\n{'chid': 10262191}\n{'chid': 10262504}\n{'chid': 10262817}\n{'chid': 10263130}\n{'chid': 10263443}\n{'chid': 10263756}\n{'chid': 10264069}\n{'chid': 10264382}\n{'chid': 10264695}\n{'chid': 10265008}\n{'chid': 10265321}\n{'chid': 10265634}\n{'chid': 10265947}\n{'chid': 10266260}\n{'chid': 10266573}\n{'chid': 10266886}\n{'chid': 10267199}\n{'chid': 10267512}\n{'chid': 10267825}\n{'chid': 10268138}\n{'chid': 10268451}\n{'chid': 10268764}\n{'chid': 10269077}\n{'chid': 10269390}\n{'chid': 10269703}\nRound 28\n(655642, 53)\n{'chid': 10270000}\n{'chid': 10270313}\n{'chid': 10270626}\n{'chid': 10270939}\n{'chid': 10271252}\n{'chid': 10271565}\n{'chid': 10271878}\n{'chid': 10272191}\n{'chid': 10272504}\n{'chid': 10272817}\n{'chid': 10273130}\n{'chid': 10273443}\n{'chid': 10273756}\n{'chid': 10274069}\n{'chid': 10274382}\n{'chid': 10274695}\n{'chid': 10275008}\n{'chid': 10275321}\n{'chid': 10275634}\n{'chid': 10275947}\n{'chid': 10276260}\n{'chid': 10276573}\n{'chid': 10276886}\n{'chid': 10277199}\n{'chid': 10277512}\n{'chid': 10277825}\n{'chid': 10278138}\n{'chid': 10278451}\n{'chid': 10278764}\n{'chid': 10279077}\n{'chid': 10279390}\n{'chid': 10279703}\nRound 29\n(664949, 53)\n{'chid': 10280000}\n{'chid': 10280313}\n{'chid': 10280626}\n{'chid': 10280939}\n{'chid': 10281252}\n{'chid': 10281565}\n{'chid': 10281878}\n{'chid': 10282191}\n{'chid': 10282504}\n{'chid': 10282817}\n{'chid': 10283130}\n{'chid': 10283443}\n{'chid': 10283756}\n{'chid': 10284069}\n{'chid': 10284382}\n{'chid': 10284695}\n{'chid': 10285008}\n{'chid': 10285321}\n{'chid': 10285634}\n{'chid': 10285947}\n{'chid': 10286260}\n{'chid': 10286573}\n{'chid': 10286886}\n{'chid': 10287199}\n{'chid': 10287512}\n{'chid': 10287825}\n{'chid': 10288138}\n{'chid': 10288451}\n{'chid': 10288764}\n{'chid': 10289077}\n{'chid': 10289390}\n{'chid': 10289703}\nRound 30\n(657184, 53)\n{'chid': 10290000}\n{'chid': 10290313}\n{'chid': 10290626}\n{'chid': 10290939}\n{'chid': 10291252}\n{'chid': 10291565}\n{'chid': 10291878}\n{'chid': 10292191}\n{'chid': 10292504}\n{'chid': 10292817}\n{'chid': 10293130}\n{'chid': 10293443}\n{'chid': 10293756}\n{'chid': 10294069}\n{'chid': 10294382}\n{'chid': 10294695}\n{'chid': 10295008}\n{'chid': 10295321}\n{'chid': 10295634}\n{'chid': 10295947}\n{'chid': 10296260}\n{'chid': 10296573}\n{'chid': 10296886}\n{'chid': 10297199}\n{'chid': 10297512}\n{'chid': 10297825}\n{'chid': 10298138}\n{'chid': 10298451}\n{'chid': 10298764}\n{'chid': 10299077}\n{'chid': 10299390}\n{'chid': 10299703}\nRound 31\n(648797, 53)\n{'chid': 10300000}\n{'chid': 10300313}\n{'chid': 10300626}\n{'chid': 10300939}\n{'chid': 10301252}\n{'chid': 10301565}\n{'chid': 10301878}\n{'chid': 10302191}\n{'chid': 10302504}\n{'chid': 
10302817}\n[... ~600 near-identical progress lines omitted: Rounds 32-50 each report a per-round DataFrame shape of roughly (643,000-672,000, 53) and then log chid values stepping by 313 from 10310000 upward ...]\n{'chid': 10499703}\n" ], [ "feats = pd.concat(feats)", "_____no_output_____" ], [ "loader.save_data(feats, '2021_12_06_amt_feats.joblib', 'generate_amt_feats')", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
4a675e7237308acd764f473bf8ed91b23449b916
10,595
ipynb
Jupyter Notebook
s04_consume_asset.ipynb
frankenstien-831/mantaray_jupyter
65ff4adee5f5f3ac849ec50ed8a0f1e0fd088aee
[ "Apache-2.0" ]
null
null
null
s04_consume_asset.ipynb
frankenstien-831/mantaray_jupyter
65ff4adee5f5f3ac849ec50ed8a0f1e0fd088aee
[ "Apache-2.0" ]
1
2020-03-12T00:36:42.000Z
2020-03-12T00:36:42.000Z
s04_consume_asset.ipynb
frankenstien-831/mantaray_jupyter
65ff4adee5f5f3ac849ec50ed8a0f1e0fd088aee
[ "Apache-2.0" ]
1
2020-03-12T00:33:52.000Z
2020-03-12T00:33:52.000Z
33.109375
141
0.632374
[ [ [ "\n<p><img src=\"https://oceanprotocol.com/static/media/[email protected]\" alt=\"drawing\" width=\"800\" align=\"center\"/>", "_____no_output_____" ], [ "\n<h1><center>Ocean Protocol - Manta Ray project</center></h1>\n<h3><center>Decentralized Data Science and Engineering, powered by Ocean Protocol</center></h3>\n<p>Version 0.5.3 - beta</p>\n<p>Package compatibility: squid-py v0.6.13, keeper-contracts 0.10.3, utilities 0.2.2,\n<p>Component compatibility (Nile): Brizo v0.3.12, Aquarius v0.3.4, Nile testnet smart contracts 0.10.3</p>\n\n<p><a href=\"https://github.com/oceanprotocol/mantaray\">mantaray on Github</a></p>\n<p>", "_____no_output_____" ], [ "Getting Underway - Downloading Datasets (Assets)\nTo complete the basic datascience workflow, this notebook will demonstrate how a user\ncan download an asset. Downloading an asset is a simple example of a Service Execution Agreement -\nsimilar to a contract with a series of clauses. Each clause is secured on the blockchain, allowing for trustful\nexecution of a contract.\n\nIn this notebook, an asset will be first published as before, and then ordered and downloaded.", "_____no_output_____" ], [ "### Section 0: Import modules, and setup logging", "_____no_output_____" ] ], [ [ "import logging\nimport os\nfrom squid_py import Metadata, Ocean\nimport squid_py\nimport mantaray_utilities as manta_utils\n\n# Setup logging\nfrom mantaray_utilities.user import get_account_from_config\nfrom mantaray_utilities.events import subscribe_event\nmanta_utils.logging.logger.setLevel('INFO')\nimport mantaray_utilities as manta_utils\nfrom squid_py import Config\nfrom squid_py.keeper import Keeper\nfrom pathlib import Path\nimport datetime\nimport web3\nimport asyncio", "_____no_output_____" ], [ "# path_log_file = Path.home() / '{}.log'.format(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\n# fh = logging.FileHandler(path_log_file)\n# fh.setLevel(logging.DEBUG)\n# manta_utils.logging.logger.addHandler(fh)", "_____no_output_____" ] ], [ [ "## Section 1: Get the configuration from the INI file", "_____no_output_____" ] ], [ [ "# Get the configuration file path for this environment\nOCEAN_CONFIG_PATH = Path(os.environ['OCEAN_CONFIG_PATH'])\nassert OCEAN_CONFIG_PATH.exists(), \"{} - path does not exist\".format(OCEAN_CONFIG_PATH)\n\n# The Market Place will be delegated to provide access to your assets, so we need the address\nMARKET_PLACE_PROVIDER_ADDRESS = os.environ['MARKET_PLACE_PROVIDER_ADDRESS']\n\nlogging.critical(\"Configuration file selected: {}\".format(OCEAN_CONFIG_PATH))\nlogging.critical(\"Deployment type: {}\".format(manta_utils.config.get_deployment_type()))\nlogging.critical(\"Squid API version: {}\".format(squid_py.__version__))\nlogging.info(\"MARKET_PLACE_PROVIDER_ADDRESS:{}\".format(MARKET_PLACE_PROVIDER_ADDRESS))", "_____no_output_____" ], [ "# Instantiate Ocean with the default configuration file.\nconfiguration = Config(OCEAN_CONFIG_PATH)\nsquid_py.ConfigProvider.set_config(configuration)\nocn = Ocean(configuration)", "_____no_output_____" ] ], [ [ "## Section 2: Delegate access of your asset to the marketplace\nWhen we publish a register a DDO to a marketplace, we assign several services and conditions on those services.\nBy default, the permission to grant access will lie with you, the publisher. 
As a publisher, you would need to\nrun the services component (brizo), in order to manage access to your assets.\n\nHowever, for the case of a marketplace, we will delegate permission to grant access to these services to the market\nplace on our behalf. Therefore, we will need the public address of the marketplace component. Of course, the\nconditions are defined ultimately by you, the publisher.", "_____no_output_____" ] ], [ [ "MARKET_PLACE_PROVIDER_ADDRESS = web3.Web3.toChecksumAddress(MARKET_PLACE_PROVIDER_ADDRESS)", "_____no_output_____" ] ], [ [ "## Section 3: Instantiate Ocean", "_____no_output_____" ] ], [ [ "keeper = Keeper.get_instance()", "_____no_output_____" ] ], [ [ "## Section 4: Get Publisher and register an asset for testing the download\nOf course, you can download your own asset, one that you have created, or\none that you have found via the search api. All you need is the DID of the asset.", "_____no_output_____" ] ], [ [ "publisher_account = manta_utils.user.get_account_by_index(ocn,0)\n\n# publisher_account = get_account_from_config(config_from_ini, 'parity.address', 'parity.password')\nprint(\"Publisher address: {}\".format(publisher_account.address))\nprint(\"Publisher ETH: {:0.1f}\".format(ocn.accounts.balance(publisher_account).eth/10**18))\nprint(\"Publisher OCEAN: {:0.1f}\".format(ocn.accounts.balance(publisher_account).ocn/10**18))", "_____no_output_____" ], [ "# Register an asset\nddo = ocn.assets.create(Metadata.get_example(), publisher_account, providers=[MARKET_PLACE_PROVIDER_ADDRESS])\nlogging.info(f'registered ddo: {ddo.did}')\nasset_price = int(ddo.metadata['base']['price']) / 10**18\nasset_name = ddo.metadata['base']['name']\nprint(\"Registered {} for {} OCN\".format(asset_name, asset_price))", "_____no_output_____" ] ], [ [ "## Section 5: Get Consumer account, ensure token balance", "_____no_output_____" ] ], [ [ "# consumer_account = get_account_from_config(config_from_ini, 'parity.address1', 'parity.password1')\nconsumer_account = manta_utils.user.get_account_by_index(ocn,1)\nprint(\"Consumer address: {}\".format(consumer_account.address))\nprint(\"Consumer ETH: {:0.1f}\".format(ocn.accounts.balance(consumer_account).eth/10**18))\nprint(\"Consumer OCEAN: {:0.1f}\".format(ocn.accounts.balance(consumer_account).ocn/10**18))\nassert ocn.accounts.balance(consumer_account).eth/10**18 > 1, \"Insufficient ETH in account {}\".format(consumer_account.address)\n# Ensure the consumer always has enough Ocean Token (with a margin)\nif ocn.accounts.balance(consumer_account).ocn/10**18 < asset_price + 1:\n logging.info(\"Insufficient Ocean Token balance for this asset!\".format())\n refill_amount = int(15 - ocn.accounts.balance(consumer_account).ocn/10**18)\n logging.info(\"Requesting {} tokens\".format(refill_amount))\n ocn.accounts.request_tokens(consumer_account, refill_amount)", "_____no_output_____" ] ], [ [ "## Section 6: Initiate the agreement for accessing (downloading) the asset, wait for condition events", "_____no_output_____" ] ], [ [ "agreement_id = ocn.assets.order(ddo.did, 'Access', consumer_account)\nlogging.info(\"Consumer has placed an order for asset {}\".format(ddo.did))\nlogging.info(\"The service agreement ID is {}\".format(agreement_id))", "_____no_output_____" ] ], [ [ "In Ocean Protocol, downloading an asset is enforced by a contract.\nThe contract conditions and clauses are set by the publisher. 
Conditions trigger events, which are monitored\nto ensure the contract is successfully executed.", "_____no_output_____" ] ], [ [ "subscribe_event(\"created agreement\", keeper, agreement_id)\nsubscribe_event(\"lock reward\", keeper, agreement_id)\nsubscribe_event(\"access secret store\", keeper, agreement_id)\nsubscribe_event(\"escrow reward\", keeper, agreement_id)", "_____no_output_____" ] ], [ [ "Now wait for all events to complete!", "_____no_output_____" ], [ "Now that the agreement is signed, the consumer can download the asset.", "_____no_output_____" ] ], [ [ "assert ocn.agreements.is_access_granted(agreement_id, ddo.did, consumer_account.address)\n# ocn.agreements.status(agreement_id)\nocn.assets.consume(agreement_id, ddo.did, 'Access', consumer_account, 'downloads_nile')\n\nlogging.info('Success buying asset.')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
4a676155048f92aecedfd4d47e4e6d63bea33dc1
6,770
ipynb
Jupyter Notebook
exercise-1/exercise.ipynb
mportwood/ch7-iteration
827cca3747e971135287a76da0507d8bc17f41b6
[ "MIT" ]
null
null
null
exercise-1/exercise.ipynb
mportwood/ch7-iteration
827cca3747e971135287a76da0507d8bc17f41b6
[ "MIT" ]
null
null
null
exercise-1/exercise.ipynb
mportwood/ch7-iteration
827cca3747e971135287a76da0507d8bc17f41b6
[ "MIT" ]
1
2019-01-30T03:47:04.000Z
2019-01-30T03:47:04.000Z
29.307359
386
0.536484
[ [ [ "# Exercise 1\nAdd the specified code for each code cell, running the cells _in order_.", "_____no_output_____" ], [ "Write a **`while`** loop that prints out every 5th number (multiples of 5) from 0 to 100 (inclusive).\n- _Tip:_ use an **`end=','`** keyword argument to the `print()` function to print all the numbers on the same line.", "_____no_output_____" ] ], [ [ "nums = 0\nwhile nums <= 100:\n print(nums, end = ',')\n nums = nums + 5\n", "_____no_output_____" ] ], [ [ "Use a **`while`** loop to print out the first 15 [Triangular numbers](https://en.wikipedia.org/wiki/Triangular_number). This is a sequence of numbers for which the _nth_ value is the sum of the numbers from 0 to _n_. **Do this only using addition!**\n- _Hint:_ use an additional variable to keep track of the `total` value, and have that value increase by the number of times you've been through the loop each iteration!", "_____no_output_____" ] ], [ [ "total = 0\nnums = 0\nwhile nums <= 15: \n total = total + nums\n print(total, end = ',')\n nums = nums + 1", "_____no_output_____" ] ], [ [ "_Challenge_ Use a **`while`** loop to print out 20 numbers, each of which is larger than the previous by the the _sum_ of the **two** previous numbers (the [Fibonacci sequence](https://en.wikipedia.org/wiki/Fibonacci_number).\n- _Hint_: you'll need to keep track of the two previous values (start them at 0 and 1), and then \"update\" them each time through the loop, storing the \"new total\" in the first previous variable, and the first previous variable in the second (be careful about the ordering of this!) ", "_____no_output_____" ] ], [ [ "start = 0 #Not the best names\nnext = 1\ncount = 0 \nwhile count < 20: \n print(next, end =',') #print initial 1 value. Print before calcs\n sum = start + next\n start = next\n next = sum\n count = count + 1", "_____no_output_____" ] ], [ [ "Use a **`while`** loop to print out a sequence of random numbers from 0 to 10, stopping after the number `4` is printed for the first time. You will need to import the `random` module.", "_____no_output_____" ] ], [ [ "import random \ntest = True\nwhile test: #The syntax \"while test == True\" isn't necessary here; works w/o it\n num =random.randint(0,11)\n print(num, end = ', ')\n if num == 4:\n test = False\n\n#An \"else\" statement isn't necessary here\n# else: \n# test = True\n\n ", "_____no_output_____" ] ], [ [ "Modify the below \"coin flipping\" example from the course text so that it keeps flipping coins until you get two \"heads\" in a row.", "_____no_output_____" ] ], [ [ "\n# # flip a coin until it shows up heads\n# still_flipping = True\n# while still_flipping:\n# flip = randint(0,1)\n# if flip == 0:\n# flip = \"Heads\"\n# else:\n# flip = \"Tails\"\n# print(flip, end=\", \")\n# if flip == \"Heads\":\n# still_flipping = False\n \n# flip a coin until it shows up heads twice\nimport random\nstill_flipping = True\nprevious_flip = None\nwhile still_flipping:\n flip = random.randint(0,1)\n if flip == 0:\n flip = \"Heads\"\n else:\n flip = \"Tails\"\n print(flip, end=\", \")\n \n if previous_flip == \"Heads\" and flip ==\"Heads\": \n still_flipping = False\n previous_flip = flip\n", "_____no_output_____" ] ], [ [ "Define a function **`input_number()`** that takes a minimum and maximum value as arguments. This function should prompt the user to input a number within the range, repeating the prompt if the provided value is not acceptable. Once an acceptable value has been provided, the function should return that number. 
You can assume that the user-entered input will always be numeric.\n\nBe sure and call your function and print its results to test it!", "_____no_output_____" ] ], [ [ "def input_number(min, max): \n valid = False\n while (not valid): \n number = int(input(\"Pick a # between \" + str(min)+ \" and \" + str(max)+ \": \"))\n if (min <= number <= max): \n valid = True\n print(\"Good Choice\") \n else: \n print(\"Invalid number!\")\n return number\n\nprint(input_number (2,10))", "Pick a # between 2 and 10: 1\nInvalid number!\n1\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a676f1fbdd833e9d13e0e5219bd2fcc17d0311d
177,885
ipynb
Jupyter Notebook
8.1.LRP.ipynb
pourmand1376/nn_interpretability
3f494ae1835baa1389097c0afc71671393e50ca2
[ "MIT" ]
41
2020-10-13T18:46:32.000Z
2022-02-21T15:52:50.000Z
8.1.LRP.ipynb
pourmand1376/nn_interpretability
3f494ae1835baa1389097c0afc71671393e50ca2
[ "MIT" ]
4
2021-07-11T12:38:03.000Z
2022-03-08T14:47:38.000Z
8.1.LRP.ipynb
pourmand1376/nn_interpretability
3f494ae1835baa1389097c0afc71671393e50ca2
[ "MIT" ]
7
2020-10-21T13:03:16.000Z
2022-03-07T11:45:00.000Z
294.511589
6,068
0.927335
[ [ [ "import numpy as np\nimport torch\nimport torchvision\nimport torch.nn as nn\nimport torch.optim as optim\n\nfrom nn_interpretability.interpretation.lrp.lrp_0 import LRP0\nfrom nn_interpretability.interpretation.lrp.lrp_eps import LRPEpsilon\nfrom nn_interpretability.interpretation.lrp.lrp_gamma import LRPGamma\nfrom nn_interpretability.interpretation.lrp.lrp_ab import LRPAlphaBeta\nfrom nn_interpretability.interpretation.lrp.lrp_composite import LRPMix\nfrom nn_interpretability.model.model_trainer import ModelTrainer\nfrom nn_interpretability.model.model_repository import ModelRepository\nfrom nn_interpretability.visualization.mnist_visualizer import MnistVisualizer\nfrom nn_interpretability.dataset.mnist_data_loader import MnistDataLoader", "_____no_output_____" ], [ "model_name = 'model_cnn.pt'\n\ntrain = False", "_____no_output_____" ], [ "mnist_data_loader = MnistDataLoader()", "_____no_output_____" ], [ "MnistVisualizer.show_dataset_examples(mnist_data_loader.trainloader)", "_____no_output_____" ], [ "model = ModelRepository.get_general_mnist_cnn(model_name)\n\nif train:\n criterion = nn.CrossEntropyLoss()\n optimizer = optim.Adam(model.parameters(), lr=0.0005)\n\n model.train()\n ModelTrainer.train(model, criterion, optimizer, mnist_data_loader.trainloader)\n \n ModelRepository.save(model, model_name)", "_____no_output_____" ] ], [ [ "# I. LRP-0", "_____no_output_____" ] ], [ [ "images = []\n\nfor i in range(10):\n \n img = mnist_data_loader.get_image_for_class(i)\n \n # LRP0(model, target_class, transforms, visualize_layer) \n interpretor = LRP0(model, i, None, 0)\n endpoint = interpretor.interpret(img)\n\n images.append(endpoint[0].detach().cpu().numpy().sum(axis=0))\n \nMnistVisualizer.display_heatmap_for_each_class(images)", "_____no_output_____" ] ], [ [ "## Comparison between LRP gradient and LRP convolution transpose implementation \nFor **convolution layers** there is no difference, we will obtain the same numerical results with either approach. However, for **pooling layers** the result from convolution transpose approach is **4^(n)** as large as for those from gradient approach, where n is the number of pooling layers. The reason is because in every average unpooling operation, s will be unpooled directly without multiplying any scaling factor. For gradient approach, every input activation influence the output equally therefore the gradient for every activation entrices is 0.25. The operation is an analog of first unpooling and then multiplying a scale of 0.25 to s. \n\nThe gradient approach will be more reasonable to the equation described in Montavon's paper. As we treat pooling layers like convolutional layers, the scaling factor 0.25 from pooling should be considered in the steps that we multiply weights in convolutional layers (step1 and step3).", "_____no_output_____" ], [ "# II. LRP-ε", "_____no_output_____" ] ], [ [ "images = []\n\nfor i in range(10):\n img = mnist_data_loader.get_image_for_class(i)\n \n # LRPEpsilon(model, target_class, transforms, visualize_layer) \n interpretor = LRPEpsilon(model, i, None, 0)\n endpoint = interpretor.interpret(img)\n\n images.append(endpoint[0].detach().cpu().numpy().sum(axis=0))\n \nMnistVisualizer.display_heatmap_for_each_class(images)", "_____no_output_____" ] ], [ [ "# III. 
LRP-γ", "_____no_output_____" ] ], [ [ "images = []\n\nfor i in range(10):\n    img = mnist_data_loader.get_image_for_class(i)\n\n    # LRPGamma(model, target_class, transforms, visualize_layer) \n    interpretor = LRPGamma(model, i, None, 0)\n    endpoint = interpretor.interpret(img)\n\n    images.append(endpoint[0].detach().cpu().numpy().sum(axis=0))\n    \nMnistVisualizer.display_heatmap_for_each_class(images)", "_____no_output_____" ] ], [ [ "# IV. LRP-αβ", "_____no_output_____" ], [ "## 1. LRP-α1β0", "_____no_output_____" ] ], [ [ "images = []\n\nfor i in range(10):\n    img = mnist_data_loader.get_image_for_class(i)\n    \n    # LRPAlphaBeta(model, target_class, transforms, alpha, beta, visualize_layer) \n    interpretor = LRPAlphaBeta(model, i, None, 1, 0, 0)\n    endpoint = interpretor.interpret(img)\n\n    images.append(endpoint[0].detach().cpu().numpy().sum(axis=0))\n    \nMnistVisualizer.display_heatmap_for_each_class(images)", "_____no_output_____" ] ], [ [ "## 2. LRP-α2β1", "_____no_output_____" ] ], [ [ "images = []\nimg_shape = (28, 28)\n\nfor i in range(10):\n    img = mnist_data_loader.get_image_for_class(i)\n    \n    # LRPAlphaBeta(model, target_class, transforms, alpha, beta, visualize_layer) \n    interpretor = LRPAlphaBeta(model, i, None, 2, 1, 0)\n    endpoint = interpretor.interpret(img)\n\n    images.append(endpoint[0].detach().cpu().numpy().sum(axis=0))\n    \nMnistVisualizer.display_heatmap_for_each_class(images)", "_____no_output_____" ] ], [ [ "# V. Composite LRP", "_____no_output_____" ] ], [ [ "images = []\nimg_shape = (28, 28)\n\nfor i in range(10):\n    img = mnist_data_loader.get_image_for_class(i)\n    \n    # LRPMix(model, target_class, transforms, alpha, beta, visualize_layer)\n    interpretor = LRPMix(model, i, None, 1, 0, 0)\n    endpoint = interpretor.interpret(img)\n\n    images.append(endpoint[0].detach().cpu().numpy().sum(axis=0))\n\nMnistVisualizer.display_heatmap_for_each_class(images)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a6779cb98ffab9559a1e069ba86860e2ddf927c
4,497
ipynb
Jupyter Notebook
AIOpSchool/Zorg/0110_BeslissingsboomPraktisch.ipynb
dwengovzw/PythonNotebooks
633bea4b07efbd920349d6f1dc346522ce118b70
[ "CC0-1.0" ]
null
null
null
AIOpSchool/Zorg/0110_BeslissingsboomPraktisch.ipynb
dwengovzw/PythonNotebooks
633bea4b07efbd920349d6f1dc346522ce118b70
[ "CC0-1.0" ]
3
2021-09-30T11:38:24.000Z
2021-10-04T09:25:39.000Z
AIOpSchool/Zorg/0110_BeslissingsboomPraktisch.ipynb
dwengovzw/PythonNotebooks
633bea4b07efbd920349d6f1dc346522ce118b70
[ "CC0-1.0" ]
null
null
null
35.132813
310
0.53658
[ [ [ "<img src=\"images/bannerugentdwengo.png\" alt=\"Banner\" width=\"400\"/>", "_____no_output_____" ], [ "<div>\n <font color=#690027 markdown=\"1\">\n <h1>BESLISSINGSBOOM</h1> \n </font>\n</div>", "_____no_output_____" ], [ "<div class=\"alert alert-box alert-success\">\nIn deze notebook laat je Python een beslissingsboom genereren op basis van een tabel met gelabelde voorbeelden.<br>Een beslissingsboom biedt een oplossing voor een classificatieprobleem, hier in een medische context. \n</div>", "_____no_output_____" ], [ "<div>\n <font color=#690027 markdown=\"1\">\n <h2>1. Het medisch probleem</h2> \n </font>\n</div>", "_____no_output_____" ], [ "Men kan enkele parameters in rekening brengen om te proberen voorspellen of een patiënt risico loopt op een hartaanval. Van een gekende patiënt zijn bepaalde parameters terug te vinden in het patiëntendossier.<br>\nDe volgende tabel toont zo’n parameters voor zes (gekende) patiënten met de vermelding of ze al dan niet een hartaanval kregen.", "_____no_output_____" ], [ "<div>\n <font color=#690027 markdown=\"1\">\n <h2>2. De beslissingsboom</h2> \n </font>\n</div>", "_____no_output_____" ] ], [ [ "# nodige modules importeren\nimport numpy as np # om tabel te kunnen ingeven als een matrix\nimport matplotlib.pyplot as plt # om afbeelding van beslissingsboom te kunnen tonen \nfrom sklearn import tree # om beslissingsboom te generen\n\n# data\ndata = np.array(\n [[1, 1, 0, 1, 1],\n [1, 1, 1, 0, 1],\n [0, 0, 1, 0, 1],\n [0, 1, 0, 1, 0],\n [1, 0, 1, 1, 1],\n [0, 1, 1, 1, 0]])\n\n# parameters en klasse onderscheiden\ngezondheidsparameters = data[:, :4] # eerste 4 kolommen van matrix zijn beschouwde parameters\nklasse = data[:, 4] # laatste kolom zijn klasse \n\n# beslissingsboom genereren op basis van data\nbeslissingsboom = tree.DecisionTreeClassifier(criterion=\"gini\") # boom wordt aangemaakt via gini-index\nbeslissingsboom.fit(gezondheidsparameters, klasse) # boom genereren die overeenkomt met data\n\n# beslissingsboom tonen \nplt.figure(figsize=(10,10)) # tekenvenster aanmaken\ntree.plot_tree(beslissingsboom, # aangeven wat er moet getoond worden\n class_names=[\"geen risico\", \"risico\"], \n feature_names=[\"Pijn borststreek\", \"Man\", \"Rookt\", \"Beweging\"], # gezondheidsparameters: 'pijn in borststreek', 'man', 'rookt', 'voldoende beweging'\n filled=True, rounded=True)\nplt.show() # figuur tonen", "_____no_output_____" ] ], [ [ "<img src=\"images/cclic.png\" alt=\"Banner\" align=\"left\" width=\"100\"/><br><br>\nNotebook AI in de Zorg, zie <a href=\"http://www.aiopschool.be\">AI Op School</a>, van F. wyffels & N. Gesquière is in licentie gegeven volgens een <a href=\"http://creativecommons.org/licenses/by-nc-sa/4.0/\">Creative Commons Naamsvermelding-NietCommercieel-GelijkDelen 4.0 Internationaal-licentie</a>. ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
4a6780cfb537c3495cbe0ea7df5e2ce51beee0c6
1,578
ipynb
Jupyter Notebook
12_Sum_predictions.ipynb
VICS-CORE/stats
721d8ddc627a52a78c35ec5baa31d852e4226573
[ "MIT" ]
6
2020-05-11T14:25:50.000Z
2021-01-19T17:17:03.000Z
12_Sum_predictions.ipynb
VICS-CORE/stats
721d8ddc627a52a78c35ec5baa31d852e4226573
[ "MIT" ]
1
2021-03-30T09:55:44.000Z
2021-03-30T09:55:44.000Z
12_Sum_predictions.ipynb
VICS-CORE/stats
721d8ddc627a52a78c35ec5baa31d852e4226573
[ "MIT" ]
2
2020-05-25T12:59:04.000Z
2020-06-29T14:22:07.000Z
29.773585
128
0.484157
[ [ [ "import json\n\nwith open(\"predictions.json\") as f:\n api = json.loads(f.read())\n\napi['TT'] = {}\nfor state in api:\n if state == 'TT':\n continue\n for date in api[state]:\n api['TT'][date] = api['TT'].get(date, {'delta':{}, 'total':{}})\n for k in ['delta', 'total']:\n api['TT'][date][k]['confirmed'] = api['TT'][date][k].get('confirmed', 0) + api[state][date][k]['confirmed']\n api['TT'][date][k]['deceased'] = api['TT'][date][k].get('deceased', 0) + api[state][date][k]['deceased']\n api['TT'][date][k]['recovered'] = api['TT'][date][k].get('recovered', 0) + api[state][date][k]['recovered']\n api['TT'][date][k]['active'] = api['TT'][date][k].get('active', 0) + api[state][date][k]['active']\n\nwith open(\"predictions_tt.json\", \"w\") as f:\n f.write(json.dumps(api, sort_keys=True))", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code" ] ]
4a67826a1d89a9d19c29a9daf25b1a0660ddfaae
759,782
ipynb
Jupyter Notebook
Machine_Learning/Simple_Linear_Regression/Practice/First_Analysis/SLR_1.ipynb
bnonni/Python
9ebd18caa4e2d805028b557e8b77ea65a9ee1a3d
[ "Apache-2.0" ]
4
2019-10-05T03:41:20.000Z
2020-11-04T00:39:13.000Z
Machine_Learning/Simple_Linear_Regression/Practice/First_Analysis/SLR_1.ipynb
bnonni/Python
9ebd18caa4e2d805028b557e8b77ea65a9ee1a3d
[ "Apache-2.0" ]
null
null
null
Machine_Learning/Simple_Linear_Regression/Practice/First_Analysis/SLR_1.ipynb
bnonni/Python
9ebd18caa4e2d805028b557e8b77ea65a9ee1a3d
[ "Apache-2.0" ]
2
2019-10-02T14:08:51.000Z
2019-10-03T20:49:09.000Z
550.167994
210,424
0.932142
[ [ [ "# Project 1: Linear Regression Model\n\nThis is the first project of our data science fundamentals. This project is designed to solidify your understanding of the concepts we have learned in Regression and to test your knowledge on regression modelling. There are four main objectives of this project.\n\n1\\. Build Linear Regression Models \n* Use closed form solution to estimate parameters\n* Use packages of choice to estimate parameters<br>\n\n2\\. Model Performance Assessment\n* Provide an analytical rationale with choice of model\n* Visualize the Model performance\n * MSE, R-Squared, Train and Test Error <br>\n\n3\\. Model Interpretation\n\n* Intepret the results of your model\n* Intepret the model assement <br>\n \n4\\. Model Dianostics\n* Does the model meet the regression assumptions\n \n#### About this Notebook\n\n1\\. This notebook should guide you through this project and provide started code\n2\\. The dataset used is the housing dataset from Seattle homes\n3\\. Feel free to consult online resources when stuck or discuss with data science team members\n\n\nLet's get started.\n\n### Packages\n\nImporting the necessary packages for the analysis", "_____no_output_____" ] ], [ [ "# Necessary Packages\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# Model and data preprocessing\nfrom sklearn import linear_model\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.svm import SVR\nfrom sklearn.feature_selection import RFE\nfrom sklearn import preprocessing\n\n%matplotlib inline\n", "_____no_output_____" ] ], [ [ "Now that you have imported your packages, let's read the data that we are going to be using. The dataset provided is a titled *housing_data.csv* and contains housing prices and information about the features of the houses. Below, read the data into a variable and visualize the top 8 rows of the data. ", "_____no_output_____" ] ], [ [ "# Initiliazing seed\nnp.random.seed(42)\n\ndata1 = pd.read_csv('housing_data.csv')\ndata = pd.read_csv('housing_data_2.csv')\ndata.head(8)\n", "_____no_output_____" ] ], [ [ "### Split data into train and test\n\nIn the code below, we need to split the data into the train and test for modeling and validation of our models. We will cover the Train/Validation/Test as we go along in the project. Fill the following code.\n\n1\\. Subset the features to the variable: features <br>\n2\\. Subset the target variable: target <br>\n3\\. Set the test size in proportion in to a variable: test_size <br>\n", "_____no_output_____" ] ], [ [ "features = data[['lot_area', 'firstfloor_sqft', 'living_area', 'bath', 'garage_area', 'price']]\ntarget = data['price']\ntest_size = .33\n\n\nx_train, x_test, y_train, y_test = train_test_split(features, target, test_size=test_size, random_state=42)", "_____no_output_____" ] ], [ [ "### Data Visualization\n\nThe best way to explore the data we have is to build some plots that can help us determine the relationship of the data. We can use a scatter matrix to explore all our variables. Below is some starter code to build the scatter matrix ", "_____no_output_____" ] ], [ [ "features = pd.plotting.scatter_matrix(x_train, figsize=(14,8), alpha=1, diagonal='kde')\n#columns = pd.plotting.scatter_matrix(columns, figsize=(14,8), alpha=1, diagonal='kde')", "_____no_output_____" ] ], [ [ "Based on the scatter matrix above, write a brief description of what you observe. 
In thinking about the description, think about the relationship and whether linear regression is an appropriate choice for modelling this data.\n\n#### a. lot_area \nMy initial intutions tell me that lot_area would be the best indicator of price; that being said, there is a weak correlation between lot_area and the other features, which is a good sign! However, the distribution is dramatically skewed-right indicating that the mean lot_area is greater than the median. This tells me that lot_area stays around the same size while price increases. In turn, that tells me that some other feature is helping determine the price bceause if lot_area we're determining the increase in price, we'd see a linear distribution. In determining the best feature for my linear regression model, I think lot_area may be one of the least fitting to use.\n\n#### b. firstfloor_sqft\nThere is a stronger correlation between firstfloor_sqft and the other features. The distrubution is still skewed-right making the median a better measure of center. firstfloor_sqft would be a good candidate for the linear regression model becuse of the stronger correlation and wider distribution; however, there appears to be a overly strong, linear correlation between firstfloor_sqft and living_area. Given that this linear correlation goes against the Regression Assumption that \"all inputs are linearly independent,\" I would not consider using both in my model. I could, however, use one or the other. \n\n#### c. living_area\nThere is a similarly strong correlation between living_area (as compared to firstfloor_sqft) and the other features, but these plots are better distributed than firstfloor_sqft. A right skew still exists, but less so than the firstfloor_sqft. However, the observation of a strong, linear correlation between firstfloor_sqft and living_area (or living_area and firstfloor_sqft) is reinforced here. Thus, I would not use both of these in my final model and having to choose between the two, I will likely choose living_area since it appears to be more well-distributed. \n\n#### d. bath\nBaths are static numbers, so the plots are much less distributed; however, the length and the clustering of the bath to living_area & bath to garage_area may indicate a correlation. Since I cannot use both living_area and firstfloor_sqft, and I think living_area has a better distribution, I would consider using bath in conjunction with living_area. \n\n#### e. garage_area \nGarage_area appears to be well-distributed with the lowest correlation between the other features. This could make it a great fit for the final regression model. It's also the least skewed right distribution. \n\n#### Correlation Matrix\n\nIn the code below, compute the correlation matrix and write a few thoughts about the observations. In doing so, consider the interplay in the features and how their correlation may affect your modeling.\n \nThe correlation matrix below is in-line with my thought process. Lot_area has the lowest correlation between it and the other features, but it's not well distributed. firstfloor_sqft has a strong correlation between it and living_area. Given that the correlation is just over 0.5, both features may be able to be used in the model given that the correlation isn't overly strong; however, to be most accurate, I plan to leave out one of them (likely firstfloor_sqft). living_area also reflects this strong correlation between it and firstfloor_sqft. Surprisingly, there is a strong correlation between living_area and bath. 
Looking solely at the scatter matrix, I did not see this strong correlation. This changes my approach slighltly, which I will outline below. garage_area, again, has the lowest correlations while being the most well-distributed.\n\n#### Approach \nGiven this new correlation information, I will approach the regression model in one of the following ways:\n 1. Leave out bath as a feature and use living_area + garage_area.\n 2. Swap firstfloor_sqft for living_area and include bath + garage area.\n#### Conclusion\nI'm not 100% sure if more features are better than less in this situation; however, I am sure that I want linearly independet features. ", "_____no_output_____" ] ], [ [ "# Use pandas correlation function\nx_train.corr(method='pearson').style.format(\"{:.2}\").background_gradient(cmap=plt.get_cmap('coolwarm'), axis=1)\n", "_____no_output_____" ] ], [ [ "## 1. Build Your Model\n\nNow that we have explored the data at a high level, let's build our model. From our sessions, we have discussed both closed form solution, gradient descent and using packages. In this section you will create your own estimators. Starter code is provided to makes this easier.\n\n\n#### 1.1. Closed Form Solution\nRecall: <br>\n$$\\beta_0 = \\bar {y} - \\beta_1 \\bar{x}$$ <br>\n$$\\beta_1 = \\frac {cov(x, y)} {var(x)}$$ <br>\n\nBelow, let's define functions that will compute these parameters", "_____no_output_____" ] ], [ [ "# Pass the necessary arguments in the function to calculate the coefficients\n\ndef compute_estimators(feature, target):\n n1 = np.sum(feature*target) - np.mean(target)*np.sum(feature)\n d1 = np.sum(feature*feature) - np.mean(feature)*np.sum(feature)\n \n # Compute the Intercept and Slope\n beta1 = n1/d1\n beta0 = np.mean(target) - beta1*np.mean(feature)\n \n return beta0, beta1 # Return the Intercept and Slope\n", "_____no_output_____" ] ], [ [ "Run the compute estimators function above and display the estimated coefficients for any of the predictors/input variables.", "_____no_output_____" ] ], [ [ "# Remember to pass the correct arguments\nx_array = np.array(data1['living_area'])\nnormalized_X = preprocessing.normalize([x_array])\n\nbeta0, beta1 = compute_estimators(normalized_X, data1['price'])\nprint(beta0, beta1)\n\n#### Computing coefficients for our model by hand using the actual mathematical equations\n#y = beta1x + beta0\n#print(y)", "_____no_output_____" ] ], [ [ "#### 1.2. sklearn solution\n\nNow that we know how to compute the estimators, let's leverage the sklearn module to compute the metrics for us. We have already imported the linear model, let's initialize the model and compute the coefficients for the model with the input above.", "_____no_output_____" ] ], [ [ "# Initilize the linear Regression model here\nmodel = linear_model.LinearRegression()\n\n# Pass in the correct inputs\nmodel.fit(data1[['living_area']], data1['price'])\n\n# Print the coefficients\nprint(\"This is beta0:\", model.intercept_)\nprint(\"This is beta1:\", model.coef_) \n#### Computing coefficients for our model using the sklearn package \n", "_____no_output_____" ] ], [ [ "Do the results from the cell above and your implementation match? They should be very close to each other.\n#### Yes!! They match! \n\n### 2. Model Evaluation\n\nNow that we have estimated our single model. We are going to compute the coefficients for all the inputs. We can use a for loop for multiple model estimation. However, we need to create a few functions:\n\n1\\. 
Prediction function: Functions to compute the predictions <br>\n2\\. MSE: Function to compute Mean Square Error <br>", "_____no_output_____" ] ], [ [ "#Function that computes predictions of our model using the betas above + the feature data we've been using \ndef model_predictions(intercept, slope, feature):\n \"\"\" Compute Model Predictions \"\"\"\n y_hat = intercept+(slope*feature)\n \n return y_hat\n\ny_hat = model_predictions(beta0, beta1, data1['living_area'])\n\n#Function to compute MSE which determines the total loss for each predicted data point in our model\ndef mean_square_error(y_outcome, predictions):\n \"\"\" Compute the mean square error \"\"\"\n mse = (np.sum((y_outcome - predictions) ** 2))/np.size(predictions)\n \n return mse\n\nmse = mean_square_error(target, y_hat)\nprint(mse)", "8957196059.803959\n" ] ], [ [ "The last function we need is a plotting function to visualize our predictions relative to our data.\n", "_____no_output_____" ] ], [ [ "#Function used to plot the data\ndef plotting_model(feature, target, predictions, name):\n \"\"\" Create a scatter and predictions \"\"\"\n fig = plt.figure(figsize=(10,8)) \n plot_model = model.fit(feature, target)\n plt.scatter(x=feature, y=target, color='blue')\n plt.plot(feature, predictions, color='red')\n plt.xlabel(name)\n plt.ylabel('Price')\n\n return model\n\nmodel = plotting_model(data1[['living_area']], data1['price'], y_hat, data1['living_area'].name)\n", "_____no_output_____" ] ], [ [ "## Considerations/Reasoning\n\n#### Data Integrity\nAfter my inital linear model based on the feature \"living area,\" I've eliminated 8 data points. If you look at the graph above, there are 4 outliers that are clear, and at least 4 others that follow a similar trend based on the x, y relationship. I used ~3500 sqft of living area as my cutoff for being not predictive of the model, and any price above 600000. Given the way these data points skew the above model, they intuitively appear to be outliers with high leverage. I determined this by comparing these high leverag points with points similar to it in someway and determined whether it was an outlier (i.e. if point A's price was abnormally high, I found a point (B) with living area at or close to point A's living area and compared the price. vice versa if living area was abnormally high).\n\n#### Inital Feature Analysis - \"Best\" Feature (a priori)\nLiving area is the best metric to use to train the linear model because it incorporates multiple of the other features within it: first floor living space & bath. Living area has a high correlation with both first floor sq ft (0.53) and baths (0.63). Based on the other correlations, these are the two highest, and thus should immediately be eliminated. Additionally, based on initial intuition, one would assume that an increase in the metric \"firstfloor sqft\" will lead to an increase in the \"living area\" metric; if both firstfloor sqft and overall living area are increased, the \"bath\" metric will likely also increase to accommodate the additional living area/sqft in a home. 
Thus, I will not need to use them in my model because these can be accurately represented by the feature \"living area.\"\n", "_____no_output_____" ], [ "### Single Feature Assessment", "_____no_output_____" ] ], [ [ "#Running each feature through to determine which has best linear fit\nfeatures = data[['living_area', 'garage_area', 'lot_area', 'firstfloor_sqft', 'bath']]\ncount = 0\n\nfor feature in features:\n feature = features.iloc[:, count]\n # Compute the Coefficients\n beta0, beta1 = compute_estimators(feature, target)\n count+=1\n \n # Print the Intercept and Slope\n print(feature.name)\n print('beta0:', beta0)\n print('beta1:', beta1)\n\n # Compute the Train and Test Predictions\n y_hat = model_predictions(beta0, beta1, feature)\n\n # Plot the Model Scatter \n name = feature.name\n model = plotting_model(feature.values.reshape(-1, 1), target, y_hat, name)\n \n # Compute the MSE\n mse = mean_square_error(target, y_hat)\n print('mean squared error:', mse)\n print()\n ", "living_area\nbeta0: 12908.1284411604\nbeta1: 110.89225146416194\nmean squared error: 2815789647.7664313\n\ngarage_area\nbeta0: 71321.26360425558\nbeta1: 230.2030162829738\nmean squared error: 3466639234.8407283\n\nlot_area\nbeta0: 127368.85540055121\nbeta1: 5.232206926890248\nmean squared error: 5049292305.478467\n\nfirstfloor_sqft\nbeta0: 30417.01964454449\nbeta1: 129.30518669452894\nmean squared error: 3582314928.964111\n\nbath\nbeta0: 58167.54993139299\nbeta1: 77960.26632816301\nmean squared error: 4026327007.394888\n\n" ] ], [ [ "#### Analysis of Feature Linear Models\n\nAfter eliminating these 8 data points, MSE for Living Area drop significantly from 8957196059.803959 to 2815789647.7664313. In fact, Living Area has the lowest MSE 2815789647.7664313 of all the individual models, and the best linear fit.\n\nGarage Area is the next lowest MSE 3466639234.8407283, and the model is mostly linear; however, the bottom left of the model is concerning. You'll notice that a large number of data points go vertically upward indicating an increase in price with 0 garage area. That says to me that garage area isn't predicting the price of these homes, which indicates that it may be a good feature to use in conjunction with another feature (i.e. Living Area) or since those data points do not fit in with the rest of the population, they may need to be removed.", "_____no_output_____" ], [ "#### Run Model Assessment\n\nNow that we have our functions ready, we can build individual models, compute preductions, plot our model results and determine our MSE. 
Notice that we compute our MSE on the test set and not the train set\n\n", "_____no_output_____" ], [ "### Dot Product (multiple feature) Assessment", "_____no_output_____" ] ], [ [ "#Models Living Area alone and compares it to the Dot Product of Living Area with each other feature\n##Determining if a MLR would be a better way to visualize the data\nfeatures = data[['living_area', 'garage_area', 'lot_area', 'firstfloor_sqft', 'bath']]\ncount = 0\n\nfor feature in features:\n feature = features.iloc[:, count]\n #print(feature.head(0))\n if feature.name == 'living_area':\n x = data['living_area']\n else:\n x = feature * data['living_area']\n # Compute the Coefficients\n beta0, beta1 = compute_estimators(x, target)\n \n # Print the Intercept and Slope\n if feature.name == 'living_area':\n print('living_area')\n print('beta0:', beta0)\n print('beta1:', beta1)\n else: \n print(feature.name, \"* living_area\") \n print('beta0:', beta0)\n print('beta1:', beta1)\n\n # Compute the Train and Test Predictions\n y_hat = model_predictions(beta0, beta1, x)\n\n # Plot the Model Scatter \n if feature.name == 'living_area':\n name = 'living_area'\n else:\n name = feature.name + \" \" + \"* living_area\" \n model = plotting_model(x.values.reshape(-1, 1), target, y_hat, name)\n \n # Compute the MSE\n mse = mean_square_error(target, y_hat)\n print('mean squared error:', mse)\n print()\n count+=1\n ", "living_area\nbeta0: 12908.1284411604\nbeta1: 110.89225146416194\nmean squared error: 2815789647.7664313\n\ngarage_area * living_area\nbeta0: 88675.9456927962\nbeta1: 0.12034374773425742\nmean squared error: 2064321922.99655\n\nlot_area * living_area\nbeta0: 123251.8606427904\nbeta1: 0.0035276987010394866\nmean squared error: 3802170878.8921647\n\nfirstfloor_sqft * living_area\nbeta0: 81727.47465317308\nbeta1: 0.053467447469143566\nmean squared error: 2456865745.3672667\n\nbath * living_area\nbeta0: 91196.33728151293\nbeta1: 35.14797065578143\nmean squared error: 2904899481.7202654\n\n" ] ], [ [ "## Analysis\n\nBased on the models, it appears that two of the dot products provide a more accurate model:\n \n 1. Living Area * First Floor SqFt\n 2. Living Area * Garage Area \n \nThese two dot products provide a lower MSE and thus lowers the loss per prediction point.\n\n#1.\nMy intuition says that since Living Area, as a feature, will include First Floor SqFt in its data. The FirstFloor SqFt can be captured by Living Area, so it can be left out. Additionally, since one is included within the other, we cannot say anything in particular about Living Area or FirstFloor SqFt individually. Also, the correlation (Ln 24 & Out 24) between Living Area and FirstFloor SqFt is 0.53, which is the highest apart from Bath. This correlation is low in comparison to the \"standard;\" however, that standard is arbitrary. I've lowered it to be in context with data sets I'm working with in this notebook.\n\n#2.\nThe dot product of Living Area & Garage Area provides doesn't allow us to make a statement about each individually, unless we provide a model of each, which I will do below. This dot product is a better model. Garage Area is advertised as 'bonus' space and CANNOT be included in the overall square footage of the home (i.e. living area). Thus, garage area vector will not be included as an implication within the living area vector making them linearly independent. 
\n\nGarage Area can be a sought after feature depending on a buyer's desired lifestlye; more garage space would be sought after by buyers with more cars, which allows us to draw a couple possible inferences about the buyers:\n\n 1. enough net worth/monthly to make payments on multiple vehicles plus make payments on a house/garage\n 2. enough disposable income to outright buy multiple vehicles plus make payments on a house/garage\n\nAdditionally, it stands to reason that garage area would scale with living area for pragmatic reasons (more living area implies more people and potentially more vehicles) and for aesthetic reasons (more living area makes home look larger and would need larger garage).\n\nHomes with more living area and garage area may be sought after by buyers with the ability to spend more on a home, and thus the market would bear a higher price for those homes, which helps explain why living area * garage area is a better indicator of home price.\n\n#### Conclusion\nCombining living area with other features lowered the MSE for each. The lowest MSE is living area * garage area, which confirms my hypothesis: Living Area is the best feature to predict price, and garage area is good when used in conjunction. ", "_____no_output_____" ] ], [ [ "#Modeling Living Area & Garage Area separately.\nfeatures = data[['living_area', 'garage_area']]\ncount = 0\nfor feature in features:\n feature = features.iloc[:, count]\n if feature.name == 'living_area':\n x = data['living_area']\n elif feature.name == 'garage_area':\n x = data['garage_area']\n \n beta0, beta1 = compute_estimators(x, target)\n count+=1\n \n if feature.name == 'living_area':\n print('living_area')\n print('beta0:', beta0)\n print('beta1:', beta1)\n elif feature.name == 'garage_area':\n print('garage_area')\n print('beta0:', beta0)\n print('beta1:', beta1)\n \n y_hat = model_predictions(beta0, beta1, x)\n \n if feature.name == 'living_area':\n name = 'living_area'\n elif feature.name == 'garage_area':\n name = 'garage_area'\n model = plotting_model(x.values.reshape(-1, 1), target, y_hat, name)\n \n mse = mean_square_error(target, y_hat)\n print('mean squared error:', mse)\n print()", "living_area\nbeta0: 12908.1284411604\nbeta1: 110.89225146416194\nmean squared error: 2815789647.7664313\n\ngarage_area\nbeta0: 71321.26360425558\nbeta1: 230.2030162829738\nmean squared error: 3466639234.8407283\n\n" ], [ "#Modeling dot product of Living Area * Garage Area \nfeatures = data[['living_area']]\nx = features.iloc[:, 0]\nx2 = x * data['garage_area']\n#x3 = x2 * data['bath']\n\n# Compute the Coefficients\nbeta0, beta1 = compute_estimators(x2, target)\n\n# Print the Intercept and Slope\nprint('Name: garage_area * living_area')\nprint('beta0:', beta0)\nprint('beta1:', beta1)\n\n# Compute the Train and Test Predictions\ny_hat_1 = model_predictions(beta0, beta1, x2)\n\n# Plot the Model Scatter \nname = 'garage_area * living_area'\nmodel = plotting_model(x2.values.reshape(-1, 1), target, y_hat_1, name)\n\n# Compute the MSE\nmse = mean_square_error(target, y_hat_1)\nprint('mean squared error:', mse)\nprint()\n", "Name: garage_area * living_area\nbeta0: 88675.9456927962\nbeta1: 0.12034374773425742\nmean squared error: 2064321922.99655\n\n" ] ], [ [ "## Reasoning\nAbove, I modeled both living area and garage area by themselves then the dot product of Living Area * Garage Area to highlight the MSE of each vs. the MSE of the dot product. 
Garage Area, much more so than Living Area, has a high MSE indicating that on its own, Garage Area isn't the best predictor of a home's price; we must take the data in context with reality, and intuitively speaking, one wouldn't assume that the garage area, on its own, would be a feature indicative of price. \n\nThis fact combined with the assumption/implication that garage may scale with living area implies some correlation between the features, which would go against the linear assumption of feature independence. As a matter of fact, there is a correlation between them (Ln 24 & Out 24) of 0.44; however, this isn't problematic for two reasons:\n\n 1. 0.44 is quite low in regard to typical correlation standards.\n 2. Data must be seen in context. \n \n#1.\nAlthough I eliminated First Floor SqFt due, in part, to a high correlation and that correclation is only 0.09 points lower. The main reason why First Floor SqFt is eliminated is due to its inclusion within the living area vector. Additionally, the main reason why I'm including garage area is because it is not included with the living area vector.\n\n#2.\nSimilar to my #1 explanation, knowing that garage area is 'bonus space' and, as such, is NOT included in a home's advertised square feet indicates that it isn't within the Living Area data set in the same way FF SqFt or Baths would be. It will most likely to scale with the living area independently of the living area making it a good fit for a MLR.", "_____no_output_____" ], [ "### 3. Model Interpretation\n\nNow that you have calculated all the individual models in the dataset, provide an analytics rationale for which model has performed best. To provide some additional assessment metrics, let's create a function to compute the R-Squared.\n\n#### Mathematically:\n\n$$R^2 = \\frac {SS_{Regression}}{SS_{Total}} = 1 - \\frac {SS_{Error}}{SS_{Total}}$$<br>\n\nwhere:<br>\n$SS_{Regression} = \\sum (\\widehat {y_i} - \\bar {y_i})^2$<br>\n$SS_{Total} = \\sum ({y_i} - \\bar {y_i})^2$<br>\n$SS_{Error} = \\sum ({y_i} - \\widehat {y_i})^2$\n\n\n", "_____no_output_____" ] ], [ [ "#ssr = sum of squares of regression --> variance of prediction from the mean\n#sst = sum of squares total --> variance of the actuals from the prediction\n#sse = sume of squares error --> variance of the atuals from the mean\ndef r_squared(y_outcome, predictions):\n \"\"\" Compute the R Squared \"\"\"\n ssr = np.sum((predictions - np.mean(y_outcome))**2)\n sst = np.sum((y_outcome - np.mean(y_outcome))**2)\n sse = np.sum((y_outcome - predictions)**2)\n \n# print(sse, \"/\", sst)\n print(\"1 - SSE/SST =\", round((1 - (sse/sst))*100), \"%\")\n \n rss = (ssr/sst) * 100\n \n return rss", "_____no_output_____" ] ], [ [ "Now that you we have R Squared calculated, evaluate the R Squared for the test group across all models and determine what model explains the data best. ", "_____no_output_____" ] ], [ [ "rss = r_squared(target, y_hat_1)\nprint(\"R-Squared =\", round(rss), \"%\")\ncount += 1", "1 - SSE/SST = 65.0 %\nR-Squared = 65.0 %\n" ] ], [ [ "### R-Squared Adjusted\n\n$R^2-adjusted = 1 - \\frac {(1-R^2)(n-1)}{n-k-1}$", "_____no_output_____" ] ], [ [ "def r_squared_adjusted(rss, sample_size, regressors):\n n = np.size(sample_size)\n k = regressors\n numerator = (1-rss)*(n)\n denominator = n-k-1\n rssAdj = 1 - (numerator / denominator)\n \n return rssAdj\n\nrssAdj = r_squared_adjusted(rss, y_hat_1, 2)", "_____no_output_____" ], [ "print(round(rssAdj), \"%\")", "65.0 %\n" ] ], [ [ "### 4. 
Model Diagnostics\n\nLinear regression depends on meeting assumptions in the model. While we have not yet talked about the assumptions, your goal is to research and develop an intuitive understanding of why the assumptions make sense. We will walk through this portion in the Multiple Linear Regression Project.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
4a678ee3dd2932efd24de08239122bd337939bec
590
ipynb
Jupyter Notebook
Untitled.ipynb
girishlal/udacityproject
79677f8860974a3af6c807b742cca97815654c3f
[ "MIT" ]
null
null
null
Untitled.ipynb
girishlal/udacityproject
79677f8860974a3af6c807b742cca97815654c3f
[ "MIT" ]
null
null
null
Untitled.ipynb
girishlal/udacityproject
79677f8860974a3af6c807b742cca97815654c3f
[ "MIT" ]
null
null
null
17.878788
51
0.538983
[]
[]
[]
4a67a792bf01cbc24a421163ec567470663917a5
656,194
ipynb
Jupyter Notebook
tutorials/c1qbx/part8_thrust_qd.ipynb
tbenthompson/BIE_book
dcbbd7f0777ebf4a35d70737643e67138d9d684b
[ "MIT" ]
1
2021-06-18T18:02:55.000Z
2021-06-18T18:02:55.000Z
tutorials/c1qbx/part8_thrust_qd.ipynb
tbenthompson/BIE_notebooks
dcbbd7f0777ebf4a35d70737643e67138d9d684b
[ "MIT" ]
null
null
null
tutorials/c1qbx/part8_thrust_qd.ipynb
tbenthompson/BIE_notebooks
dcbbd7f0777ebf4a35d70737643e67138d9d684b
[ "MIT" ]
1
2021-07-14T19:47:00.000Z
2021-07-14T19:47:00.000Z
635.231365
284,330
0.94341
[ [ [ "[SCEC BP3-QD](https://strike.scec.org/cvws/seas/download/SEAS_BP3.pdf) document is here.", "_____no_output_____" ], [ "# [DRAFT] Quasidynamic thrust fault earthquake cycles (plane strain)\n\n## Summary\n\n* Most of the code here follows almost exactly from [the previous section on strike-slip/antiplane earthquake cycles](c1qbx/part6_qd).\n* Since the fault motion is in the same plane as the fault normal vectors, we are no longer operating in an antiplane approximation. Instead, we use plane strain elasticity, a different 2D reduction of full 3D elasticity.\n* One key difference is the vector nature of the displacement and the tensor nature of the stress. We must always make sure we are dealing with tractions on the correct surface. \n* We construct a mesh, build our discrete boundary integral operators, step through time and then compare against other benchmark participants' results. \n\nDoes this section need detailed explanation or is it best left as lonely code? Most of the explanation would be redundant with the antiplane QD document.", "_____no_output_____" ] ], [ [ "from tectosaur2.nb_config import setup\n\nsetup()", "_____no_output_____" ], [ "import sympy as sp\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tectosaur2 import gauss_rule, refine_surfaces, integrate_term, panelize_symbolic_surface\nfrom tectosaur2.elastic2d import elastic_t, elastic_h\nfrom tectosaur2.rate_state import MaterialProps, qd_equation, solve_friction, aging_law", "_____no_output_____" ], [ "surf_half_L = 1000000\nfault_length = 40000\nmax_panel_length = 400\nn_fault = 400\nmu = shear_modulus = 3.2e10\nnu = 0.25\n\nquad_rule = gauss_rule(6)\nsp_t = sp.var(\"t\")\n\nangle_rad = sp.pi / 6\nsp_x = (sp_t + 1) / 2 * sp.cos(angle_rad) * fault_length\nsp_y = -(sp_t + 1) / 2 * sp.sin(angle_rad) * fault_length\nfault = panelize_symbolic_surface(\n sp_t, sp_x, sp_y,\n quad_rule,\n n_panels=n_fault\n)\n\nfree = refine_surfaces(\n [\n (sp_t, -sp_t * surf_half_L, 0 * sp_t) # free surface\n ],\n quad_rule,\n control_points = [\n # nearfield surface panels and fault panels will be limited to 200m\n # at 200m per panel, we have ~40m per solution node because the panels\n # have 5 nodes each\n (0, 0, 1.5 * fault_length, max_panel_length),\n (0, 0, 0.2 * fault_length, 1.5 * fault_length / (n_fault)),\n # farfield panels will be limited to 200000 m per panel at most\n (0, 0, surf_half_L, 50000),\n ]\n)\nprint(\n f\"The free surface mesh has {free.n_panels} panels with a total of {free.n_pts} points.\"\n)\nprint(\n f\"The fault mesh has {fault.n_panels} panels with a total of {fault.n_pts} points.\"\n)", "The free surface mesh has 646 panels with a total of 3876 points.\nThe fault mesh has 400 panels with a total of 2400 points.\n" ], [ "plt.plot(free.pts[:,0]/1000, free.pts[:,1]/1000, 'k-o')\nplt.plot(fault.pts[:,0]/1000, fault.pts[:,1]/1000, 'r-o')\nplt.xlabel(r'$x ~ \\mathrm{(km)}$')\nplt.ylabel(r'$y ~ \\mathrm{(km)}$')\nplt.axis('scaled')\nplt.xlim([-100, 100])\nplt.ylim([-80, 20])\nplt.show()", "_____no_output_____" ] ], [ [ "And, to start off the integration, we'll construct the operators necessary for solving for free surface displacement from fault slip.", "_____no_output_____" ] ], [ [ "singularities = np.array(\n [\n [-surf_half_L, 0],\n [surf_half_L, 0],\n [0, 0],\n [float(sp_x.subs(sp_t,1)), float(sp_y.subs(sp_t,1))],\n ]\n)", "_____no_output_____" ], [ "(free_disp_to_free_disp, fault_slip_to_free_disp), report = integrate_term(\n elastic_t(nu), free.pts, free, fault, singularities=singularities, 
safety_mode=True, return_report=True\n)", "/Users/tbent/Dropbox/active/eq/tectosaur2/tectosaur2/integrate.py:204: UserWarning: Some integrals failed to converge during adaptive integration. This an indication of a problem in either the integration or the problem formulation.\n warnings.warn(\n" ], [ "fault_slip_to_free_disp = fault_slip_to_free_disp.reshape((-1, 2 * fault.n_pts))\nfree_disp_to_free_disp = free_disp_to_free_disp.reshape((-1, 2 * free.n_pts))\nfree_disp_solve_mat = (\n np.eye(free_disp_to_free_disp.shape[0]) + free_disp_to_free_disp\n)", "_____no_output_____" ], [ "from tectosaur2.elastic2d import ElasticH\n\n(free_disp_to_fault_stress, fault_slip_to_fault_stress), report = integrate_term(\n ElasticH(nu, d_cutoff=8.0),\n # elastic_h(nu),\n fault.pts,\n free,\n fault,\n tol=1e-12,\n safety_mode=True,\n singularities=singularities,\n return_report=True,\n)\nfault_slip_to_fault_stress *= shear_modulus\nfree_disp_to_fault_stress *= shear_modulus", "_____no_output_____" ] ], [ [ "**We're not achieving the tolerance we asked for!!**\nHypersingular integrals can be tricky but I think this is solvable.", "_____no_output_____" ] ], [ [ "\nreport['integration_error'].max()", "_____no_output_____" ], [ "A = -fault_slip_to_fault_stress.reshape((-1, 2 * fault.n_pts))\nB = -free_disp_to_fault_stress.reshape((-1, 2 * free.n_pts))\nC = fault_slip_to_free_disp\nDinv = np.linalg.inv(free_disp_solve_mat)\ntotal_fault_slip_to_fault_stress = A - B.dot(Dinv.dot(C))\n\nnx = fault.normals[:, 0]\nny = fault.normals[:, 1]\nnormal_mult = np.transpose(np.array([[nx, 0 * nx, ny], [0 * nx, ny, nx]]), (2, 0, 1))\n\ntotal_fault_slip_to_fault_traction = np.sum(\n total_fault_slip_to_fault_stress.reshape((-1, 3, fault.n_pts, 2))[:, None, :, :, :]\n * normal_mult[:, :, :, None, None],\n axis=2,\n).reshape((-1, 2 * fault.n_pts))", "_____no_output_____" ] ], [ [ "## Rate and state friction", "_____no_output_____" ] ], [ [ "siay = 31556952 # seconds in a year\ndensity = 2670 # rock density (kg/m^3)\ncs = np.sqrt(shear_modulus / density) # Shear wave speed (m/s)\nVp = 1e-9 # Rate of plate motion\nsigma_n0 = 50e6 # Normal stress (Pa)\n\n# parameters describing \"a\", the coefficient of the direct velocity strengthening effect\na0 = 0.01\namax = 0.025\nH = 15000\nh = 3000\nfx = fault.pts[:, 0]\nfy = fault.pts[:, 1]\nfd = -np.sqrt(fx ** 2 + fy ** 2)\na = np.where(\n fd > -H, a0, np.where(fd > -(H + h), a0 + (amax - a0) * (fd + H) / -h, amax)\n)\n\nmp = MaterialProps(a=a, b=0.015, Dc=0.008, f0=0.6, V0=1e-6, eta=shear_modulus / (2 * cs))", "_____no_output_____" ], [ "plt.figure(figsize=(3, 5))\nplt.plot(mp.a, fd/1000, label='a')\nplt.plot(np.full(fy.shape[0], mp.b), fd/1000, label='b')\nplt.xlim([0, 0.03])\nplt.ylabel('depth')\nplt.legend()\nplt.show()", "_____no_output_____" ], [ "mesh_L = np.max(np.abs(np.diff(fd)))\nLb = shear_modulus * mp.Dc / (sigma_n0 * mp.b)\nhstar = (np.pi * shear_modulus * mp.Dc) / (sigma_n0 * (mp.b - mp.a))\nmesh_L, Lb, np.min(hstar[hstar > 0])", "_____no_output_____" ] ], [ [ "## Quasidynamic earthquake cycle derivatives", "_____no_output_____" ] ], [ [ "from scipy.optimize import fsolve\nimport copy\n\ninit_state_scalar = fsolve(lambda S: aging_law(mp, Vp, S), 0.7)[0]\nmp_amax = copy.copy(mp)\nmp_amax.a=amax\ntau_amax = -qd_equation(mp_amax, sigma_n0, 0, Vp, init_state_scalar)\ninit_state = np.log((2*mp.V0/Vp)*np.sinh((tau_amax - mp.eta*Vp) / (mp.a*sigma_n0))) * mp.a\n\ninit_tau = np.full(fault.n_pts, tau_amax)\ninit_sigma = np.full(fault.n_pts, sigma_n0)\ninit_slip_deficit = 
np.zeros(fault.n_pts)\ninit_conditions = np.concatenate((init_slip_deficit, init_state))", "_____no_output_____" ], [ "class SystemState:\n\n V_old = np.full(fault.n_pts, Vp)\n state = None\n\n def calc(self, t, y, verbose=False):\n # Separate the slip_deficit and state sub components of the\n # time integration state.\n slip_deficit = y[: init_slip_deficit.shape[0]]\n state = y[init_slip_deficit.shape[0] :]\n\n # If the state values are bad, then the adaptive integrator probably\n # took a bad step.\n if np.any((state < 0) | (state > 2.0)):\n print(\"bad state\")\n return False\n\n # The big three lines solving for quasistatic shear stress, slip rate\n # and state evolution\n sd_vector = np.stack((slip_deficit * -ny, slip_deficit * nx), axis=1).ravel()\n traction = total_fault_slip_to_fault_traction.dot(sd_vector).reshape((-1, 2))\n delta_sigma_qs = np.sum(traction * np.stack((nx, ny), axis=1), axis=1)\n delta_tau_qs = -np.sum(traction * np.stack((-ny, nx), axis=1), axis=1)\n tau_qs = init_tau + delta_tau_qs\n sigma_qs = init_sigma + delta_sigma_qs\n\n V = solve_friction(mp, sigma_qs, tau_qs, self.V_old, state)\n if not V[2]:\n print(\"convergence failed\")\n return False\n\n V=V[0]\n if not np.all(np.isfinite(V)):\n print(\"infinite V\")\n return False\n dstatedt = aging_law(mp, V, state)\n self.V_old = V\n\n slip_deficit_rate = Vp - V\n out = (\n slip_deficit,\n state,\n delta_sigma_qs,\n sigma_qs,\n delta_tau_qs,\n tau_qs,\n V,\n slip_deficit_rate,\n dstatedt,\n )\n self.data = out\n return self.data", "_____no_output_____" ], [ "def plot_system_state(t, SS, xlim=None):\n \"\"\"This is just a helper function that creates some rough plots of the\n current state to help with debugging\"\"\"\n (\n slip_deficit,\n state,\n delta_sigma_qs,\n sigma_qs,\n delta_tau_qs,\n tau_qs,\n V,\n slip_deficit_rate,\n dstatedt,\n ) = SS\n\n slip = Vp * t - slip_deficit\n\n fd = -np.linalg.norm(fault.pts, axis=1)\n\n plt.figure(figsize=(15, 9))\n plt.suptitle(f\"t={t/siay}\")\n plt.subplot(3, 3, 1)\n plt.title(\"slip\")\n plt.plot(fd, slip)\n plt.xlim(xlim)\n\n plt.subplot(3, 3, 2)\n plt.title(\"slip deficit\")\n plt.plot(fd, slip_deficit)\n plt.xlim(xlim)\n\n # plt.subplot(3, 3, 2)\n # plt.title(\"slip deficit rate\")\n # plt.plot(fd, slip_deficit_rate)\n # plt.xlim(xlim)\n\n # plt.subplot(3, 3, 2)\n # plt.title(\"strength\")\n # plt.plot(fd, tau_qs/sigma_qs)\n # plt.xlim(xlim)\n\n plt.subplot(3, 3, 3)\n # plt.title(\"log V\")\n # plt.plot(fd, np.log10(V))\n plt.title(\"V\")\n plt.plot(fd, V)\n plt.xlim(xlim)\n\n plt.subplot(3, 3, 4)\n plt.title(r\"$\\sigma_{qs}$\")\n plt.plot(fd, sigma_qs)\n plt.xlim(xlim)\n\n plt.subplot(3, 3, 5)\n plt.title(r\"$\\tau_{qs}$\")\n plt.plot(fd, tau_qs, 'k-o')\n plt.xlim(xlim)\n\n plt.subplot(3, 3, 6)\n plt.title(\"state\")\n plt.plot(fd, state)\n plt.xlim(xlim)\n\n plt.subplot(3, 3, 7)\n plt.title(r\"$\\Delta\\sigma_{qs}$\")\n plt.plot(fd, delta_sigma_qs)\n plt.hlines([0], [fd[-1]], [fd[0]])\n plt.xlim(xlim)\n\n plt.subplot(3, 3, 8)\n plt.title(r\"$\\Delta\\tau_{qs}$\")\n plt.plot(fd, delta_tau_qs)\n plt.hlines([0], [fd[-1]], [fd[0]])\n plt.xlim(xlim)\n\n plt.subplot(3, 3, 9)\n plt.title(\"dstatedt\")\n plt.plot(fd, dstatedt)\n plt.xlim(xlim)\n plt.tight_layout()\n\n plt.show()", "_____no_output_____" ], [ "def calc_derivatives(state, t, y):\n \"\"\"\n This helper function calculates the system state and then extracts the\n relevant derivatives that the integrator needs. 
It also intentionally\n returns infinite derivatives when the `y` vector provided by the integrator\n is invalid.\n \"\"\"\n if not np.all(np.isfinite(y)):\n return np.inf * y\n state_vecs = state.calc(t, y)\n if not state_vecs:\n return np.inf * y\n derivatives = np.concatenate((state_vecs[-2], state_vecs[-1]))\n return derivatives", "_____no_output_____" ] ], [ [ "## Integrating through time", "_____no_output_____" ] ], [ [ "%%time\nfrom scipy.integrate import RK23, RK45\n\n# We use a 5th order adaptive Runge Kutta method and pass the derivative function to it\n# the relative tolerance will be 1e-11 to make sure that even\nstate = SystemState()\nderivs = lambda t, y: calc_derivatives(state, t, y)\nintegrator = RK45\natol = Vp * 1e-6\nrtol = 1e-11\nrk = integrator(derivs, 0, init_conditions, 1e50, atol=atol, rtol=rtol)\n\n# Set the initial time step to one day.\nrk.h_abs = 60 * 60 * 24\n\n# Integrate for 1000 years.\nmax_T = 1000 * siay\n\nn_steps = 500000\nt_history = [0]\ny_history = [init_conditions.copy()]\nfor i in range(n_steps):\n # Take a time step and store the result\n if rk.step() != None:\n raise Exception(\"TIME STEPPING FAILED\")\n t_history.append(rk.t)\n y_history.append(rk.y.copy())\n\n # Print the time every 5000 steps\n if i % 5000 == 0:\n print(f\"step={i}, time={rk.t / siay} yrs, step={(rk.t - t_history[-2]) / siay}\")\n\n if rk.t > max_T:\n break\n\ny_history = np.array(y_history)\nt_history = np.array(t_history)", "step=0, time=1.4616397558414107e-05 yrs, step=1.4616397558414107e-05\nstep=5000, time=133.40079223999558 yrs, step=0.025598497379265003\nstep=10000, time=176.14015294812103 yrs, step=5.998816102602071e-11\nstep=15000, time=176.14015339898557 yrs, step=8.238172642666623e-11\nstep=20000, time=176.14296040288025 yrs, step=2.1979783596630634e-05\nstep=25000, time=263.1375647457368 yrs, step=0.0006967333900754272\nstep=30000, time=263.2373556060858 yrs, step=5.5787478717397596e-11\nstep=35000, time=263.23735589847996 yrs, step=1.6116718526537453e-10\nstep=40000, time=314.8023513177958 yrs, step=0.030260419123390083\nstep=45000, time=327.9773806823734 yrs, step=7.065608084576142e-11\nstep=50000, time=327.97738099773073 yrs, step=3.294060227625318e-11\nstep=55000, time=356.3693505901927 yrs, step=0.02798904784696968\nstep=60000, time=414.56802731019104 yrs, step=4.732567262808485e-11\nstep=65000, time=414.56802756651166 yrs, step=4.1885940142098087e-11\nstep=70000, time=414.9294840754991 yrs, step=0.002112030610137971\nstep=75000, time=479.28663934235965 yrs, step=5.784248876765926e-11\nstep=80000, time=479.28663967884637 yrs, step=7.796949896581029e-11\nstep=85000, time=479.2877011529307 yrs, step=8.501305135281465e-06\nstep=90000, time=565.6498981880231 yrs, step=0.001480316487609428\nstep=95000, time=565.8770021991605 yrs, step=5.4639090748133724e-11\nstep=100000, time=565.877002481205 yrs, step=1.531586902165607e-10\nstep=105000, time=614.5975778365752 yrs, step=0.029474945371490282\nstep=110000, time=630.595404186933 yrs, step=6.22547162285152e-11\nstep=115000, time=630.5954045063037 yrs, step=3.6990180904709996e-11\nstep=120000, time=656.0166350083252 yrs, step=0.02921588740701827\nstep=125000, time=717.1857705022477 yrs, step=4.0979318061100294e-11\nstep=130000, time=717.1857707585524 yrs, step=5.234231480960598e-11\nstep=135000, time=717.3785763542687 yrs, step=0.0010220541964367556\nstep=140000, time=781.904169712801 yrs, step=6.805709754690108e-11\nstep=145000, time=781.904170048407 yrs, step=7.603537185968166e-11\nstep=150000, time=781.9045694275084 
yrs, step=3.6081749206038533e-06\nstep=155000, time=867.9930534924584 yrs, step=0.0034391542903144255\nstep=160000, time=868.4945363826215 yrs, step=5.258408069787206e-11\nstep=165000, time=868.4945366547215 yrs, step=1.5182897783109724e-10\nstep=170000, time=914.161314434437 yrs, step=0.030127968410185897\nstep=175000, time=933.2129355866244 yrs, step=7.228800059155745e-11\nstep=180000, time=933.2129359097105 yrs, step=3.505605379858137e-11\nstep=185000, time=955.611410869588 yrs, step=0.025085701504597362\nCPU times: user 5h 6min 33s, sys: 27min 1s, total: 5h 33min 35s\nWall time: 1h 27min 50s\n" ] ], [ [ "## Plotting the results", "_____no_output_____" ], [ "Now that we've solved for 1000 years of fault slip evolution, let's plot some of the results. I'll start with a super simple plot of the maximum log slip rate over time.", "_____no_output_____" ] ], [ [ "derivs_history = np.diff(y_history, axis=0) / np.diff(t_history)[:, None]\nmax_vel = np.max(np.abs(derivs_history), axis=1)\nplt.plot(t_history[1:] / siay, np.log10(max_vel))\nplt.xlabel('$t ~~ \\mathrm{(yrs)}$')\nplt.ylabel('$\\log_{10}(V)$')\nplt.show()", "_____no_output_____" ] ], [ [ "And next, we'll make the classic plot showing the spatial distribution of slip over time:\n- the blue lines show interseismic slip evolution and are plotted every fifteen years\n- the red lines show evolution during rupture every three seconds.", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(10, 4))\nlast_plt_t = -1000\nlast_plt_slip = init_slip_deficit\nevent_times = []\nfor i in range(len(y_history) - 1):\n y = y_history[i]\n t = t_history[i]\n slip_deficit = y[: init_slip_deficit.shape[0]]\n should_plot = False\n\n # Plot a red line every three second if the slip rate is over 0.1 mm/s.\n if (\n max_vel[i] >= 0.0001 and t - last_plt_t > 3\n ):\n if len(event_times) == 0 or t - event_times[-1] > siay:\n event_times.append(t)\n should_plot = True\n color = \"r\"\n\n # Plot a blue line every fifteen years during the interseismic period\n if t - last_plt_t > 15 * siay:\n should_plot = True\n color = \"b\"\n\n if should_plot:\n # Convert from slip deficit to slip:\n slip = -slip_deficit + Vp * t\n plt.plot(slip, fd / 1000.0, color + \"-\", linewidth=0.5)\n last_plt_t = t\n last_plt_slip = slip\nplt.xlim([0, np.max(last_plt_slip)])\nplt.ylim([-40, 0])\nplt.ylabel(r\"$\\textrm{z (km)}$\")\nplt.xlabel(r\"$\\textrm{slip (m)}$\")\nplt.tight_layout()\nplt.savefig(\"halfspace.png\", dpi=300)\nplt.show()", "_____no_output_____" ] ], [ [ "And a plot of recurrence interval:", "_____no_output_____" ] ], [ [ "plt.title(\"Recurrence interval\")\nplt.plot(np.diff(event_times) / siay, \"k-*\")\nplt.xticks(np.arange(0, 10, 1))\nplt.yticks(np.arange(75, 80, 0.5))\nplt.xlabel(\"Event number\")\nplt.ylabel(\"Time between events (yr)\")\nplt.show()", "_____no_output_____" ] ], [ [ "## Comparison against SCEC SEAS results", "_____no_output_____" ] ], [ [ "ozawa_data = np.loadtxt(\"ozawa7500.txt\")\nozawa_slip_rate = 10 ** ozawa_data[:, 2]\nozawa_stress = ozawa_data[:, 3]", "_____no_output_____" ], [ "t_start_idx = np.argmax(max_vel > 1e-4)\nt_end_idx = np.argmax(max_vel[t_start_idx:] < 1e-6)\nn_steps = t_end_idx - t_start_idx\nt_chunk = t_history[t_start_idx : t_end_idx]", "_____no_output_____" ], [ "shear_chunk = []\nslip_rate_chunk = []\nfor i in range(n_steps):\n system_state = SystemState().calc(t_history[t_start_idx + i], y_history[t_start_idx + i])\n slip_deficit, state, delta_sigma_qs, sigma_qs, delta_tau_qs, tau_qs, V, slip_deficit_rate, dstatedt = 
system_state\n shear_chunk.append((tau_qs - mp.eta * V))\n slip_rate_chunk.append(V)\nshear_chunk = np.array(shear_chunk)\nslip_rate_chunk = np.array(slip_rate_chunk)", "_____no_output_____" ], [ "fault_idx = np.argmax((-7450 > fd) & (fd > -7550))\nVAvg = np.mean(slip_rate_chunk[:, fault_idx:(fault_idx+2)], axis=1)\nSAvg = np.mean(shear_chunk[:, fault_idx:(fault_idx+2)], axis=1)", "_____no_output_____" ], [ "fault_idx", "_____no_output_____" ], [ "t_align = t_chunk[np.argmax(VAvg > 0.2)]\nozawa_t_align = np.argmax(ozawa_slip_rate > 0.2)\n\nfor lims in [(-1, 1), (-15, 30)]:\n plt.figure(figsize=(12, 8))\n plt.subplot(2, 1, 1)\n plt.plot(t_chunk - t_align, SAvg / 1e6, \"k-o\", markersize=0.5, linewidth=0.5, label='here')\n plt.plot(\n ozawa_data[:, 0] - ozawa_data[ozawa_t_align, 0],\n ozawa_stress,\n \"b-*\",\n markersize=0.5,\n linewidth=0.5,\n label='ozawa'\n )\n plt.legend()\n plt.xlim(lims)\n plt.xlabel(\"Time (s)\")\n plt.ylabel(\"Shear Stress (MPa)\")\n # plt.show()\n\n plt.subplot(2, 1, 2)\n plt.plot(t_chunk - t_align, VAvg, \"k-o\", markersize=0.5, linewidth=0.5, label='here')\n plt.plot(\n ozawa_data[:, 0] - ozawa_data[ozawa_t_align, 0],\n ozawa_slip_rate[:],\n \"b-*\",\n markersize=0.5,\n linewidth=0.5,\n label='ozawa'\n )\n plt.legend()\n plt.xlim(lims)\n plt.xlabel(\"Time (s)\")\n plt.ylabel(\"Slip rate (m/s)\")\n plt.tight_layout()\n plt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
4a67abf6488569e0e9c854cdd878ec75be924bc3
45,839
ipynb
Jupyter Notebook
ch09/ch09.ipynb
pwjdgus/python-machine-learning-book-3rd-edition
180f6f9c9d7e1cee859d9bbdda872efa1da7c9e6
[ "MIT" ]
1
2022-01-11T11:52:38.000Z
2022-01-11T11:52:38.000Z
ch09/ch09.ipynb
pwjdgus/python-machine-learning-book-3rd-edition
180f6f9c9d7e1cee859d9bbdda872efa1da7c9e6
[ "MIT" ]
null
null
null
ch09/ch09.ipynb
pwjdgus/python-machine-learning-book-3rd-edition
180f6f9c9d7e1cee859d9bbdda872efa1da7c9e6
[ "MIT" ]
null
null
null
26.314007
1,421
0.54386
[ [ [ "# 머신 러닝 교과서 3판", "_____no_output_____" ], [ "# 9장 - 웹 애플리케이션에 머신 러닝 모델 내장하기", "_____no_output_____" ], [ "**아래 링크를 통해 이 노트북을 주피터 노트북 뷰어(nbviewer.jupyter.org)로 보거나 구글 코랩(colab.research.google.com)에서 실행할 수 있습니다.**\n\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://nbviewer.org/github/rickiepark/python-machine-learning-book-3rd-edition/blob/master/ch09/ch09.ipynb\"><img src=\"https://jupyter.org/assets/share.png\" width=\"60\" />주피터 노트북 뷰어로 보기</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/rickiepark/python-machine-learning-book-3rd-edition/blob/master/ch09/ch09.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />구글 코랩(Colab)에서 실행하기</a>\n </td>\n</table>", "_____no_output_____" ], [ "### 목차", "_____no_output_____" ], [ "- 8장 정리 - 영화 리뷰 분류를 위한 모델 훈련하기\n- 학습된 사이킷런 추정기 저장\n- 데이터를 저장하기 위해 SQLite 데이터베이스 설정\n- 플라스크 웹 애플리케이션 개발\n - 첫 번째 플라스크 애플리케이션\n - 폼 검증과 화면 출력\n- 영화 리뷰 분류기를 웹 애플리케이션으로 만들기\n- 공개 서버에 웹 애플리케이션 배포\n - 영화 분류기 업데이트\n- 요약", "_____no_output_____" ] ], [ [ "# 코랩에서 실행할 경우 최신 버전의 사이킷런을 설치합니다.\n!pip install --upgrade scikit-learn", "Requirement already satisfied: scikit-learn in /home/haesun/python-machine-learning-book-3rd-edition/.env/lib/python3.7/site-packages (1.0)\nRequirement already satisfied: joblib>=0.11 in /home/haesun/python-machine-learning-book-3rd-edition/.env/lib/python3.7/site-packages (from scikit-learn) (1.1.0)\nRequirement already satisfied: scipy>=1.1.0 in /home/haesun/python-machine-learning-book-3rd-edition/.env/lib/python3.7/site-packages (from scikit-learn) (1.7.1)\nRequirement already satisfied: numpy>=1.14.6 in /home/haesun/python-machine-learning-book-3rd-edition/.env/lib/python3.7/site-packages (from scikit-learn) (1.21.3)\nRequirement already satisfied: threadpoolctl>=2.0.0 in /home/haesun/python-machine-learning-book-3rd-edition/.env/lib/python3.7/site-packages (from scikit-learn) (3.0.0)\n" ], [ "from IPython.display import Image", "_____no_output_____" ] ], [ [ "플래스크(Flask) 웹 애플리케이션 코드는 다음 디렉토리에 있습니다:\n \n- `1st_flask_app_1/`: 간단한 플래스크 웹 애플리케이션\n- `1st_flask_app_2/`: `1st_flask_app_1`에 폼 검증과 렌더링을 추가하여 확장한 버전\n- `movieclassifier/`: 웹 애플리케이션에 내장한 영화 리뷰 분류기\n- `movieclassifier_with_update/`: `movieclassifier`와 같지만 초기화를 위해 sqlite 데이터베이스를 사용합니다.", "_____no_output_____" ], [ "웹 애플리케이션을 로컬에서 실행하려면 `cd`로 (위에 나열된) 각 디렉토리에 들어가서 메인 애플리케이션 스크립트를 실행합니다.\n\n cd ./1st_flask_app_1\n python app.py\n \n터미널에서 다음같은 내용일 출력됩니다.\n \n * Running on http://127.0.0.1:5000/\n * Restarting with reloader\n \n웹 브라우저를 열고 터미널에 출력된 주소(일반적으로 http://127.0.0.1:5000/)를 입력하여 웹 애플리케이션에 접속합니다.", "_____no_output_____" ], [ "**이 튜토리얼로 만든 예제 애플리케이션 데모는 다음 주소에서 볼 수 있습니다: http://haesun.pythonanywhere.com/**.", "_____no_output_____" ], [ "<br>\n<br>", "_____no_output_____" ], [ "# 8장 정리 - 영화 리뷰 분류를 위한 모델 훈련하기", "_____no_output_____" ], [ "이 절은 8장의 마지막 섹션에서 훈련한 로지스틱 회귀 모델을 다시 사용합니다. 이어지는 코드 블럭을 실행하여 다음 절에서 사용할 모델을 훈련시키겠습니다.", "_____no_output_____" ], [ "**노트**\n\n다음 코드는 8장에서 만든 `movie_data.csv` 데이터셋을 사용합니다.\n\n**코랩을 사용할 때는 다음 셀을 실행하세요.**", "_____no_output_____" ] ], [ [ "!wget https://github.com/rickiepark/python-machine-learning-book-3rd-edition/raw/master/ch09/movie_data.csv.gz", "--2021-10-23 10:41:51-- https://github.com/rickiepark/python-machine-learning-book-3rd-edition/raw/master/ch09/movie_data.csv.gz\nResolving github.com (github.com)... 15.164.81.167\nConnecting to github.com (github.com)|15.164.81.167|:443... connected.\nHTTP request sent, awaiting response... 
302 Found\nLocation: https://raw.githubusercontent.com/rickiepark/python-machine-learning-book-3rd-edition/master/ch09/movie_data.csv.gz [following]\n--2021-10-23 10:41:51-- https://raw.githubusercontent.com/rickiepark/python-machine-learning-book-3rd-edition/master/ch09/movie_data.csv.gz\nResolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.109.133, 185.199.108.133, 185.199.111.133, ...\nConnecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.109.133|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 26521894 (25M) [application/octet-stream]\nSaving to: ‘movie_data.csv.gz.1’\n\nmovie_data.csv.gz.1 100%[===================>] 25.29M 55.5MB/s in 0.5s \n\n2021-10-23 10:41:54 (55.5 MB/s) - ‘movie_data.csv.gz.1’ saved [26521894/26521894]\n\n" ], [ "import gzip\n\n\nwith gzip.open('movie_data.csv.gz') as f_in, open('movie_data.csv', 'wb') as f_out:\n f_out.writelines(f_in)", "_____no_output_____" ], [ "import nltk\nnltk.download('stopwords')", "[nltk_data] Downloading package stopwords to /home/haesun/nltk_data...\n[nltk_data] Package stopwords is already up-to-date!\n" ], [ "import numpy as np\nimport re\nfrom nltk.corpus import stopwords\nfrom nltk.stem import PorterStemmer\n\nstop = stopwords.words('english')\nporter = PorterStemmer()\n\ndef tokenizer(text):\n text = re.sub('<[^>]*>', '', text)\n emoticons = re.findall('(?::|;|=)(?:-)?(?:\\)|\\(|D|P)', text.lower())\n text = re.sub('[\\W]+', ' ', text.lower()) + ' '.join(emoticons).replace('-', '')\n tokenized = [w for w in text.split() if w not in stop]\n return tokenized\n\ndef stream_docs(path):\n with open(path, 'r', encoding='utf-8') as csv:\n next(csv) # skip header\n for line in csv:\n text, label = line[:-3], int(line[-2])\n yield text, label", "_____no_output_____" ], [ "next(stream_docs(path='movie_data.csv'))", "_____no_output_____" ], [ "def get_minibatch(doc_stream, size):\n docs, y = [], []\n try:\n for _ in range(size):\n text, label = next(doc_stream)\n docs.append(text)\n y.append(label)\n except StopIteration:\n return None, None\n return docs, y", "_____no_output_____" ], [ "from sklearn.feature_extraction.text import HashingVectorizer\nfrom sklearn.linear_model import SGDClassifier\n\nvect = HashingVectorizer(decode_error='ignore', \n n_features=2**21,\n preprocessor=None, \n tokenizer=tokenizer)\n\nclf = SGDClassifier(loss='log', random_state=1, max_iter=1)\ndoc_stream = stream_docs(path='movie_data.csv')", "_____no_output_____" ] ], [ [ "`pyprind`는 주피터 노트북에서 진행바를 출력하기 위한 유틸리티입니다. `pyprind` 패키지를 설치하려면 다음 셀을 실행하세요.", "_____no_output_____" ] ], [ [ "!pip install pyprind", "Requirement already satisfied: pyprind in /home/haesun/python-machine-learning-book-3rd-edition/.env/lib/python3.7/site-packages (2.11.3)\n" ], [ "import pyprind\npbar = pyprind.ProgBar(45)\n\nclasses = np.array([0, 1])\nfor _ in range(45):\n X_train, y_train = get_minibatch(doc_stream, size=1000)\n if not X_train:\n break\n X_train = vect.transform(X_train)\n clf.partial_fit(X_train, y_train, classes=classes)\n pbar.update()", "Warning: No valid output stream.\n" ], [ "X_test, y_test = get_minibatch(doc_stream, size=5000)\nX_test = vect.transform(X_test)\nprint('정확도: %.3f' % clf.score(X_test, y_test))", "정확도: 0.868\n" ], [ "clf = clf.partial_fit(X_test, y_test)", "_____no_output_____" ] ], [ [ "### 노트\n\npickle 파일을 만드는 것이 조금 까다로울 수 있기 때문에 `pickle-test-scripts/` 디렉토리에 올바르게 환경이 설정되었는지 확인하는 간단한 테스트 스크립트를 추가했습니다. 
기본적으로 `movie_data` 데이터 일부를 포함하고 있고 `ch08`의 관련된 코드를 정리한 버전입니다.\n\n다음처럼 실행하면\n\n python pickle-dump-test.py\n\n`movie_data_small.csv`에서 작은 분류 모델을 훈련하고 2개의 pickle 파일을 만듭니다.\n\n stopwords.pkl\n classifier.pkl\n\n그다음 아래 명령을 실행하면\n\n python pickle-load-test.py\n\n다음 2줄이 출력되어야 합니다:\n\n Prediction: positive\n Probability: 85.71%", "_____no_output_____" ], [ "<br>\n<br>", "_____no_output_____" ], [ "# 학습된 사이킷런 추정기 저장", "_____no_output_____" ], [ "앞에서 로지스틱 회귀 모델을 훈련한 후에 분류기, 불용어, 포터 어간 추출기, `HashingVectorizer`를 로컬 디스크에 직렬화된 객체로 저장합니다. 나중에 웹 애플리케이션에서 학습된 분류기를 이용하겠습니다.", "_____no_output_____" ] ], [ [ "import pickle\nimport os\n\ndest = os.path.join('movieclassifier', 'pkl_objects')\nif not os.path.exists(dest):\n os.makedirs(dest)\n\npickle.dump(stop, open(os.path.join(dest, 'stopwords.pkl'), 'wb'), protocol=4) \npickle.dump(clf, open(os.path.join(dest, 'classifier.pkl'), 'wb'), protocol=4)", "_____no_output_____" ] ], [ [ "그다음 나중에 임포트할 수 있도록 별도의 파일에 `HashingVectorizer`를 저장합니다.", "_____no_output_____" ] ], [ [ "%%writefile movieclassifier/vectorizer.py\nfrom sklearn.feature_extraction.text import HashingVectorizer\nimport re\nimport os\nimport pickle\n\ncur_dir = os.path.dirname(__file__)\nstop = pickle.load(open(\n os.path.join(cur_dir, \n 'pkl_objects', \n 'stopwords.pkl'), 'rb'))\n\ndef tokenizer(text):\n text = re.sub('<[^>]*>', '', text)\n emoticons = re.findall('(?::|;|=)(?:-)?(?:\\)|\\(|D|P)',\n text.lower())\n text = re.sub('[\\W]+', ' ', text.lower()) \\\n + ' '.join(emoticons).replace('-', '')\n tokenized = [w for w in text.split() if w not in stop]\n return tokenized\n\nvect = HashingVectorizer(decode_error='ignore',\n n_features=2**21,\n preprocessor=None,\n tokenizer=tokenizer)", "Overwriting movieclassifier/vectorizer.py\n" ] ], [ [ "이전 코드 셀을 실행한 후에 객체가 올바르게 저장되었는지 확인하기 위해 IPython 노트북 커널을 재시작할 수 있습니다.", "_____no_output_____" ], [ "먼저 현재 파이썬 디렉토리를 `movieclassifer`로 변경합니다:", "_____no_output_____" ] ], [ [ "import os\nos.chdir('movieclassifier')", "_____no_output_____" ], [ "import pickle\nimport re\nimport os\nfrom vectorizer import vect\n\nclf = pickle.load(open(os.path.join('pkl_objects', 'classifier.pkl'), 'rb'))", "_____no_output_____" ], [ "import numpy as np\nlabel = {0:'음성', 1:'양성'}\n\nexample = [\"I love this movie. 
It's amazing.\"]\nX = vect.transform(example)\nprint('예측: %s\\n확률: %.2f%%' %\\\n (label[clf.predict(X)[0]], \n np.max(clf.predict_proba(X))*100))", "예측: 양성\n확률: 95.55%\n" ] ], [ [ "<br>\n<br>", "_____no_output_____" ], [ "# 데이터를 저장하기 위해 SQLite 데이터베이스 설정", "_____no_output_____" ], [ "이 코드를 실행하기 전에 현재 위치가 `movieclassifier` 디렉토리인지 확인합니다.", "_____no_output_____" ] ], [ [ "os.getcwd()", "_____no_output_____" ], [ "import sqlite3\nimport os\n\nconn = sqlite3.connect('reviews.sqlite')\nc = conn.cursor()\n\nc.execute('DROP TABLE IF EXISTS review_db')\nc.execute('CREATE TABLE review_db (review TEXT, sentiment INTEGER, date TEXT)')\n\nexample1 = 'I love this movie'\nc.execute(\"INSERT INTO review_db (review, sentiment, date) VALUES (?, ?, DATETIME('now'))\", (example1, 1))\n\nexample2 = 'I disliked this movie'\nc.execute(\"INSERT INTO review_db (review, sentiment, date) VALUES (?, ?, DATETIME('now'))\", (example2, 0))\n\nconn.commit()\nconn.close()", "_____no_output_____" ], [ "conn = sqlite3.connect('reviews.sqlite')\nc = conn.cursor()\n\nc.execute(\"SELECT * FROM review_db WHERE date BETWEEN '2017-01-01 10:10:10' AND DATETIME('now')\")\nresults = c.fetchall()\n\nconn.close()", "_____no_output_____" ], [ "print(results)", "[('I love this movie', 1, '2021-10-23 10:42:27'), ('I disliked this movie', 0, '2021-10-23 10:42:27')]\n" ], [ "Image(url='https://git.io/Jts3V', width=700) ", "_____no_output_____" ] ], [ [ "<br>", "_____no_output_____" ], [ "# 플라스크 웹 애플리케이션 개발", "_____no_output_____" ], [ "...", "_____no_output_____" ], [ "## 첫 번째 플라스크 애플리케이션", "_____no_output_____" ], [ "...", "_____no_output_____" ] ], [ [ "Image(url='https://git.io/Jts3o', width=700) ", "_____no_output_____" ] ], [ [ "## 폼 검증과 화면 출력", "_____no_output_____" ] ], [ [ "Image(url='https://git.io/Jts3K', width=400) ", "_____no_output_____" ], [ "Image(url='https://git.io/Jts36', width=400) ", "_____no_output_____" ] ], [ [ "<br>\n<br>", "_____no_output_____" ], [ "## 화면 요약", "_____no_output_____" ] ], [ [ "Image(url='https://git.io/Jts3P', width=800) ", "_____no_output_____" ], [ "Image(url='https://git.io/Jts3X', width=800) ", "_____no_output_____" ], [ "Image(url='https://git.io/Jts31', width=400) ", "_____no_output_____" ] ], [ [ "# 영화 리뷰 분류기를 웹 애플리케이션으로 만들기", "_____no_output_____" ] ], [ [ "Image(url='https://git.io/Jts3M', width=400) ", "_____no_output_____" ], [ "Image(url='https://git.io/Jts3D', width=400) ", "_____no_output_____" ], [ "Image(url='https://git.io/Jts3y', width=400) ", "_____no_output_____" ], [ "Image(url='https://git.io/Jts3S', width=200) ", "_____no_output_____" ], [ "Image(url='https://git.io/Jts32', width=400) ", "_____no_output_____" ] ], [ [ "<br>\n<br>", "_____no_output_____" ], [ "# 공개 서버에 웹 애플리케이션 배포", "_____no_output_____" ] ], [ [ "Image(url='https://git.io/Jts39', width=600) ", "_____no_output_____" ] ], [ [ "<br>\n<br>", "_____no_output_____" ], [ "## 영화 분류기 업데이트", "_____no_output_____" ], [ "다운로드한 깃허브 저장소에 들어있는 movieclassifier_with_update 디렉토리를 사용합니다(그렇지 않으면 `movieclassifier` 디렉토리를 복사해서 사용하세요).", "_____no_output_____" ], [ "**코랩을 사용할 때는 다음 셀을 실행하세요.**", "_____no_output_____" ] ], [ [ "!cp -r ../movieclassifier ../movieclassifier_with_update", "_____no_output_____" ], [ "import shutil\n\nos.chdir('..')\n\nif not os.path.exists('movieclassifier_with_update'):\n os.mkdir('movieclassifier_with_update')\nos.chdir('movieclassifier_with_update')\n\nif not os.path.exists('pkl_objects'):\n os.mkdir('pkl_objects')\n\nshutil.copyfile('../movieclassifier/pkl_objects/classifier.pkl',\n 
'./pkl_objects/classifier.pkl')\n\nshutil.copyfile('../movieclassifier/reviews.sqlite',\n './reviews.sqlite')", "_____no_output_____" ] ], [ [ "SQLite 데이터베이스에 저장된 데이터로 분류기를 업데이트하는 함수를 정의합니다:", "_____no_output_____" ] ], [ [ "import pickle\nimport sqlite3\nimport numpy as np\n\n# 로컬 디렉토리에서 HashingVectorizer를 임포트합니다\nfrom vectorizer import vect\n\ndef update_model(db_path, model, batch_size=10000):\n\n conn = sqlite3.connect(db_path)\n c = conn.cursor()\n c.execute('SELECT * from review_db')\n \n results = c.fetchmany(batch_size)\n while results:\n data = np.array(results)\n X = data[:, 0]\n y = data[:, 1].astype(int)\n \n classes = np.array([0, 1])\n X_train = vect.transform(X)\n clf.partial_fit(X_train, y, classes=classes)\n results = c.fetchmany(batch_size)\n \n conn.close()\n return None", "_____no_output_____" ] ], [ [ "모델을 업데이트합니다:", "_____no_output_____" ] ], [ [ "cur_dir = '.'\n\n# app.py 파일에 이 코드를 삽입했다면 다음 경로를 사용하세요.\n\n# import os\n# cur_dir = os.path.dirname(__file__)\n\nclf = pickle.load(open(os.path.join(cur_dir,\n 'pkl_objects',\n 'classifier.pkl'), 'rb'))\ndb = os.path.join(cur_dir, 'reviews.sqlite')\n\nupdate_model(db_path=db, model=clf, batch_size=10000)\n\n# classifier.pkl 파일을 업데이트하려면 다음 주석을 해제하세요.\n\n# pickle.dump(clf, open(os.path.join(cur_dir, \n# 'pkl_objects', 'classifier.pkl'), 'wb')\n# , protocol=4)", "_____no_output_____" ] ], [ [ "<br>\n<br>", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
4a67d98821d7a23aea35782009eae21b3d45b645
933,675
ipynb
Jupyter Notebook
WHO-Report-20200403.ipynb
NExt-Machine-Learning-Project/RecifeLandSlidings
4ce37a87dc81ba72c743a81bd54d9947fab94134
[ "MIT" ]
1
2021-11-14T21:32:06.000Z
2021-11-14T21:32:06.000Z
WHO-Report-20200403.ipynb
NExt-Machine-Learning-Project/RecifeLandSlidings
4ce37a87dc81ba72c743a81bd54d9947fab94134
[ "MIT" ]
null
null
null
WHO-Report-20200403.ipynb
NExt-Machine-Learning-Project/RecifeLandSlidings
4ce37a87dc81ba72c743a81bd54d9947fab94134
[ "MIT" ]
null
null
null
348.646378
87,800
0.900323
[ [ [ "import pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\ndf = pd.read_csv('Data/20200403-WHO.csv')\ndf", "_____no_output_____" ], [ "df = df[df['Country/Territory'] != 'conveyance (Diamond']", "_____no_output_____" ], [ "death_rate = df['Total Deaths']/df['Total Confirmed']*100\ndf['Death Rate'] = death_rate\ndf", "c:\\users\\ricardo_2\\appdata\\local\\programs\\python\\python37-32\\lib\\site-packages\\ipykernel_launcher.py:2: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n \n" ], [ "countries_infected = len(df)\nprint('The total number of countries infected is:',countries_infected)", "The total number of countries infected is: 205\n" ], [ "df = df.sort_values(by=['Death Rate'],ascending=False)\n\ndf[0:30]", "_____no_output_____" ], [ "minimum_number_cases = 1000 #define the minimum number of cases here/defina o número mínimo de casos aqui\n\ndfMinNumCases = df[df['Total Confirmed'] > minimum_number_cases]\ndfMinNumCases = dfMinNumCases.reset_index(drop=True)\ndfMinNumCases.index = np.arange(1, (len(dfMinNumCases)+1))\n\ndfMinNumCases[0:30]", "_____no_output_____" ], [ "#matplotlib defaults\nsns.set(style=\"whitegrid\")\ntop15_deathrate = dfMinNumCases[0:15]\ndeath_rate = top15_deathrate.round({'Death Rate':2})\ndeath_rate = death_rate['Death Rate']\n\n\nplt.figure(figsize=(15,10))\nplt.barh(top15_deathrate['Country/Territory'],top15_deathrate['Death Rate'],height=0.7, color='red')\nplt.title('Death Rate per Country (03/04/2020)',fontsize=25)\nplt.xlabel('Death Rate [%]',fontsize=18)\nplt.ylabel('Country',fontsize=18)\nplt.gca().invert_yaxis()\nfor i in range (0,15):\n plt.text(x=death_rate.iloc[i]+0.4, y=i , s=death_rate.iloc[i],horizontalalignment='center',verticalalignment='center', fontsize=14)\nplt.show()", "_____no_output_____" ], [ "#seaborn defaults\nf, ax = plt.subplots(figsize=(15, 12))\nsns.set(style=\"whitegrid\")\nsns.set_color_codes(\"muted\")\nsns.barplot(x='Death Rate',y='Country/Territory', data=top15_deathrate ,\n label=\"Deaths\", color=\"red\")\nplt.title('Death Rate per Country (03/04/2020)',fontsize=25)\nplt.xlabel('Death Rate [%]',fontsize=18)\nplt.ylabel('Country',fontsize=18)\n\nfor i in range (0,15):\n plt.text(x=death_rate.iloc[i]+0.4, y=i , s=death_rate.iloc[i],horizontalalignment='center',verticalalignment='center', fontsize=16)\nplt.savefig('Graphs/20200403_DeathRatePerCountry.png', bbox_inches='tight')\nplt.show()\n", "_____no_output_____" ], [ "#matplotlib defaults\ntop15_confirmed = top15_deathrate.sort_values(by=['Total Confirmed'],ascending=False)\ncountries = np.array(top15_confirmed['Country/Territory'])\nconfirmed = np.array(top15_confirmed['Total Confirmed'])\ndeaths = np.array(top15_confirmed['Total Deaths'])\ndiference = confirmed - deaths\n\nplt.figure(figsize=(15,10))\np1 = plt.barh(countries,deaths, color='red')\np2 = plt.barh(countries,diference,left=deaths, color='yellow')\nplt.title('Total Number of Cases/Deaths (03/04/2020)',fontsize=25)\nplt.xlabel('Cases/Deaths',fontsize=18)\nplt.ylabel('Country',fontsize=18)\nplt.legend((p1[0], p2[0]), ('Deaths', 'Confirmed'), loc='lower right')\nplt.gca().invert_yaxis()\nfor i in range (0,15):\n plt.text(x=deaths[i]+1900, y=i , s=deaths[i],horizontalalignment='center',verticalalignment='center', 
color='red',fontsize=14)\n plt.text(x=confirmed[i]+4000, y=i , s=confirmed[i],horizontalalignment='center',verticalalignment='center', fontsize=14)\nplt.show()", "_____no_output_____" ], [ "#seaborn defaults\nsns.set(style=\"whitegrid\")\n\nf, ax = plt.subplots(figsize=(15, 6))\n\n\nsns.set_color_codes(\"pastel\")\nsns.barplot(x='Total Confirmed',y='Country/Territory', data=top15_confirmed,\n label=\"Confirmed\", color=\"yellow\")\nsns.set_color_codes(\"muted\")\nsns.barplot(x='Total Deaths',y='Country/Territory', data=top15_confirmed ,\n label=\"Deaths\", color=\"red\")\n\n\nplt.title('Total Number of Cases/Deaths in the Top15 Death Rate Countries (03/04/2020)',fontsize=18)\nax.legend(ncol=2, loc=\"lower right\", frameon=True)\nax.set(ylabel=\"Countries/Territory\",\n xlabel=\"Cases/Deaths\")\nfor i in range (0,15):\n plt.text(x=deaths[i]+1900, y=i , s=deaths[i],horizontalalignment='center',verticalalignment='center', color='red',fontsize=14)\n plt.text(x=confirmed[i]+4000, y=i , s=confirmed[i],horizontalalignment='center',verticalalignment='center', fontsize=14)\nsns.despine(left=True, bottom=True)\nplt.savefig('Graphs/20200403_TotalNumberCasesDeaths.png', bbox_inches='tight')", "_____no_output_____" ], [ "dfDSLRC = df.sort_values(by=['Days since last reported case'],ascending=False)#dfDSLRC = dataframe Days since last reported case\ndfDSLRC[0:30]", "_____no_output_____" ], [ "#seaborn defaults\ntop15DSLRC = dfDSLRC[0:15].sort_values(by=['Days since last reported case'])\nDSLRC = top15DSLRC['Days since last reported case']\n\nf, ax = plt.subplots(figsize=(15, 12))\nsns.set(style=\"whitegrid\")\nsns.set_color_codes(\"muted\")\nsns.barplot(x='Country/Territory',y='Days since last reported case', data=top15DSLRC ,\n label=\"Days since last reported case\", color=\"blue\")\nplt.title('Days since Last Reported Case per Country (03/04/2020)',fontsize=24)\nplt.ylabel('Days since last reported case',fontsize=18)\nplt.xlabel('Countries/Territory',fontsize=18)\nplt.xticks(rotation='vertical')\nfor i in range (0,15):\n plt.text(x=i, y=DSLRC.iloc[i]+0.4 , s=DSLRC.iloc[i],horizontalalignment='center',verticalalignment='center', fontsize=16)\nplt.savefig('Graphs/20200403_DaysSinceLast.png', bbox_inches='tight')\nplt.show()", "c:\\users\\ricardo_2\\appdata\\local\\programs\\python\\python37-32\\lib\\site-packages\\matplotlib\\backends\\backend_agg.py:211: RuntimeWarning: Glyph 13 missing from current font.\n font.set_text(s, 0.0, flags=flags)\nc:\\users\\ricardo_2\\appdata\\local\\programs\\python\\python37-32\\lib\\site-packages\\matplotlib\\backends\\backend_agg.py:180: RuntimeWarning: Glyph 13 missing from current font.\n font.set_text(s, 0, flags=flags)\n" ], [ "#seaborn defaults\nconfirmedDSLRC = np.array(top15DSLRC['Total Confirmed'])\ndeathsDSLRC = np.array(top15DSLRC['Total Deaths'])\nsns.set(style=\"whitegrid\")\n\nf, ax = plt.subplots(figsize=(15, 6))\n\n\nsns.set_color_codes(\"pastel\")\nsns.barplot(x='Total Confirmed',y='Country/Territory', data=top15DSLRC,\n label=\"Confirmed\", color=\"yellow\")\nsns.set_color_codes(\"muted\")\nsns.barplot(x='Total Deaths',y='Country/Territory', data=top15DSLRC ,\n label=\"Deaths\", color=\"red\")\n\n\nplt.title('Total Number of Cases/Deaths in the Top15 Days Since Last Reported Case Countries (03/04/2020)',fontsize=18)\nax.legend(ncol=2, loc=\"lower right\", frameon=True)\nax.set(ylabel=\"Countries/Territory\",\n xlabel=\"Cases/Deaths\")\nfor i in range (0,15):\n plt.text(x=deathsDSLRC[i]+0.2, y=i , 
s=deathsDSLRC[i],horizontalalignment='center',verticalalignment='center', color='red',fontsize=14)\n plt.text(x=confirmedDSLRC[i]+0.4, y=i , s=confirmedDSLRC[i],horizontalalignment='center',verticalalignment='center', fontsize=14)\nsns.despine(left=True, bottom=True)\nplt.savefig('Graphs/20200403_TotalNumberCasesDeathsDSLRC.png', bbox_inches='tight')", "_____no_output_____" ], [ "Transmission_type = pd.get_dummies(df, columns=['Transmission Classification'])\nTransmission_type", "_____no_output_____" ], [ "print('The number of countries with only imported cases is:',Transmission_type['Transmission Classification_Imported cases only'].sum())", "The number of countries with only imported cases is: 32\n" ], [ "print('The number of countries with local transmissions cases is:',Transmission_type['Transmission Classification_Local transmission'].sum())", "The number of countries with local transmissions cases is: 168\n" ], [ "print('The number of countries under investigation to determine the type of transmission is:',Transmission_type['Transmission Classification_Under investigation'].sum())", "The number of countries under investigation to determine the type of transmission is: 5\n" ], [ "WorldPopulation = pd.read_csv('Data/WorldPopulation.csv')\ndf['Population'] = 0\nfor i in range (0,len(df)):\n pop = WorldPopulation.loc[WorldPopulation.loc[:,'Country/Territory']==df.loc[i,'Country/Territory']]\n if pop.empty == True:\n df.loc[i,'Population'] = 0\n else:\n df.loc[i,'Population'] = pop.iloc[0,1]\nfor i in range (0,len(df)):\n if df.loc[i,'Population'] != 0:\n df.loc[i,'Population Contaminated %'] = df.loc[i,'Total Confirmed']/df.loc[i,'Population']*100\n else:\n df.loc[i,'Population Contaminated %'] = 0\ndfPopContaminated = df.sort_values(by=['Population Contaminated %'],ascending=False)\n\nminimum_number_cases = 1 #define the minimum number of cases here/defina o número mínimo de casos aqui\n\ndfPopMinNumCases = dfPopContaminated[dfPopContaminated['Total Confirmed'] > minimum_number_cases]\ndfPopMinNumCases = dfPopMinNumCases.reset_index(drop=True)\ndfPopMinNumCases.index = np.arange(1, (len(dfPopMinNumCases)+1))\ntop15_contaminated = dfPopMinNumCases[0:15]\ncontamination_rate = top15_contaminated.round({'Population Contaminated %':4})\ncontamination_rate = contamination_rate['Population Contaminated %']", "_____no_output_____" ], [ "#seaborn defaults\nf, ax = plt.subplots(figsize=(15, 12))\nsns.set(style=\"whitegrid\")\nsns.set_color_codes(\"muted\")\nsns.barplot(x='Population Contaminated %',y='Country/Territory', data=top15_contaminated ,\n label=\"Deaths\", color=\"navy\")\nplt.title('Cases Confirmed per Number of Habitants per Country (03/04/2020)',fontsize=25)\nplt.xlabel('Cases Confirmed per Number of Habitants per Country [%]',fontsize=18)\nplt.ylabel('Country',fontsize=18)\n\nfor i in range (0,15):\n plt.text(x=contamination_rate.iloc[i]+0.03, y=i , s=contamination_rate.iloc[i],horizontalalignment='center',verticalalignment='center', fontsize=16)\nplt.savefig('Graphs/20200403_ContaminationPerCountry.png', bbox_inches='tight')\nplt.show()", "_____no_output_____" ], [ "minimum_number_cases = 1000 #define the minimum number of cases here/defina o número mínimo de casos aqui\n\ndfPopMinNumCases = dfPopContaminated[dfPopContaminated['Total Confirmed'] > minimum_number_cases]\ndfPopMinNumCases = dfPopMinNumCases.reset_index(drop=True)\ndfPopMinNumCases.index = np.arange(1, (len(dfPopMinNumCases)+1))\ntop15_contaminated = dfPopMinNumCases[0:15]\ncontamination_rate = 
top15_contaminated.round({'Population Contaminated %':4})\ncontamination_rate = contamination_rate['Population Contaminated %']\n#seaborn defaults\nf, ax = plt.subplots(figsize=(15, 12))\nsns.set(style=\"whitegrid\")\nsns.set_color_codes(\"muted\")\nsns.barplot(x='Population Contaminated %',y='Country/Territory', data=top15_contaminated ,\n label=\"Deaths\", color=\"navy\")\nplt.title('Cases Confirmed per Number of Habitants per Country (03/04/2020)',fontsize=25)\nplt.xlabel('Cases Confirmed per Number of Habitants per Country [%]',fontsize=18)\nplt.ylabel('Country',fontsize=18)\n\nfor i in range (0,15):\n plt.text(x=contamination_rate.iloc[i]+0.03, y=i , s=contamination_rate.iloc[i],horizontalalignment='center',verticalalignment='center', fontsize=16)\nplt.savefig('Graphs/20200403_ContaminationPerCountry1kCases.png', bbox_inches='tight')\nplt.show()", "_____no_output_____" ], [ "for i in range (0,len(df)):\n if df.loc[i,'Population'] != 0:\n df.loc[i,'Population Death Rate %'] = df.loc[i,'Total Deaths']/df.loc[i,'Population']*100\n else:\n df.loc[i,'Population Death Rate %'] = 0\ndfPopDeathRate = df.sort_values(by=['Population Death Rate %'],ascending=False)\n\nminimum_number_cases = 1 #define the minimum number of cases here/defina o número mínimo de casos aqui\n\ndfPopMinNumCases = dfPopDeathRate[dfPopDeathRate['Total Confirmed'] > minimum_number_cases]\ndfPopMinNumCases = dfPopMinNumCases.reset_index(drop=True)\ndfPopMinNumCases.index = np.arange(1, (len(dfPopMinNumCases)+1))\ntop15_PopDeathRate = dfPopMinNumCases[0:15]\npopDeath_rate = top15_PopDeathRate.round({'Population Death Rate %':4})\npopDeath_rate = popDeath_rate['Population Death Rate %']\n\n#seaborn defaults\nf, ax = plt.subplots(figsize=(15, 12))\nsns.set(style=\"whitegrid\")\nsns.set_color_codes(\"muted\")\nsns.barplot(x='Population Death Rate %',y='Country/Territory', data=top15_PopDeathRate ,\n label=\"Deaths\", color=\"navy\")\nplt.title('Death rate per Number of Habitants per Country (03/04/2020)',fontsize=25)\nplt.xlabel('Death rate per Number of Habitants per Country [%]',fontsize=18)\nplt.ylabel('Country',fontsize=18)\n\nfor i in range (0,15):\n plt.text(x=popDeath_rate.iloc[i]+0.003, y=i , s=popDeath_rate.iloc[i],horizontalalignment='center',verticalalignment='center', fontsize=16)\nplt.savefig('Graphs/20200403_DeathRateinPopPerCountryCases.png', bbox_inches='tight')\nplt.show()", "_____no_output_____" ], [ "minimum_number_cases = 1000 #define the minimum number of cases here/defina o número mínimo de casos aqui\n\ndfPopMinNumCases = dfPopDeathRate[dfPopDeathRate['Total Confirmed'] > minimum_number_cases]\ndfPopMinNumCases = dfPopMinNumCases.reset_index(drop=True)\ndfPopMinNumCases.index = np.arange(1, (len(dfPopMinNumCases)+1))\ntop15_PopDeathRate = dfPopMinNumCases[0:15]\npopDeath_rate = top15_PopDeathRate.round({'Population Death Rate %':4})\npopDeath_rate = popDeath_rate['Population Death Rate %']\n\n#seaborn defaults\nf, ax = plt.subplots(figsize=(15, 12))\nsns.set(style=\"whitegrid\")\nsns.set_color_codes(\"muted\")\nsns.barplot(x='Population Death Rate %',y='Country/Territory', data=top15_PopDeathRate ,\n label=\"Deaths\", color=\"navy\")\nplt.title('Death rate per Number of Habitants per Country (03/04/2020)',fontsize=25)\nplt.xlabel('Death rate per Number of Habitants per Country [%]',fontsize=18)\nplt.ylabel('Country',fontsize=18)\n\nfor i in range (0,15):\n plt.text(x=popDeath_rate.iloc[i]+0.001, y=i , s=popDeath_rate.iloc[i],horizontalalignment='center',verticalalignment='center', 
fontsize=16)\nplt.savefig('Graphs/20200403_DeathRateinPopPerCountry1kCases.png', bbox_inches='tight')\nplt.show()", "_____no_output_____" ], [ "#seaborn defaults\nconfirmedPop = np.array(top15_PopDeathRate['Total Confirmed'])\ndeathsPop = np.array(top15_PopDeathRate['Total Deaths'])\nsns.set(style=\"whitegrid\")\n\nf, ax = plt.subplots(figsize=(15, 6))\n\n\nsns.set_color_codes(\"pastel\")\nsns.barplot(x='Total Confirmed',y='Country/Territory', data=top15_PopDeathRate,\n label=\"Confirmed\", color=\"yellow\")\nsns.set_color_codes(\"muted\")\nsns.barplot(x='Total Deaths',y='Country/Territory', data=top15_PopDeathRate ,\n label=\"Deaths\", color=\"red\")\n\n\nplt.title('Total Number of Cases/Deaths in the Top15 Death Rate per Number of Habitants Countries (03/04/2020)',fontsize=18)\nax.legend(ncol=2, loc=\"upper right\", frameon=True)\nax.set(ylabel=\"Countries/Territory\",\n xlabel=\"Cases/Deaths\")\nfor i in range (0,15):\n plt.text(x=deathsPop[i]+2500, y=i , s=deathsPop[i],horizontalalignment='center',verticalalignment='center', color='blue',fontsize=14)\n plt.text(x=confirmedPop[i]+10000, y=i , s=confirmedPop[i],horizontalalignment='center',verticalalignment='center', fontsize=14)\nsns.despine(left=True, bottom=True)\nplt.savefig('Graphs/20200403_TotalNumberCasesDeathsPop.png', bbox_inches='tight')", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a67db32e9b7a811cd9964bd28625d726c9bccf7
498,636
ipynb
Jupyter Notebook
07-Assignment/Solution/Solution.ipynb
hritik5102/SHALA2020
f0c74cd6718b51ddeffc9a8cda7c30d3cd78dcd3
[ "MIT" ]
5
2020-05-10T15:43:11.000Z
2022-03-02T00:15:36.000Z
07-Assignment/Solution/Solution.ipynb
Sankalp679/SHALA2020
d9e596346b396acde33f2965f6f39f7aefcd7188
[ "MIT" ]
null
null
null
07-Assignment/Solution/Solution.ipynb
Sankalp679/SHALA2020
d9e596346b396acde33f2965f6f39f7aefcd7188
[ "MIT" ]
5
2020-05-10T17:51:14.000Z
2020-06-05T15:12:11.000Z
855.29331
244,522
0.94926
[ [ [ "#Gaussian bayes classifier\n\nIn this assignment we will use a Gaussian bayes classfier to classify our data points.", "_____no_output_____" ], [ "# Import packages", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom scipy.stats import multivariate_normal\nfrom sklearn.metrics import classification_report\nfrom matplotlib import cm", "_____no_output_____" ] ], [ [ "# Load training data\n\nOur data has 2D feature $x1, x2$. Data from the two classes is are in $\\texttt{class1_train}$ and $\\texttt{class2_train}$ respectively. Each file has two columns corresponding to the 2D feature.", "_____no_output_____" ] ], [ [ "class1_train = pd.read_csv('https://raw.githubusercontent.com/shala2020/shala2020.github.io/master/Lecture_Materials/Assignments/MachineLearning/L3/class1_train').to_numpy()\nclass2_train = pd.read_csv('https://raw.githubusercontent.com/shala2020/shala2020.github.io/master/Lecture_Materials/Assignments/MachineLearning/L3/class2_train').to_numpy()", "_____no_output_____" ] ], [ [ "# Visualize training data\nGenerate 2D scatter plot of the training data. Plot the points from class 1 in red and the points from class 2 in blue.", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(10,10))\nplt.scatter(class1_train[:,0], class1_train[:,1], color = 'red', label = 'Class 1')\nplt.scatter(class2_train[:,0], class2_train[:,1], color = 'blue', label = 'Class 2')\nplt.xlabel('x')\nplt.ylabel('y')\nplt.legend(loc = 'best')\nplt.show()", "_____no_output_____" ] ], [ [ "# Maximum likelihood estimate of parameters\n\nWe will model the likelihood, $P(\\mathbf{x}|C_1)$ and $P(\\mathbf{x}|C_2)$ as $\\mathcal{N}(\\mathbf{\\mu_1},|\\Sigma_1)$ and $\\mathcal{N}(\\mathbf{\\mu_2},|\\Sigma_2)$ respectively. The prior probability of the classes are called, $P(C_1)=\\pi_1$ and $P(C_2)=\\pi_2$.\n\nThe maximum likelihood estimate of the parameters as follows:\n\\begin{align*}\n\\pi_k &= \\frac{\\sum_{i=1}^N \\mathbb{1}(t^i=k)}{N}\\\\\n\\mathbf{\\mu_k} &= \\frac{\\sum_{i=1}^N \\mathbb{1}(t^i=k)\\mathbf{x}^i}{\\sum_{i=1}^N \\mathbb{1}(t^i=k)}\\\\\n\\Sigma_k &= \\frac{\\sum_{i=1}^N \\mathbb{1}(t^i=k)(\\mathbf{x}^i-\\mathbf{\\mu_k})(\\mathbf{x}^i-\\mathbf{\\mu_k})^T}{\\sum_{i=1}^N \\mathbb{1}(t^i=k)}\\\\\n\\end{align*}\n\nHere, $t^i$ is the target or class of $i^{th}$ sample. 
$\\mathbb{1}(t^i=k)$ is 1 if $t^i=k$ and 0 otherwise.\n\nCompute maximum likelihood values estimates of $\\pi_1$, $\\mu_1$, $\\Sigma_1$ and $\\pi_2$, $\\mu_2$, $\\Sigma_2$ \n\nAlso print these values\n", "_____no_output_____" ] ], [ [ "n1, n2 = class1_train.shape[0], class2_train.shape[0]\npi1, pi2 = n1/(n1+n2), n2/(n1+n2)\n\nmu1 = np.mean(class1_train, axis = 0)\nmu2 = np.mean(class2_train, axis = 0)\n\n# ------------------ sigma -------------------- #\nXT = (class1_train-mu1).reshape(n1,1,2)\nX = (class1_train-mu1).reshape(n1,2,1)\nsigma1 = np.matmul(X,XT).mean(axis = 0)\n\nXT = (class2_train-mu2).reshape(n2,1,2)\nX = (class2_train-mu2).reshape(n2,2,1)\nsigma2 = np.matmul(X,XT).mean(axis = 0)\n\nprint(' pi1 = {}\\n mu1 = {}\\n sigma1 = \\n{}\\n'.format(pi1, mu1, sigma1))\n\nprint(' pi2 = {}\\n mu2 = {}\\n sigma2 = \\n{}\\n'.format(pi2, mu2, sigma2))", " pi1 = 0.8040201005025126\n mu1 = [0.96998989 1.02894917]\n sigma1 = \n[[0.95527085 0.07775973]\n [0.07775973 0.81591945]]\n\n pi2 = 0.19597989949748743\n mu2 = [-1.02482819 -0.91492055]\n sigma2 = \n[[1.16715324 0.46947177]\n [0.46947177 0.91362912]]\n\n" ] ], [ [ "# Alternate approach", "_____no_output_____" ] ], [ [ "sigma1 = np.cov((class1_train-mu1).T, bias='True')\nsigma2 = np.cov((class2_train-mu2).T, bias='True')\nprint(sigma1)\nprint(sigma2)", "[[0.95527085 0.07775973]\n [0.07775973 0.81591945]]\n[[1.16715324 0.46947177]\n [0.46947177 0.91362912]]\n" ] ], [ [ "# Another alternate", "_____no_output_____" ] ], [ [ "XT = (class1_train-mu1).T\nX = (class1_train-mu1)\nsigma1 = np.matmul(XT,X)/n1\n\nXT = (class2_train-mu2).T\nX = (class2_train-mu2)\nsigma2 = np.matmul(XT,X)/n2\nprint(sigma1)\nprint(sigma2)", "[[0.95527085 0.07775973]\n [0.07775973 0.81591945]]\n[[1.16715324 0.46947177]\n [0.46947177 0.91362912]]\n" ] ], [ [ "# Visualize the likelihood\nNow that you have the parameters, let us visualize how the likelihood looks like.\n\n1. Use $\\texttt{np.mgrid}$ to generate points uniformly spaced in -5 to 5 along 2 axes\n1. Use $\\texttt{multivariate_normal.pdf}$ to get compute the Gaussian likelihood for each class \n1. Use $\\texttt{plot_surface}$ to plot the likelihood of each class.\n1. Use $\\texttt{contourf}$ to plot the likelihood of each class. \n\nFor the plots, use $\\texttt{cmap=cm.Reds}$ for class 1 and $\\texttt{cmap=cm.Blues}$ for class 2. 
Use $\\texttt{alpha=0.5}$ to overlay both plots together.", "_____no_output_____" ], [ "", "_____no_output_____" ] ], [ [ "x, y = np.mgrid[-5:5:.01, -5:5:.01]\npos = np.empty(x.shape + (2,))\npos[:, :, 0] = x; pos[:, :, 1] = y\nrv1 = multivariate_normal(mean = mu1, cov = sigma1)\nrv2 = multivariate_normal(mean = mu2, cov = sigma2)\n# plt.plot(x,y,likelihood1.pdf(pos), coo = 'red')\n\nlikelihood1 = rv1.pdf(pos)\nlikelihood2 = rv2.pdf(pos)\n\nfig = plt.figure(figsize=(20,10))\nax = fig.add_subplot(121, projection='3d')\nplt.title('Likelihood')\nax.plot_surface(x,y,likelihood1, cmap=cm.Reds, alpha = 0.5)\nax.plot_surface(x,y,likelihood2, cmap=cm.Blues, alpha = 0.5)\nplt.xlabel('x1')\nplt.ylabel('x2')\nplt.subplot(122)\nplt.title('Contour plot of likelihood')\nplt.contourf(x, y, likelihood1, cmap=cm.Reds, alpha = 0.5)\nplt.contourf(x, y, likelihood2, cmap=cm.Blues, alpha = 0.5)\nplt.xlabel('x1')\nplt.ylabel('x2')", "_____no_output_____" ] ], [ [ "#Visualize the posterior\nUse the prior and the likelihood you've computed to obtain the posterior distribution for each class.\n\nLike in the case of the likelihood above, make same similar surface and contour plots for the posterior.", "_____no_output_____" ] ], [ [ "posterior1 = likelihood1*pi1/(likelihood1*pi1+likelihood2*pi2)\nposterior2 = likelihood2*pi2/(likelihood1*pi1+likelihood2*pi2)\nfig = plt.figure(figsize=(20,10))\nax = fig.add_subplot(121, projection='3d')\nplt.title('Posterior')\nax.plot_surface(x,y,posterior1, cmap=cm.Reds, alpha = 0.5)\nax.plot_surface(x,y,posterior2, cmap=cm.Blues, alpha = 0.5)\nplt.xlabel('x1')\nplt.ylabel('x2')\nplt.subplot(122)\nplt.title('Contour plot of Posterior')\nplt.contourf(x, y, posterior1, cmap=cm.Reds, alpha = 0.5)\nplt.contourf(x, y, posterior2, cmap=cm.Blues, alpha = 0.5)\nplt.xlabel('x1')\nplt.ylabel('x2')", "_____no_output_____" ] ], [ [ "# Decision boundary\n1. Decision boundary can be obtained by $P(C_2|x)>P(C_1|x)$ in python. Use $\\texttt{contourf}$ to plot the decision boundary. Use $\\texttt{cmap=cm.Blues}$ and $\\texttt{alpha=0.5}$\n1. Also overlay the scatter plot of train data points from the 2 classes on the same plot. Use red color for class 1 and blue color for class 2 ", "_____no_output_____" ] ], [ [ "decision = posterior2>posterior1\nplt.figure(figsize=(10,10))\nplt.contourf(x, y, decision, cmap=cm.Blues, alpha = 0.5)\nplt.scatter(class1_train[:,0], class1_train[:,1], color = 'red', label = 'Class 1')\nplt.scatter(class2_train[:,0], class2_train[:,1], color = 'blue', label = 'Class 2')\nplt.xlabel('x')\nplt.ylabel('y')\nplt.legend(loc = 'best')\nplt.show()", "_____no_output_____" ] ], [ [ "# Test Data\nNow let's use our trained model to classify test data points\n\n1. $\\texttt{test_data}$ contains the $x1,x2$ features of different data points\n1. $\\texttt{test_label}$ contains the true class of the data points. 0 means class 1. 1 means class 2. \n1. Classify the test points based on whichever class has higher posterior probability for each data point\n1. 
Use $\\texttt{classification_report}$ to test the classification performance", "_____no_output_____" ] ], [ [ "test = pd.read_csv('https://raw.githubusercontent.com/shala2020/shala2020.github.io/master/Lecture_Materials/Assignments/MachineLearning/L3/test').to_numpy()\ntest_data, test_label = test[:,:2], test[:,2]\n\n# classfication\nl1 = pi1*rv1.pdf(test_data)\nl2 = pi2*rv2.pdf(test_data)\nden = l1+l2\nl1 /= den\nl2 /= den\n\ntest_decision = l2>l1\n\nprint(classification_report(test_label, test_decision))", " precision recall f1-score support\n\n 0.0 0.93 0.93 0.93 40\n 1.0 0.67 0.67 0.67 9\n\n accuracy 0.88 49\n macro avg 0.80 0.80 0.80 49\nweighted avg 0.88 0.88 0.88 49\n\n" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
4a67e1f1e6a9aed08daf50631a676b079044dbf4
15,691
ipynb
Jupyter Notebook
S02 - Data Wrangling/BLU02 - Advanced Wrangling/Learning Notebook - Part 2 of 3 - Combining Dataframes.ipynb
jtiagosg/batch3-students
5eb94bee46625881e9470da2b137aaa0f6cf7912
[ "MIT" ]
12
2019-07-06T09:06:17.000Z
2020-11-13T00:58:42.000Z
S02 - Data Wrangling/BLU02 - Advanced Wrangling/Learning Notebook - Part 2 of 3 - Combining Dataframes.ipynb
Daniel3424/batch3-students
10c46963e51ce974837096ad06a8c134ed4bcd8a
[ "MIT" ]
29
2019-07-01T14:19:49.000Z
2021-03-24T13:29:50.000Z
S02 - Data Wrangling/BLU02 - Advanced Wrangling/Learning Notebook - Part 2 of 3 - Combining Dataframes.ipynb
Daniel3424/batch3-students
10c46963e51ce974837096ad06a8c134ed4bcd8a
[ "MIT" ]
36
2019-07-05T15:53:35.000Z
2021-07-04T04:18:02.000Z
27.053448
179
0.553884
[ [ [ "# BLU02 - Learning Notebook - Data wrangling workflows - Part 2 of 3", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\n\nimport pandas as pd\nimport os", "_____no_output_____" ] ], [ [ "# 2 Combining dataframes in Pandas\n\n## 2.1 How many programs are there per season?\n\nHow many different programs does the NYP typically present per season?\n\nPrograms are under `/data/programs/` which contains a file per Season.\n\n### Concatenate\n\nTo analyze how many programs there are per season, over time, we need a single dataframe containing *all* seasons.\n\nConcatenation means, in short, to unite multiple dataframes (or series) in one. \n\nThe `pd.concat()` function performs concatenation operations along an axis (`axis=0` for index and `axis=1` for columns).", "_____no_output_____" ] ], [ [ "season_0 = pd.read_csv('./data/programs/1842-43.csv')\nseason_1 = pd.read_csv('./data/programs/1843-44.csv')\n\nseasons = [season_0, season_1]\npd.concat(seasons, axis=1)", "_____no_output_____" ] ], [ [ "Concatenating like this makes no sense, as we no longer have a single observation per row.\n\nWhat we want to do instead is to concatenate the dataframe along the index.", "_____no_output_____" ] ], [ [ "pd.concat(seasons, axis=0)", "_____no_output_____" ] ], [ [ "This dataframe looks better, but there's something weird with the index: it's not unique anymore.\n\nDifferent observations share the same index. Not cool.\n\nFor dataframes that don't have a meaningful index, you may wish to ignore the indexes altogether.", "_____no_output_____" ] ], [ [ "pd.concat(seasons, axis=0, ignore_index=True)", "_____no_output_____" ] ], [ [ "Now, let's try something different. \n\nLet's try to change the name of the columns, so that each dataframe has different ones, before concatenating.", "_____no_output_____" ] ], [ [ "season_0_ = season_0.copy()\nseason_0_.columns = [0, 1, 2, 'Season']\nseasons_ = [season_0_, season_1]\npd.concat(seasons_, axis=0)", "_____no_output_____" ] ], [ [ "What a mess! What did we learn?\n\n* When the dataframes have different columns, `pd.concat()` will take the union of all dataframes by default (no information loss)\n* Concatenation will fill columns that are not present for specific dataframes with `np.NaN` (missing values).\n\nThe good news is that you can set how you want to glue the dataframes in regards to the other axis, the one not being concatenated. \n\nSetting `join='inner'` will take the intersection, i.e., the columns that are present in all dataframes.", "_____no_output_____" ] ], [ [ "pd.concat(seasons_, axis=0, join='inner')", "_____no_output_____" ] ], [ [ "There you go. Concatenation complete.", "_____no_output_____" ], [ "### Append\n\nThe method `df.append()` is a shortcut for `pd.concat()`, that can be called on either a `pd.DataFrame` or a `pd.Series`.", "_____no_output_____" ] ], [ [ "season_0.append(season_1)", "_____no_output_____" ] ], [ [ "It can take multiple objects to concatenate as well. Please note the `ignore_index=True`.", "_____no_output_____" ] ], [ [ "season_2 = pd.read_csv('./data/programs/1844-45.csv')\n\nmore_seasons = [season_1, season_2]\nseason_0.append(more_seasons, ignore_index=True)", "_____no_output_____" ] ], [ [ "We are good to go. 
Let's use `pd.concat` to combine all seasons into a great dataframe.", "_____no_output_____" ] ], [ [ "def read_season(file):\n path = os.path.join('.', 'data', 'programs', file)\n return pd.read_csv(path)\n\nfiles = os.listdir('./data/programs/')\nfiles = [f for f in files if '.csv' in f]", "_____no_output_____" ] ], [ [ "A logical approach would be to iterate over all files and appending all of them to a single dataframe.", "_____no_output_____" ] ], [ [ "%%timeit\n\nprograms = pd.DataFrame()\nfor file in files:\n season = read_season(file)\n programs = programs.append(season, ignore_index=True)", "_____no_output_____" ] ], [ [ "It is worth noting that both `pd.concat()` and `df.append()` make a full copy of the data and continually reusing this function can create a significant performance hit. \n\nInstead, use a list comprehension if you need to use the operation several times. \n\nThis way, you only call `pd.concat()` or `df.append()` once.", "_____no_output_____" ] ], [ [ "%%timeit\n\nseasons = [read_season(f) for f in files if '.csv' in f]\nprograms = pd.concat(seasons, axis=0, ignore_index=True)", "_____no_output_____" ], [ "seasons = [read_season(f) for f in files if '.csv' in f]\nprograms = pd.concat(seasons, axis=0, ignore_index=True)", "_____no_output_____" ] ], [ [ "Now that we have the final `programs` dataframe, we can see how the number of distinct programs changes over time.", "_____no_output_____" ] ], [ [ "programs['Season'] = pd.to_datetime(programs['Season'].str[:4])\n\n(programs.groupby('Season')\n .size()\n .plot(legend=False, use_index=True, figsize=(10, 7),\n title='Number of programs per season (from 1842-43 to 2016-17)'));", "_____no_output_____" ] ], [ [ "The NYP appears to be investing in increasing the number of distinct programs per season since '95. ", "_____no_output_____" ], [ "## 2.2 How many concerts are there per season?\n\nWhat about the number of concerts? The first thing we need to do is to import the `concerts.csv` data.", "_____no_output_____" ] ], [ [ "concerts = pd.read_csv('./data/concerts.csv')\nconcerts.head()", "_____no_output_____" ] ], [ [ "We will use the Leon Levy Digital Archives ID (`GUID`) to identify each program.\n\nNow, we have information regarding all the concerts that took place and the season for each program.\n\nThe problem? Information about the concert and the season are in different tables, and the program is the glue between the two. Familiar?\n\n### Merge\n\nPandas provides high-performance join operations, very similar to SQL.\n\nThe method `df.merge()` method provides an interface for all database-like join methods.", "_____no_output_____" ] ], [ [ "?pd.merge", "_____no_output_____" ] ], [ [ "We can call `pd.merge` to join both tables on the `GUID` (and the `ProgramID`, that provides similar info).", "_____no_output_____" ] ], [ [ "# Since GUID and ProgramID offer similar info, we will drop the later.\nprograms = programs.drop(columns='ProgramID')\n\ndf = pd.merge(programs, concerts, on='GUID')\ndf.head()", "_____no_output_____" ] ], [ [ "Or, alternatively, we can call `merge()` directly on the dataframe.", "_____no_output_____" ] ], [ [ "df_ = programs.merge(concerts, on='GUID')\ndf_.head()", "_____no_output_____" ] ], [ [ "The critical parameter here is the `how`. Since we are not explicitly using it, the merge default to `inner` (for inner-join) by default.\n\nBut, in fact, you can use any join, just like you did in SQL: `left`, `right`, `outer` and `inner`.\n\nRemember?\n\n![](../media/types_of_joins.jpg)\n\n*Fig. 
1 - Types of joins in SQL, note how left, right, outer and inner translate directly to Pandas.*\n\nA refresher on different types of joins, all supported by Pandas:\n\n| Pandas | SQL | What it does |\n| ---------------------------------------------- | ---------------- | ----------------------------------------- |\n| `pd.merge(right, left, on='key', how='left')` | LEFT OUTER JOIN | Use all keys from left frame only |\n| `pd.merge(right, left, on='key', how='right')` | RIGHT OUTER JOIN | Use all keys from right frame only |\n| `pd.merge(right, left, on='key', how='outer')` | FULL OUTER JOIN | Use union of keys from both frames |\n| `pd.merge(right, left, on='key', how='inner')` | INNER JOIN | Use intersection of keys from both frames |\n\nIn this particular case, we have:\n* A one-to-many relationship (i.e., one program to many concerts)\n* Since every single show in `concerts` has a match in `programs`, the type of join we use doesn't matter.\n\nWe can use the `validate` argument to automatically check whether there are unexpected duplicates in the merge keys and check their uniqueness.", "_____no_output_____" ] ], [ [ "df__ = pd.merge(programs, concerts, on='GUID', how='outer', validate=\"one_to_many\")\nassert(concerts.shape[0] == df_.shape[0] == df__.shape[0])", "_____no_output_____" ] ], [ [ "Back to our question, how is the number of concerts per season evolving?", "_____no_output_____" ] ], [ [ "(programs.merge(concerts, on='GUID')\n .groupby('Season')\n .size()\n .plot(legend=False, use_index=True, figsize=(10, 7),\n title='Number of concerts per season (from 1842-43 to 2016-17)'));", "_____no_output_____" ] ], [ [ "Likewise, the number of concerts seems to be trending upwards since about 1995, which could be a sign of growing interest in the genre.\n\n### Join\n\nNow, we want the top-3 composer in total appearances.\n\nWithout surprise, we start by importing `works.csv`.", "_____no_output_____" ] ], [ [ "works = pd.read_csv('./data/works.csv',index_col='GUID')", "_____no_output_____" ] ], [ [ "Alternatively, we can use `df.join()` instead of `df.merge()`. \n\nThere are, however, differences in the default behavior: for example `df.join` uses `how='left'` by default.", "_____no_output_____" ], [ "Let's try to perform the merge.", "_____no_output_____" ] ], [ [ "(programs.merge(works, on=\"GUID\")\n .head(n=3))", "_____no_output_____" ], [ "programs.merge(works, on=\"GUID\").shape", "_____no_output_____" ], [ "(programs.join(works, on='GUID')\n .head(n=3))\n\n# equivalent to\n# pd.merge(programs, works, left_on='GUID', right_index=True,\n# how='left').head(n=3)", "_____no_output_____" ], [ "programs.join(works, on=\"GUID\").shape", "_____no_output_____" ] ], [ [ "We noticed that the shape of the results is diferent, we have a different number of lines in each one of the methods.\nTypically, you would use `df.join()` when you want to do a left join or when you want to join on the index of the dataframe on the right.\n\nNow for our goal: what are the top-3 composers?", "_____no_output_____" ] ], [ [ "(programs.join(works, on='GUID')\n .groupby('ComposerName')\n .size()\n .nlargest(n=3))", "_____no_output_____" ] ], [ [ "Wagner wins!\n\nWhat about the top-3 works?", "_____no_output_____" ] ], [ [ "(programs.join(works, on='GUID')\n .groupby(['ComposerName', 'WorkTitle'])\n .size()\n .nlargest(n=3))", "_____no_output_____" ] ], [ [ "Wagner wins three times!", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
4a67e8c9648e0e08c97627e5a994d4485cc3af20
5,824
ipynb
Jupyter Notebook
lab10/decomposition/plot_faces_decomposition.ipynb
cruxiu/MLStudies
2b0a9ac7dbede4200080666dfdcba6a2f65f93af
[ "MIT" ]
1
2019-08-22T01:35:16.000Z
2019-08-22T01:35:16.000Z
lab10/decomposition/plot_faces_decomposition.ipynb
cruxiu/MLStudies
2b0a9ac7dbede4200080666dfdcba6a2f65f93af
[ "MIT" ]
null
null
null
lab10/decomposition/plot_faces_decomposition.ipynb
cruxiu/MLStudies
2b0a9ac7dbede4200080666dfdcba6a2f65f93af
[ "MIT" ]
null
null
null
107.851852
4,588
0.593578
[ [ [ "%matplotlib inline", "_____no_output_____" ] ], [ [ "\n# Faces dataset decompositions\n\n\nThis example applies to `olivetti_faces` different unsupervised\nmatrix decomposition (dimension reduction) methods from the module\n:py:mod:`sklearn.decomposition` (see the documentation chapter\n`decompositions`) .\n\n\n", "_____no_output_____" ] ], [ [ "print(__doc__)\n\n# Authors: Vlad Niculae, Alexandre Gramfort\n# License: BSD 3 clause\n\nimport logging\nfrom time import time\n\nfrom numpy.random import RandomState\nimport matplotlib.pyplot as plt\n\nfrom sklearn.datasets import fetch_olivetti_faces\nfrom sklearn.cluster import MiniBatchKMeans\nfrom sklearn import decomposition\n\n# Display progress logs on stdout\nlogging.basicConfig(level=logging.INFO,\n format='%(asctime)s %(levelname)s %(message)s')\nn_row, n_col = 2, 3\nn_components = n_row * n_col\nimage_shape = (64, 64)\nrng = RandomState(0)\n\n# #############################################################################\n# Load faces data\ndataset = fetch_olivetti_faces(shuffle=True, random_state=rng)\nfaces = dataset.data\n\nn_samples, n_features = faces.shape\n\n# global centering\nfaces_centered = faces - faces.mean(axis=0)\n\n# local centering\nfaces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1)\n\nprint(\"Dataset consists of %d faces\" % n_samples)\n\n\ndef plot_gallery(title, images, n_col=n_col, n_row=n_row):\n plt.figure(figsize=(2. * n_col, 2.26 * n_row))\n plt.suptitle(title, size=16)\n for i, comp in enumerate(images):\n plt.subplot(n_row, n_col, i + 1)\n vmax = max(comp.max(), -comp.min())\n plt.imshow(comp.reshape(image_shape), cmap=plt.cm.gray,\n interpolation='nearest',\n vmin=-vmax, vmax=vmax)\n plt.xticks(())\n plt.yticks(())\n plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)\n\n# #############################################################################\n# List of the different estimators, whether to center and transpose the\n# problem, and whether the transformer uses the clustering API.\nestimators = [\n ('Eigenfaces - PCA using randomized SVD',\n decomposition.PCA(n_components=n_components, svd_solver='randomized',\n whiten=True),\n True),\n\n ('Non-negative components - NMF',\n decomposition.NMF(n_components=n_components, init='nndsvda', tol=5e-3),\n False),\n\n ('Independent components - FastICA',\n decomposition.FastICA(n_components=n_components, whiten=True),\n True),\n\n ('Sparse comp. 
- MiniBatchSparsePCA',\n decomposition.MiniBatchSparsePCA(n_components=n_components, alpha=0.8,\n n_iter=100, batch_size=3,\n random_state=rng),\n True),\n\n ('MiniBatchDictionaryLearning',\n decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,\n n_iter=50, batch_size=3,\n random_state=rng),\n True),\n\n ('Cluster centers - MiniBatchKMeans',\n MiniBatchKMeans(n_clusters=n_components, tol=1e-3, batch_size=20,\n max_iter=50, random_state=rng),\n True),\n\n ('Factor Analysis components - FA',\n decomposition.FactorAnalysis(n_components=n_components, max_iter=2),\n True),\n]\n\n\n# #############################################################################\n# Plot a sample of the input data\n\nplot_gallery(\"First centered Olivetti faces\", faces_centered[:n_components])\n\n# #############################################################################\n# Do the estimation and plot it\n\nfor name, estimator, center in estimators:\n print(\"Extracting the top %d %s...\" % (n_components, name))\n t0 = time()\n data = faces\n if center:\n data = faces_centered\n estimator.fit(data)\n train_time = (time() - t0)\n print(\"done in %0.3fs\" % train_time)\n if hasattr(estimator, 'cluster_centers_'):\n components_ = estimator.cluster_centers_\n else:\n components_ = estimator.components_\n\n # Plot an image representing the pixelwise variance provided by the\n # estimator e.g its noise_variance_ attribute. The Eigenfaces estimator,\n # via the PCA decomposition, also provides a scalar noise_variance_\n # (the mean of pixelwise variance) that cannot be displayed as an image\n # so we skip it.\n if (hasattr(estimator, 'noise_variance_') and\n estimator.noise_variance_.ndim > 0): # Skip the Eigenfaces case\n plot_gallery(\"Pixelwise variance\",\n estimator.noise_variance_.reshape(1, -1), n_col=1,\n n_row=1)\n plot_gallery('%s - Train time %.1fs' % (name, train_time),\n components_[:n_components])\n\nplt.show()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ] ]
4a67f89b5a2af694fdd4e8cb1e8fdb9f2aa03de3
180,292
ipynb
Jupyter Notebook
notebooks/logistic regression/Sido0-Levin.ipynb
JaworWr/MLAcceleration
ef0e0661389782b0caeec9137b3d4ddd84643d2c
[ "MIT" ]
null
null
null
notebooks/logistic regression/Sido0-Levin.ipynb
JaworWr/MLAcceleration
ef0e0661389782b0caeec9137b3d4ddd84643d2c
[ "MIT" ]
null
null
null
notebooks/logistic regression/Sido0-Levin.ipynb
JaworWr/MLAcceleration
ef0e0661389782b0caeec9137b3d4ddd84643d2c
[ "MIT" ]
null
null
null
554.744615
97,048
0.947341
[ [ [ "import sys\nsys.path.insert(0, \"../..\")", "_____no_output_____" ], [ "import numpy as np\nimport torch\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\n\nfrom logistic_regression import LogisticRegressionGD\nfrom extrapolation import *\nfrom experiments import Experiment", "_____no_output_____" ], [ "device = \"cuda\"\n\nX = []\nwith open(\"../../../sido0_train.data\") as f:\n for line in f:\n X.append([float(x) for x in line.split()])\nX = np.array(X)\nX = X[:11000]\nX /= X.max()\nX = np.concatenate([np.ones((X.shape[0], 1)), X], axis=1)\nX = torch.tensor(X, device=device)\nwith open(\"../../../sido0_train.targets\") as f:\n y = [int(x) for x in f]\ny = y[:11000]\ny = torch.tensor(y, device=device)\nX.shape, y.shape", "_____no_output_____" ], [ "np.random.seed(2020)\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)", "_____no_output_____" ], [ "model = LogisticRegressionGD(X_train, y_train, 1e-3, device=\"cuda\")", "_____no_output_____" ], [ "model.fit(1e-5, max_iter=20500)", "_____no_output_____" ], [ "len(model.log)", "_____no_output_____" ], [ "preds = model.predict(X_test)\ntorch.mean((preds == y_test).double())", "_____no_output_____" ], [ "model.to(\"cpu\")", "_____no_output_____" ], [ "experiment = Experiment(model.log, model.obj, values=model.value_log)\nk = 6", "_____no_output_____" ], [ "experiment.run_method(\"VLT\", h_algorithm, k+1, method_kwargs={\"k\": k, \"type\": \"t\"})", "_____no_output_____" ], [ "experiment.run_method(\"VLU\", h_algorithm, k+1, method_kwargs={\"k\": k, \"type\": \"u\"})", "_____no_output_____" ], [ "experiment.run_method(\"VLV\", h_algorithm, k+2, method_kwargs={\"k\": k, \"type\": \"v\"})", "_____no_output_____" ], [ "experiment.run_method(\"SLT\", e_algorithm, k+1, method_kwargs={\"k\": k, \"type\": \"t\"})", "_____no_output_____" ], [ "experiment.run_method(\"SLU\", e_algorithm, k+1, method_kwargs={\"k\": k, \"type\": \"u\"})", "_____no_output_____" ], [ "experiment.run_method(\"SLV\", e_algorithm, k+2, method_kwargs={\"k\": k, \"type\": \"v\"})", "_____no_output_____" ], [ "experiment.run_method(\"RRE+QR\", RRE, k, method_kwargs={\"qr\": True})", "_____no_output_____" ], [ "plt.figure(figsize=(14, 8))\nexperiment.plot_values(n=6000)\nplt.ylim(0, 1000)\nplt.legend()", "_____no_output_____" ], [ "plt.figure(figsize=(14, 8))\nexperiment.plot_log_diff(n=6000)\nplt.ylim(1, 4)\nplt.legend()", "_____no_output_____" ], [ "df = experiment.value_df()\ndf.to_csv(\"results/sido0-levin.csv\")", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a67ff9270a25483590c3a63028349cf2380b549
40,598
ipynb
Jupyter Notebook
Notebooks/Step_3_get_Grad_Rates.ipynb
NobleNetworkCharterSchools/annual-counseling
b119b56e8b34131625d0de8d5c19058a5b938b18
[ "MIT" ]
null
null
null
Notebooks/Step_3_get_Grad_Rates.ipynb
NobleNetworkCharterSchools/annual-counseling
b119b56e8b34131625d0de8d5c19058a5b938b18
[ "MIT" ]
3
2020-08-17T16:44:52.000Z
2020-08-17T16:45:09.000Z
Notebooks/Step_3_get_Grad_Rates.ipynb
NobleNetworkCharterSchools/annual-counseling
b119b56e8b34131625d0de8d5c19058a5b938b18
[ "MIT" ]
1
2020-08-20T20:28:49.000Z
2020-08-20T20:28:49.000Z
40.39602
339
0.452338
[ [ [ "# In this step, we'll process graduation data from the federal files\n## In most cases, this is a straight \"pull\" from the data, but there are a few possible modifications:\n\n- If the sample is too small from the most recent year, use 3 years of data\n- For HBCUs, boost by 15%\n- For a handful of schools, adjust down to reflect the true Noble rate of success\n- Add in a handful of estimates", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport os\n\n# Edit these to reflect any changes\nwork_location = 'inputs'\ndirectory_file = 'hd2018.csv'\nbase_dir = 'base_dir.csv'\nnoble_attending = '../../raw_inputs/noble_attending.csv'\ngr_output = 'grad_rates.csv'\ngr_files = {'latest':'gr2018.csv',\n 'one_removed':'gr2017.csv',\n 'two_removed':'gr2016.csv'}\noutput_files = {'latest':'grad2018.csv',\n 'one_removed':'grad2017.csv',\n 'two_removed':'grad2016.csv'}", "_____no_output_____" ], [ "os.chdir(work_location)", "_____no_output_____" ], [ "# We'll use a dict to keep track of each grad rate file, reading in each one\nyears=['latest','one_removed','two_removed']\ngr_dfs = {}\nfor year in years:\n gr_dfs[year] = pd.read_csv(gr_files[year], index_col=['UNITID'],\n usecols=['UNITID', 'GRTYPE', 'GRTOTLT','GRBKAAT','GRHISPT'],\n na_values='.',\n dtype={'GRTOTLT':float,'GRBKAAT':float,'GRHISPT':float},\n encoding='latin-1')\n gr_dfs[year].rename(columns={'GRTOTLT':'Total','GRBKAAT':'Black','GRHISPT':'Hisp'}, inplace=True)\n gr_dfs[year]['AA_H']=gr_dfs[year].Black+gr_dfs[year].Hisp\ngr_dfs['latest'].head()", "_____no_output_____" ], [ "# We now have to sort through these GRTYPES:\n# 8 is the adjusted cohort for bachelor's seeking students (completions: 12=6yr, 13=4yr, 14=5yr; transfers=16)\n# 29 for associate's seeking (completions: 30=3yr 35=2yr; transfers=33)\n# We'll build a list of unitids that have both starting cohorts and completions for either one\nvalid_unitids = {}\nfor year in years:\n df = gr_dfs[year]\n valid_unitids[year] = list( (set(df[df['GRTYPE']==8].index) & set(df[df['GRTYPE']==12].index)) |\n (set(df[df['GRTYPE']==29].index) & set(df[df['GRTYPE']==30].index)) )\nprint('%d, %d' % (len(gr_dfs['latest']), len(valid_unitids['latest'])))", "51868, 3669\n" ], [ "# We'll use the basic \"hd\" directory to form the base of the final year output\ndef create_year_df(df, source_df1, source_df2):\n \"\"\"Apply function to pull the appropriate data into a single row per college\"\"\"\n ix = df.name\n if ix in source_df1.index:\n return source_df1.loc[ix][['Total','Black','Hisp','AA_H']]\n elif ix in source_df2.index:\n return source_df2.loc[ix][['Total','Black','Hisp','AA_H']]\n else:\n return [np.nan,np.nan,np.nan,np.nan]\n\nyear_dfs = {}\nfor year in years:\n dir_df = pd.read_csv(directory_file, index_col=['UNITID'],\n usecols=['UNITID','INSTNM'],encoding='latin-1')\n dir_df = dir_df[dir_df.index.isin(valid_unitids[year])]\n \n # First do the starts\n start1 = gr_dfs[year][gr_dfs[year].GRTYPE == 12]\n start2 = gr_dfs[year][gr_dfs[year].GRTYPE == 30]\n dir_df[['Cl_Total','Cl_Black','Cl_Hisp','Cl_AA_H']]=dir_df.apply(create_year_df,axis=1,result_type=\"expand\",\n args=(start1,start2))\n # Then do the completions\n start1 = gr_dfs[year][gr_dfs[year].GRTYPE == 8]\n start2 = gr_dfs[year][gr_dfs[year].GRTYPE == 29]\n dir_df[['St_Total','St_Black','St_Hisp','St_AA_H']]=dir_df.apply(create_year_df,axis=1,result_type=\"expand\",\n args=(start1,start2))\n # Next the transfers\n start1 = gr_dfs[year][gr_dfs[year].GRTYPE == 16]\n start2 = 
gr_dfs[year][gr_dfs[year].GRTYPE == 33]\n dir_df[['Xf_Total','Xf_Black','Xf_Hisp','Xf_AA_H']]=dir_df.apply(create_year_df,axis=1,result_type=\"expand\",\n args=(start1,start2))\n \n # Finally, calculated within year stats\n for type in ['Total','Black','Hisp','AA_H']:\n dir_df['GR_'+type]=dir_df['Cl_'+type]/dir_df['St_'+type]\n dir_df['Xfr_'+type]=dir_df['Xf_'+type]/dir_df['St_'+type]\n dir_df['CI_'+type]=np.sqrt(dir_df['GR_'+type]*(1-dir_df['GR_'+type])/dir_df['St_'+type])\n dir_df.replace(np.inf,np.nan)\n \n year_dfs[year]=dir_df.copy()\nyear_dfs['latest'].head()", "_____no_output_____" ], [ "# Here, we're just saving the one year files locally for reference\nfor yr in ['latest', 'one_removed', 'two_removed']:\n year_dfs[yr].to_csv(output_files[yr], na_rep=\"N/A\")", "_____no_output_____" ] ], [ [ "## The above code created three DFs for the most recent three years\n## Each DF has the in year counting stats and rates for graduation\n### Now we need create a final set of statistics based on these:\n- Adj6yrGrad (overall number after adjustments)\n- Adj6yrAAH (African American/Hispanic number after adjustments)\n- 6yrGrad (overall number, no adjustments)\n- 6yrAAH (AA/H no adjustments)\n- 6yrAA\n- 6yrH\n- Xfer\n- XferAAH\n- XferAA\n- XferH\n", "_____no_output_____" ] ], [ [ "# We'll start with reading some of the rows from the 'base_dir' created in the last step\ndir_df = pd.read_csv(base_dir, index_col=['UNITID'],\n usecols=['UNITID','INSTNM','Type','HBCU'],encoding='latin-1')\ndir_df.head()", "_____no_output_____" ], [ "# NOTE THAT THERE ARE YEAR REFERENCES IN THIS CODE THAT NEED TO BE UPDATED TOO\ndef bump15(x):\n \"\"\"Helper function to increase by 15% or half the distance to 100\"\"\"\n if x > .7:\n return x + (1-x)*.5\n else:\n return x + .15\n \ndef set_gradrates(df, year_dfs):\n \"\"\"Apply function to decide how to set the specific values specified above\"\"\"\n ix = df.name\n \n # First we see if there is actual data for the latest year\n if ix in year_dfs['latest'].index:\n ty = year_dfs['latest'].loc[ix]\n gr_source = '2018'\n gr_6yr,gr_6yr_aah,gr_6yr_aa,gr_6yr_h,xf,xf_aah,xf_aa,xf_h = ty.reindex(\n ['GR_Total','GR_AA_H','GR_Black','GR_Hisp','Xfr_Total','Xfr_AA_H','Xfr_Black','Xfr_Hisp'])\n \n # If there's data in the latest year, we'll check how robust and add in prior years if necessary\n ci, ci_aah = ty.reindex(['CI_Total','CI_AA_H'])\n # For HBCUs, we bump by the lesser of 15% or half the distance to 100%\n if (df.HBCU == 'Yes') and (ci_aah <= 0.04):\n adj_6yr = gr_6yr\n adj_6yr_aah = bump15(gr_6yr_aah)\n # Otherwise, add more years if the confidence intervals are too low\n elif (ci >0.015) or (ci_aah >0.05):\n calc_fields = ['Cl_Total','Cl_Black','Cl_Hisp','Cl_AA_H',\n 'St_Total','St_Black','St_Hisp','St_AA_H',\n 'Xf_Total','Xf_Black','Xf_Hisp','Xf_AA_H']\n calc_data = ty.reindex(calc_fields)\n \n if ix in year_dfs['one_removed'].index:\n gr_source = '2017-2018'\n ty=year_dfs['one_removed'].loc[ix]\n calc_data = calc_data+ty.reindex(calc_fields)\n \n if ix in year_dfs['two_removed'].index:\n gr_source = '2016-2018'\n ty=year_dfs['two_removed'].loc[ix]\n calc_data = calc_data+ty.reindex(calc_fields)\n \n \n gr_6yr = calc_data['Cl_Total']/calc_data['St_Total'] if calc_data['St_Total']>0 else np.nan\n gr_6yr_aah = calc_data['Cl_AA_H']/calc_data['St_AA_H'] if calc_data['St_AA_H']>0 else np.nan\n gr_6yr_aa = calc_data['Cl_Black']/calc_data['St_Black'] if calc_data['St_Black']>0 else np.nan\n gr_6yr_h = calc_data['Cl_Hisp']/calc_data['St_Hisp'] if calc_data['St_Hisp']>0 else 
np.nan\n xf = calc_data['Xf_Total']/calc_data['St_Total'] if calc_data['St_Total']>0 else np.nan\n xf_aah = calc_data['Xf_AA_H']/calc_data['St_AA_H'] if calc_data['St_AA_H']>0 else np.nan\n xf_aa = calc_data['Xf_Black']/calc_data['St_Black'] if calc_data['St_Black']>0 else np.nan\n xf_h = calc_data['Xf_Hisp']/calc_data['St_Hisp'] if calc_data['St_Hisp']>0 else np.nan\n adj_6yr = gr_6yr\n adj_6yr_aah = gr_6yr_aah\n \n else:\n adj_6yr = gr_6yr\n adj_6yr_aah = gr_6yr_aah\n \n # If there was no data in the most recent year, we got the prior (and stick--no need to add prior prior)\n elif ix in year_dfs['one_removed'].index:\n ty = year_dfs['one_removed'].loc[ix]\n gr_source = '2017'\n gr_6yr,gr_6yr_aah,gr_6yr_aa,gr_6yr_h,xf,xf_aah,xf_aa,xf_h = ty.reindex(\n ['GR_Total','GR_AA_H','GR_Black','GR_Hisp','Xfr_Total','Xfr_AA_H','Xfr_Black','Xfr_Hisp'])\n adj_6yr = gr_6yr\n adj_6yr_aah = gr_6yr_aah\n \n # If no data in the last two years, we'll go to prior prior (and stick--no need to check CI)\n elif ix in year_dfs['two_removed'].index:\n ty = year_dfs['two_removed'].loc[ix]\n gr_source = '2016'\n gr_6yr,gr_6yr_aah,gr_6yr_aa,gr_6yr_h,xf,xf_aah,xf_aa,xf_h = ty.reindex(\n ['GR_Total','GR_AA_H','GR_Black','GR_Hisp','Xfr_Total','Xfr_AA_H','Xfr_Black','Xfr_Hisp'])\n adj_6yr = gr_6yr\n adj_6yr_aah = gr_6yr_aah\n \n # No data in any of the last 3 years\n else:\n gr_source,adj_6yr,adj_6yr_aah,gr_6yr,gr_6yr_aah,gr_6yr_aa,gr_6yr_h,xf,xf_aah,xf_aa,xf_h=['N/A']+[np.nan]*10\n \n # 2 year schools are given \n if df['Type'] == '2 year':\n adj_6yr = adj_6yr+0.5*xf\n adj_6yr_aah = adj_6yr_aah+0.5*xf_aah\n \n return [gr_source,\n np.round(adj_6yr,decimals=2),np.round(adj_6yr_aah,decimals=2),\n np.round(gr_6yr,decimals=2),np.round(gr_6yr_aah,decimals=2),\n np.round(gr_6yr_aa,decimals=2),np.round(gr_6yr_h,decimals=2),\n np.round(xf,decimals=2),np.round(xf_aah,decimals=2),\n np.round(xf_aa,decimals=2),np.round(xf_h,decimals=2)]\n\nnew_columns = ['GR_Source','Adj6yrGrad','Adj6yrAAH','6yrGrad',\n '6yrAAH','6yrAA','6yrH','Xfer','XferAAH','XferAA','XferH']\ndir_df[new_columns] = dir_df.apply(set_gradrates,axis=1,args=(year_dfs,),result_type=\"expand\")\ndir_df.head()", "_____no_output_____" ], [ "dir_df.to_csv(gr_output,na_rep='N/A')", "_____no_output_____" ] ], [ [ "# A few more manual steps\n## These should eventually be moved to code, but they should be modified in a number of cases (discussed in more detail below):\n1. Add a correction for schools where we have a lot of historic results. Historically, this has meant reducing grad rates for schools by 1/3 of the difference between Noble retention and university retention (typically at only 3-4 schools)\n2. Increase grad rates for partner colleges (15%)\n3. Double check schools known to report oddly: Robert Morris University-Illinois specifically\n4. Look for major shifts in grad rate at schools many Noble students attend and consider shifting to a 3year average\n\nIn all of these cases, we will change the grad rates and the \"GR_Source\" to designate that a non-standard practice was followed\n\nYou can see all of this work in the \"manual_grad_rates_corrections_2020.xlsx\" file in the raw_inputs folder. This file was created by importing columns from the prior year directory and then applying a process against them. Specifically:\n1. 
Start with \"grad_rates.csv\" (saved above) and insert 6 columns between columns B&C:\n\n-count: # of students (you can grab from financial_aid_analysis_output.xlsx from the archive-analysis)\n\n-Adj6yr2019: from \"manual_grad_rates_corrections_2019.xlsx\" (these will be in the first few columns.)\n\n-Adj6yrAAH2019: same\n\n-2019note: same\n\n-2019src: same\n\n-2020-2019AAH: calculated from the above and what's in the file\n\n2. Create a column after Type (will be Column K) with \"2020 note\". This is where you'll disposition each row.\n3. Create columns X-AG as copies of columns M-V. This is where formula-modified values will go. First we'll fill in values for the modified entries. Second, we'll fill those columns in for the (vast majority) of rows with no corrections.\n\nThen look at the notes below for specific steps, but the main thing to keep in mind is that \"Special\" rows in prior\nyears are likely \"special\" in current years, so be sure to check those. The vast majority will end up \"stet\" meaning no manual adjustment.\n\nThe sections below describe how to do each of the changes listed above.\n\n_After work is completed in this file, the extra columns were removed and the result was saved as \"grad_rates.csv\" in the raw_inputs folder._", "_____no_output_____" ], [ "## For Case #1 in the above, see the \"Noble history bump analysis2020.xlsx\" file in raw_inputs\n\nThis file was taken from the post-NSC process, looking at the \"College and GPA\" tab of the \"snapshot\" report. To create it, take the last version of that file and perform the following steps:\n\n1. Save the \"College and GPA\" tab alone in a new workbook.\n2. Remove the columns at the right for all but the two most recent years and the \"All\" columns for the # starting and # remaining one year later sections. (In this case, we keep 2017 and 2018.)\n3. Filter on \"All\" GPAs\n4. Calculate the columns shown if there were 50 Noble students in 2018 OR if there were 200+ in prior years and 50+ in 2017+2018. (These are arbitrary. Be sure to use the # of starting students for your filter.) Note only keep the \"Adjustment\" columns if the result has a magnitude greater than 1% and is not a two-year college.\n\n\nFor the columns that have an adjustment, edit those rows in the \"manual_grad_rate_corrections_2020.xlsx\" file:\n1. Change the \"GR_Source\" to \"2018-1/3 Noble gap\" (or +) and \"2020 note\" to \"reduce by x%\" (or increase)\n2. Change the values in X-AC based on the rounded adjustment. AD-AG should just equal the original values.\n3. Finally, eyeball how the AA/H value changes compared to the prior year. If there is 5+% drop, change the note to \"stet, big natural drop\" and change the source to 2018.\n\n## For Case #2 in the above:\n1. Filter on the 2019 note for the word \"partner\".\n2. Mirror that increase in columns X-AG unless the partnership has ended. (Also mirror the language in the note and source.)\n\n## For Case #3 in the above:\n1. Filter the 2019 note for anything not \"stet\" or \"N/A\".\n2. For the ones with no 2020 note yet, look at the details and determine (with college counseling guidance) whether a change should be made. A few more notes:\n3. \"minimum value: .25\" is added for 4-year schools with N/A for grad rate if Noble students attend. Apply this rule to all such schools (even if 2019 note was stet or N/A).\n4. \"floor CCC at 20%\" means the rate for City Colleges should be at least 20%. Apply this rule to all City colleges (regardless of 2019 note).\n5. 
Again for any of these, update the 2020 note, source, and then make the actual changes in X-AG.\n6. Note that you might want to refer back to the manual grad rate corrections file from the prior year and look at X-AG there specifically to see the formulas used to apply the changes for any non-standard rows.\n\n## For Case #4 in the above:\n1. Filter for rows with \"2020 note\" still blank. (The 2019 note for all of these should be \"N/A\" or \"stet\".)\n2. Filter for rows with \"count\" >= 5.\n3. Filter for rows with declines bigger than 5%. If the school is using 2018 as the source, switch to the 3yr average. (You'll need to do this by manually grabbing the source files.\n4. Filter for rows with increases bigger than 10%. You probably won't change these, but discuss the increases with a college counselor to see if they pass the \"smell test\".\n\n## Final disposition:\n1. Change the remaining 2020 note values that were blank to \"stet\"\n2. Populate X-AG for all blank rows with the values in M-V (just use an assignment formula in Excel on the filtered selection).\n3. Save this file for reference.\n3. Copy and paste values for X-AG.\n4. Delete columns B-K. (The file will start with UNITID and GR_Source.\n5. Delete the remaining columns until your next column is the old column X.\n6. Save in raw_inputs as grad_rates.csv", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ] ]
4a680e0b3d59c14fb1d4eacd7e4aeef88515246e
2,192
ipynb
Jupyter Notebook
data_set_analysis.ipynb
WKobes/cybersecurity-documents-classification
2ef26dcee7b3499b7464554c31ded5ee7f3703a4
[ "MIT" ]
null
null
null
data_set_analysis.ipynb
WKobes/cybersecurity-documents-classification
2ef26dcee7b3499b7464554c31ded5ee7f3703a4
[ "MIT" ]
4
2021-03-23T11:23:08.000Z
2021-03-23T11:23:26.000Z
data_set_analysis.ipynb
WKobes/cybersecurity-documents-classification
2ef26dcee7b3499b7464554c31ded5ee7f3703a4
[ "MIT" ]
null
null
null
24.629213
91
0.504106
[ [ [ "import os\nimport json", "_____no_output_____" ], [ "files_dir = os.path.abspath(os.curdir) + '/files/'\n\n\nword_counts = []\ndocument_types = {}\ntmp = []\n\n# Loop through all relevant files\nfor filename in os.listdir(files_dir):\n    \n    # Skip directories\n    if os.path.isdir(os.path.join(files_dir, filename)):\n        continue\n    \n    \n    # Open every file\n    with open(files_dir + filename, 'r') as file:\n        \n        file_data = json.loads(file.read())\n        \n        word_counts.append(file_data['word_counts'].values())\n        \n        tmp = list(file_data['word_counts'].keys())\n        current_type = file_data['document_type']\n        \n        if current_type:\n            document_types[current_type] = document_types.get(current_type, 0) + 1\n        else:\n            document_types['Unknown'] = document_types.get('Unknown', 0) + 1\n\naverages = [sum(col) / float(len(col)) for col in zip(*word_counts)]\nrelevant = dict(zip(tmp, averages))", "_____no_output_____" ], [ "# print(json.dumps(document_types, indent=2))\n\n# print(json.dumps(relevant, indent=2))\n\nprint(json.dumps(document_types, indent=2))\n", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
4a680e1993f177f2996f9ea30f57d6a17d418a85
12,903
ipynb
Jupyter Notebook
Lectures/Week05-Edge_n_line_detection/notebooks/4-python-straighten-image-using-hough-transform.ipynb
effepivi/ICE-3111-Computer_Vision
d8cf5c8e9e35b107573eeba6912a05db43d0dcd5
[ "BSD-3-Clause" ]
null
null
null
Lectures/Week05-Edge_n_line_detection/notebooks/4-python-straighten-image-using-hough-transform.ipynb
effepivi/ICE-3111-Computer_Vision
d8cf5c8e9e35b107573eeba6912a05db43d0dcd5
[ "BSD-3-Clause" ]
null
null
null
Lectures/Week05-Edge_n_line_detection/notebooks/4-python-straighten-image-using-hough-transform.ipynb
effepivi/ICE-3111-Computer_Vision
d8cf5c8e9e35b107573eeba6912a05db43d0dcd5
[ "BSD-3-Clause" ]
1
2021-12-03T22:08:49.000Z
2021-12-03T22:08:49.000Z
26.88125
198
0.499574
[ [ [ "# Straighten an image using the Hough transform", "_____no_output_____" ], [ "We'll write our own Hough transform to compute the Hough transform and use it to straighten a wonky image.", "_____no_output_____" ], [ "## Package inclusion for Python", "_____no_output_____" ] ], [ [ "import copy\nimport math\nimport numpy as np\nimport cv2", "_____no_output_____" ] ], [ [ "## Read the image from a file on the disk and return a new matrix", "_____no_output_____" ], [ "![../wonky.png](../img/wonky.png)", "_____no_output_____" ] ], [ [ "image = cv2.imread(\"../img/wonky.png\");", "libpng warning: iCCP: CRC error\n" ] ], [ [ "## Check for errors", "_____no_output_____" ] ], [ [ "# Check for failure\nif image is None: \n raise Exception(\"Could not open or find the image\");", "_____no_output_____" ] ], [ [ "## Convert degrees to gradients", "_____no_output_____" ] ], [ [ "def deg2rad(anAngleInDegrees: float) -> float:\n return anAngleInDegrees * math.pi / 180.0", "_____no_output_____" ] ], [ [ "## Apply the Canny edge detector", "_____no_output_____" ] ], [ [ "def cannyEdgeDetector(anInputImage: np.array,\n aCannyThreshold: int) -> np.array:\n \n # Find edges using Canny\n ratio = 3\n kernel_size = 3\n \n edge_image = cv2.Canny(anInputImage,\n aCannyThreshold,\n aCannyThreshold * ratio,\n kernel_size)\n \n return edge_image", "_____no_output_____" ], [ "blurred_image = cv2.blur( image, (3,3) )\nedge = cannyEdgeDetector(blurred_image, 60)\n\ncv2.namedWindow(\"edge\", cv2.WINDOW_GUI_EXPANDED)\ncv2.imshow(\"edge\", edge)\n\ncv2.namedWindow(\"image\", cv2.WINDOW_GUI_EXPANDED)\ncv2.imshow(\"image\", image)\n\ncv2.waitKey(0) # Wait for any keystroke in the window\ncv2.destroyAllWindows() # Destroy all the created windows", "_____no_output_____" ] ], [ [ "| Original image | canny |\n|----------------|--------|\n|![image](../img/wonky_original.png) | ![canny](../img/wonky_canny.png) |", "_____no_output_____" ], [ "## Compute the accumulator", "_____no_output_____" ] ], [ [ "def houghTransform(anInputImage: np.array,\n aCannyThreshold: int) -> np.array:\n\n # Blur the input image\n blurred_image = cv2.blur( anInputImage, (3,3) )\n\n # Find edges using Canny\n edge_image = cannyEdgeDetector(blurred_image, aCannyThreshold)\n\n width = 180;\n diagonal = math.sqrt(edge_image.shape[1] * edge_image.shape[1] + edge_image.shape[0] * edge_image.shape[0])\n height = math.floor(2.0 * diagonal)\n half_height = height / 2.0\n accumulator = np.zeros((height, width), np.single)\n\n # Process all the pixels of the edge image\n for j in range(edge_image.shape[0]):\n\n for i in range(edge_image.shape[1]):\n\n # The pixel is on an edge\n if edge_image[j,i] > 0:\n\n # Process all the angles\n for theta in range(180):\n\n angle = deg2rad(theta);\n r = i * math.cos(angle) + j * math.sin(angle)\n\n v = math.floor(r + half_height)\n accumulator[v, theta] += 1\n\n return accumulator", "_____no_output_____" ], [ "accumulator = houghTransform(image, 60)", "_____no_output_____" ] ], [ [ "## Visualise the accumulator\n\nLook for dots. Every dot represents a line in the original image. 
There are four of them.", "_____no_output_____" ] ], [ [ "vis_accumulator = cv2.normalize(accumulator, None, 0, 1, cv2.NORM_MINMAX, cv2.CV_32FC1)\n    \ncv2.namedWindow(\"accumulator\", cv2.WINDOW_GUI_EXPANDED)\ncv2.imshow(\"accumulator\", vis_accumulator)\n\ncv2.waitKey(0) # Wait for any keystroke in the window\ncv2.destroyAllWindows() # Destroy all the created windows", "_____no_output_____" ] ], [ [ "![accumulator](../img/wonky_accumulator.png)", "_____no_output_____" ], [ "## Draw the lines", "_____no_output_____" ] ], [ [ "Scalar = [float, float, float]\n\ndef drawLines(anImage: np.array,\n              anAccumulator: np.array,\n              aHoughThreshold: float,\n              aLineWidth: int,\n              aLineColour: Scalar) -> np.array:\n\n    # Copy the input image into the output image\n    output = copy.deepcopy(anImage)\n\n    # Process all the pixels of the accumulator image\n    for j in range(anAccumulator.shape[0]):\n\n        for i in range(anAccumulator.shape[1]):\n            \n            # The pixel value in the accumulator is greater than the threshold\n            # Display the corresponding line\n            if anAccumulator[j, i] >= aHoughThreshold:\n\n                # The pixel location\n                location = (i, j)\n\n                # The two corners of the image\n                pt1 = [ 0, 0]\n                pt2 = [anImage.shape[1] - 1, anImage.shape[0] - 1]\n\n                # Get theta in radians\n                theta = deg2rad(location[0])\n\n                # Get r\n                r = location[1]\n                r -= anAccumulator.shape[0] / 2.0\n\n                # How to retrieve the line from theta and r:\n                # x = (r - y * sin(theta)) / cos(theta);\n                # y = (r - x * cos(theta)) / sin(theta);\n\n                # sin(theta) != 0\n                if location[0] != 0 and location[0] != 180:\n                    pt1[1] = math.floor((r - pt1[0] * math.cos(theta)) / math.sin(theta))\n                    pt2[1] = math.floor((r - pt2[0] * math.cos(theta)) / math.sin(theta))\n                # math.sin(theta) == 0 && math.cos(theta) != 0\n                else:\n                    pt1[0] = math.floor((r - pt1[1] * math.sin(theta)) / math.cos(theta))\n                    pt2[0] = math.floor((r - pt2[1] * math.sin(theta)) / math.cos(theta))\n\n                # Draw the line\n                output = cv2.line(output, pt1, pt2, aLineColour, aLineWidth)\n\n    return output", "_____no_output_____" ], [ "# Get the min and max in the accumulator\nmin_value, max_value, min_loc, max_loc = cv2.minMaxLoc(accumulator)\n\nhough_threshold = min_value + 0.6 * (max_value - min_value)\n\nimage_with_lines = drawLines(image, accumulator, hough_threshold, 4, (0, 0, 255))", "_____no_output_____" ], [ "cv2.namedWindow(\"image\", cv2.WINDOW_GUI_EXPANDED)\ncv2.imshow(\"image\", image)\n\ncv2.namedWindow(\"edge\", cv2.WINDOW_GUI_EXPANDED)\ncv2.imshow(\"edge\", edge)\n\ncv2.namedWindow(\"image_with_lines\", cv2.WINDOW_GUI_EXPANDED)\ncv2.imshow(\"image_with_lines\", image_with_lines)\n\ncv2.waitKey(0) # Wait for any keystroke in the window\ncv2.destroyAllWindows() # Destroy all the created windows", "_____no_output_____" ] ], [ [ "| Original image | canny | lines |\n|----------------|--------|--------|\n|![image](../img/wonky_original.png) | ![canny](../img/wonky_canny.png) | ![lines](../img/wonky_with_lines.png)", "_____no_output_____" ], [ "## Extract the angle from the accumulator", "_____no_output_____" ] ], [ [ "min_value, max_value, min_loc, max_loc = cv2.minMaxLoc(accumulator)", "_____no_output_____" ], [ "print(\"Max value: \", max_value, \" Location: \", (90 - max_loc[0], max_loc[1]))", "Max value:  418.0  Location:  (9, 1632)\n" ] ], [ [ "We must convert the position along the horizontal axis into an angle. It's simple. [9, 1632] tells us that the image is rotated by 9 degrees. To straighten it, we must rotate it by -9 degrees.", "_____no_output_____" ] ], [ [ "def rotate(anImage: np.array, angle: float) -> np.array:\n\n    # Point from where to rotate (centre of rotation), here the centre of the image\n    pt = (anImage.shape[1] / 2.0, anImage.shape[0] / 2.0) \n    \n    # Create a rotation matrix\n    rotation_matrix = cv2.getRotationMatrix2D(pt, angle, 1.0)\n    \n    # Apply the transformation to the image\n    rotated_image = cv2.warpAffine(anImage, rotation_matrix, (anImage.shape[0], anImage.shape[1])) \n    \n    return rotated_image", "_____no_output_____" ], [ "print(90 - max_loc[0])\nrotated = rotate(image, -(90 - max_loc[0]))", "9\n" ], [ "cv2.namedWindow(\"image\", cv2.WINDOW_GUI_EXPANDED)\ncv2.imshow(\"image\", image)\n\ncv2.namedWindow(\"rotated\", cv2.WINDOW_GUI_EXPANDED)\ncv2.imshow(\"rotated\", rotated)\n\ncv2.waitKey(0) # Wait for any keystroke in the window\ncv2.destroyAllWindows() # Destroy all the created windows", "_____no_output_____" ] ], [ [ "| Original image | straighten |\n|----------------|--------|\n|![image](../img/wonky_original.png) | ![straighten](../img/wonky_straighten.png)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ] ]
4a6810c98489d710ad36d2a0a3f2f08a9a065ae4
442,883
ipynb
Jupyter Notebook
example_boulders.ipynb
malidib/ACID
6aa691da4bf75d6a5702ebb0a5e8b67b79f0cdaf
[ "MIT" ]
null
null
null
example_boulders.ipynb
malidib/ACID
6aa691da4bf75d6a5702ebb0a5e8b67b79f0cdaf
[ "MIT" ]
null
null
null
example_boulders.ipynb
malidib/ACID
6aa691da4bf75d6a5702ebb0a5e8b67b79f0cdaf
[ "MIT" ]
null
null
null
2,446.867403
436,948
0.956711
[ [ [ "from src.acid import *\nfrom src.utils import *\nimport matplotlib\nimport matplotlib.pyplot as plt\n", "WARNING:tensorflow:From /home/mad/anaconda3/envs/myenv/lib/python3.6/site-packages/tensorflow/python/compat/v2_compat.py:96: disable_resource_variables (from tensorflow.python.ops.variable_scope) is deprecated and will be removed in a future version.\nInstructions for updating:\nnon-resource variables are not supported in the long term\n" ], [ "image_directory='./images/67p.IMG'\nmodels_directory = './models/*.h5'\n\n\nimage_crop = Read_Preprocess_Image(image_directory,NORMALIZE=1, CONV_GS=1,INVERSE=0,EQUALIZE=0, CLAHE=1,\\\n RESIZE=1 , LIMITS=[1535,1791,0,256])\n\nObjects_Master_list = model_inference(image_crop,models_directory,which_models='all')\n", "-------------------------------------------------- \n \n db ,ad8888ba, 88 88888888ba, \n d88b d8a. `a8b 88 88 `a8b \n d8.`8b d8. 88 88 `8b \n d8. `8b 88 88 88 88 \n d8YaaaaY8b 88 88 88 88 \n d8aaaaaaaa8b Y8, 88 88 8P \n d8. `8b Y8a. .a8P 88 88 .a8P \nd8. `8b `aY8888Ya. 88 88888888Ya. \n\n-------------------------------------------------- \n \nLoading weights from /mnt/d/other_data/model_01.h5\nLoading weights from /mnt/d/other_data/model_02.h5\nLoading weights from /mnt/d/other_data/model_03.h5\nLoading weights from /mnt/d/other_data/model_04.h5\nLoading weights from /mnt/d/other_data/model_05.h5\nLoading weights from /mnt/d/other_data/model_06.h5\nLoading weights from /mnt/d/other_data/model_07.h5\nLoading weights from /mnt/d/other_data/model_08.h5\nLoading weights from /mnt/d/other_data/model_09.h5\nLoading weights from /mnt/d/other_data/model_10.h5\nLoading weights from /mnt/d/other_data/model_11.h5\nLoading weights from /mnt/d/other_data/model_12.h5\nLoading weights from /mnt/d/other_data/model_13.h5\nLoading weights from /mnt/d/other_data/model_14.h5\nLoading weights from /mnt/d/other_data/model_15.h5\nLoading weights from /mnt/d/other_data/model_16.h5\n" ], [ "objects_unique = get_unique_iou(Objects_Master_list,iou_thres=0.5,detection_thres=0.20)\n\nobjects_unique_readable = readable_output(objects_unique)\n\nobjects_unique_readable__ = objects_unique_readable[(objects_unique_readable['detection_thres'] > 0.) &\\\n (objects_unique_readable['ellipticity'] < 3.0) & \\\n (objects_unique_readable['object_size_pixels'] < 0.01*(512**2))]\n\ntotalmask = np.sum(objects_unique_readable__['mask'])\ntotalmask[totalmask>0] = 1\n\n\n", "_____no_output_____" ], [ "\nfig = matplotlib.pyplot.gcf()\nfig.set_size_inches(20, 40.)\nplt.style.use('classic')\nmatplotlib.style='classic'\nplt.subplot(1,2,1)\nplt.imshow(image_crop ,cmap='gray')\nplt.subplot(1,2,2)\nplt.imshow(image_crop ,cmap='gray')\ntotalmaskMasked = np.ma.masked_where(totalmask == 0, totalmask)\n\nplt.imshow(totalmaskMasked,alpha=0.5,cmap='cool')\nplt.show() ", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
4a682084d5e3069a860feb83180406dd7a3f1b0e
88,115
ipynb
Jupyter Notebook
0402_reg_salary_1925.ipynb
loveactualry/dss12_reg_ops
1e581848bcded1f8c02a4600941c58d3beb02e28
[ "MIT" ]
null
null
null
0402_reg_salary_1925.ipynb
loveactualry/dss12_reg_ops
1e581848bcded1f8c02a4600941c58d3beb02e28
[ "MIT" ]
null
null
null
0402_reg_salary_1925.ipynb
loveactualry/dss12_reg_ops
1e581848bcded1f8c02a4600941c58d3beb02e28
[ "MIT" ]
null
null
null
41.156002
135
0.323974
[ [ [ "# 초기 설정\nfrom IPython.core.display import display, HTML\ndisplay(HTML(\"<style>.container { width: 100% !important; }</style>\"))\npd.set_option(\"display.max_columns\", 40)\nimport missingno as msno\n%matplotlib inline\n\nimport pprint\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport time\nfrom datetime import datetime", "_____no_output_____" ] ], [ [ "##### 하단 정제 데이터 부분에서 모델 돌리세용 ######", "_____no_output_____" ], [ " ", "_____no_output_____" ], [ "##### 변수 추가", "_____no_output_____" ] ], [ [ "#BABIP\nsdata['BABIP'] = (sdata['안타'] - sdata['홈런'])/(sdata['타수'] -sdata['삼진']-sdata['홈런'] -sdata['희비'])", "_____no_output_____" ], [ "#외국인\nimport re\nnames = sdata.이름.unique()\nsdata['외국인'] = None\nfor name in names:\n if re.findall('[에, 브, 워, 대, 피, 히, 버, 러, 칸, 루, 필, 파, 스, 아,마, 가, 초, 모, 로, 발, 번, 테, 호]', name[0]):\n sdata['외국인'][sdata['이름'] == name] = 1\n else:\n sdata['외국인'][sdata['이름'] == name] = 0\n \n \na = ['나바로', '조쉬벨', '고메즈']\nfor name in a:\n sdata['외국인'][sdata['이름'] == name] = 1", "_____no_output_____" ], [ "# 나이 컬럼화\nsdata['나이C'] = sdata.나이.apply(lambda x : 0 if x <= 23 else x)\nsdata['나이C'] = sdata.나이C.apply(lambda x : 1 if 26 >= x > 23 else x)\nsdata['나이C'] = sdata.나이C.apply(lambda x : 2 if 33 >= x >26 else x)\nsdata['나이C'] = sdata.나이C.apply(lambda x : 3 if 37 >= x >33 else x)\nsdata['나이C']= sdata.나이C.apply(lambda x : 4 if 40 >= x > 37 else x) \nsdata['나이C']= sdata.나이C.apply(lambda x : 5 if x >= 40 else x) ", "_____no_output_____" ], [ "# 이적\nsdata['이적'] = None\nfor x in df['이름']:\n sdata['이적'][sdata['이름'] == x] = len(sdata[sdata['이름'] == x]['팀'].unique())\n ", "_____no_output_____" ], [ "df['이적C'] = df.이적.apply(lambda x: 0 if x == 1 else x)\ndf['이적C'] = df.이적.apply(lambda x: 1 if x == 2 else x)\ndf['이적C'] = df.이적.apply(lambda x: 2 if x == 3 else x)\ndf['이적C'] = df.이적.apply(lambda x: 3 if x >= 3 else x)", "_____no_output_____" ], [ "sdata.columns", "_____no_output_____" ] ], [ [ "#### 정제 데이터 넣고 돌리는 부분", "_____no_output_____" ] ], [ [ "test = pd.read_csv('test_datas0402.csv')\ntrain = pd.read_csv('train_datas0402.csv')", "_____no_output_____" ], [ "#sdata", "_____no_output_____" ], [ "#train = sdata[(sdata['시즌'] <= 2018)]\n#test = sdata[(sdata['시즌'] == 2019)]", "_____no_output_____" ], [ "train", "_____no_output_____" ], [ "import statsmodels.api as sm\nmodel = sm.OLS.from_formula('연봉 ~ np.log(나이) + scale(np.log(G)) + scale(타석) + np.log(scale(안타)) + (np.log(볼넷)) + \\\n (np.log(사구)) + scale(고4) + scale(np.log(삼진)) + np.log(병살) +\\\n np.log(scale(희타)) + scale(희비) + np.log((twoBLUCK)) + np.log(scale(threeBLUCK)) + (scale(ISO)) +\\\n np.log(scale(ISOD)) ' , data=train)\n# model = sm.OLS.from_formula('연봉 ~ ' + '+'.join(s_col) , data=train)\nresult = model.fit()\n#print(result.summary())", "_____no_output_____" ], [ "train.columns", "_____no_output_____" ], [ "\ntrain_x = train[['시즌', '팀', '포지션', '나이', 'G', '타석', '타수', '득점', '안타', '타1',\n '타2', '타3', '홈런', '루타', '타점', '도루', '도실', '볼넷', '사구', '고4', '삼진', '병살',\n '희타', '희비', '타율', '출루', '장타', 'OPS', 'wOBA', 'wRC', 'twoBLUCK',\n 'threeBLUCK', 'ISO', 'BBK', 'ISOD', '횟수', '경험', 'BABIP', \n '외국인', '나이C', '이적', '이적C']]\ntrain_y = train[['연봉']]\ntrain_y_log = train[['로그연봉']]\n\ntest_x = test[['시즌', '팀', '포지션', '나이', 'G', '타석', '타수', '득점', '안타', '타1',\n '타2', '타3', '홈런', '루타', '타점', '도루', '도실', '볼넷', '사구', '고4', '삼진', '병살',\n '희타', '희비', '타율', '출루', '장타', 'OPS', 'wOBA', 'wRC', 'twoBLUCK',\n 'threeBLUCK', 'ISO', 'BBK', 'ISOD', '횟수', '경험', 'BABIP', \n '외국인', '나이C', '이적', '이적C']]\ntest_y = 
test[['연봉']]\ntest_y_log = test['로그연봉']", "_____no_output_____" ], [ "#col2 = ['시즌', '나이', 'G', '타석', '도실', '볼넷',\\\n# '사구', '고4', '삼진', '병살', '희타', '희비', '타율', 'WAR', 'twoBLUCK',\\\n# 'threeBLUCK', 'ISO', 'ISOD']\n#s_col = [f'scale({c})' for c in col2]\nmodel = sm.OLS.from_formula('연봉 ~ G' , data=train)\n# model = sm.OLS.from_formula('연봉 ~ ' + '+'.join(s_col) , data=train)\nresult = model.fit()\nprint(result.summary())", " OLS Regression Results \n==============================================================================\nDep. Variable: 연봉 R-squared: 0.038\nModel: OLS Adj. R-squared: 0.036\nMethod: Least Squares F-statistic: 18.94\nDate: Thu, 02 Apr 2020 Prob (F-statistic): 1.65e-05\nTime: 21:07:15 Log-Likelihood: -5723.5\nNo. Observations: 485 AIC: 1.145e+04\nDf Residuals: 483 BIC: 1.146e+04\nDf Model: 1 \nCovariance Type: nonrobust \n==============================================================================\n coef std err t P>|t| [0.025 0.975]\n------------------------------------------------------------------------------\nIntercept 2.317e+04 4989.912 4.644 0.000 1.34e+04 3.3e+04\nG 197.9022 45.471 4.352 0.000 108.558 287.247\n==============================================================================\nOmnibus: 236.282 Durbin-Watson: 0.959\nProb(Omnibus): 0.000 Jarque-Bera (JB): 1347.008\nSkew: 2.098 Prob(JB): 3.17e-293\nKurtosis: 10.004 Cond. No. 373.\n==============================================================================\n\nWarnings:\n[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.\n" ] ], [ [ "##### 1. 전체 모형", "_____no_output_____" ] ], [ [ "model = sm.OLS.from_formula('연봉 ~ 나이+ G+ 타석+ 타수+ 득점+ 안타+ 타1+ 타2+ 타3+\\\n 홈런+ 루타+ 타점+ 도루+ 도실+ 볼넷+ 사구+ 고4+ 삼진+ 병살+ 희타+ 희비+ \\\n 타율+ 출루+ 장타+ OPS+ wOBA+ wRC+ twoBLUCK+ threeBLUCK+ ISO+ BBK+ ISOD+ 횟수+\\\n 경험 + BABIP+외국인+ 나이C+ 이적+ 이적C', data=train)\nresult = model.fit()\nprint(result.summary())", " OLS Regression Results \n==============================================================================\nDep. Variable: 연봉 R-squared: 0.547\nModel: OLS Adj. R-squared: 0.513\nMethod: Least Squares F-statistic: 15.99\nDate: Thu, 02 Apr 2020 Prob (F-statistic): 1.02e-57\nTime: 21:12:14 Log-Likelihood: -5540.8\nNo. 
Observations: 485 AIC: 1.115e+04\nDf Residuals: 450 BIC: 1.130e+04\nDf Model: 34 \nCovariance Type: nonrobust \n==============================================================================\n coef std err t P>|t| [0.025 0.975]\n------------------------------------------------------------------------------\nIntercept -6.651e+04 1.74e+04 -3.825 0.000 -1.01e+05 -3.23e+04\n나이 3553.3862 743.938 4.776 0.000 2091.362 5015.411\nG -441.4748 121.122 -3.645 0.000 -679.510 -203.439\n타석 -5611.1595 6847.867 -0.819 0.413 -1.91e+04 7846.608\n타수 5788.4290 6847.721 0.845 0.398 -7669.053 1.92e+04\n득점 -293.1761 155.705 -1.883 0.060 -599.174 12.822\n안타 -56.8339 138.074 -0.412 0.681 -328.183 214.515\n타1 -4.4633 249.702 -0.018 0.986 -495.190 486.263\n타2 103.4887 542.844 0.191 0.849 -963.334 1170.312\n타3 -517.8702 1319.486 -0.392 0.695 -3110.990 2075.250\n홈런 362.0110 721.023 0.502 0.616 -1054.980 1779.002\n루타 96.9474 204.903 0.473 0.636 -305.739 499.634\n타점 46.4028 145.760 0.318 0.750 -240.051 332.857\n도루 135.9134 203.509 0.668 0.505 -264.034 535.860\n도실 -345.3943 519.066 -0.665 0.506 -1365.489 674.700\n볼넷 5739.3244 6851.651 0.838 0.403 -7725.881 1.92e+04\n사구 6753.8300 6875.838 0.982 0.327 -6758.908 2.03e+04\n고4 2289.8764 612.862 3.736 0.000 1085.450 3494.303\n삼진 -394.2455 149.302 -2.641 0.009 -687.662 -100.829\n병살 611.3243 334.167 1.829 0.068 -45.398 1268.047\n희타 4752.4955 6867.252 0.692 0.489 -8743.368 1.82e+04\n희비 4739.7652 6881.625 0.689 0.491 -8784.346 1.83e+04\n타율 -2.032e+06 1.06e+06 -1.910 0.057 -4.12e+06 5.9e+04\n출루 -2.856e+06 1.59e+06 -1.795 0.073 -5.98e+06 2.71e+05\n장타 -3.028e+06 1.59e+06 -1.904 0.057 -6.15e+06 9.68e+04\nOPS 4.275e+06 2.12e+06 2.020 0.044 1.16e+05 8.43e+06\nwOBA -6.876e+05 2.49e+05 -2.757 0.006 -1.18e+06 -1.97e+05\nwRC -200.9255 100.065 -2.008 0.045 -397.578 -4.273\ntwoBLUCK -9.476e+04 1.41e+05 -0.671 0.503 -3.72e+05 1.83e+05\nthreeBLUCK -1028.5482 5.09e+05 -0.002 0.998 -1e+06 9.98e+05\nISO -9.964e+05 5.3e+05 -1.881 0.061 -2.04e+06 4.45e+04\nBBK 5.045e+04 3.99e+04 1.263 0.207 -2.8e+04 1.29e+05\nISOD -8.245e+05 5.32e+05 -1.550 0.122 -1.87e+06 2.21e+05\n횟수 196.3755 1454.697 0.135 0.893 -2662.467 3055.218\n경험 3219.8066 1390.507 2.316 0.021 487.113 5952.500\nBABIP 1.39e+05 6.38e+04 2.179 0.030 1.36e+04 2.64e+05\n외국인 2.012e+04 4516.897 4.454 0.000 1.12e+04 2.9e+04\n나이C -9990.4196 3264.044 -3.061 0.002 -1.64e+04 -3575.759\n이적 -3.585e+04 8821.800 -4.063 0.000 -5.32e+04 -1.85e+04\n이적C 3.066e+04 8661.814 3.540 0.000 1.36e+04 4.77e+04\n==============================================================================\nOmnibus: 146.037 Durbin-Watson: 1.332\nProb(Omnibus): 0.000 Jarque-Bera (JB): 516.480\nSkew: 1.354 Prob(JB): 7.04e-113\nKurtosis: 7.269 Cond. No. 1.08e+16\n==============================================================================\n\nWarnings:\n[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.\n[2] The smallest eigenvalue is 1.75e-24. 
This might indicate that there are\nstrong multicollinearity problems or that the design matrix is singular.\n" ], [ "df = train\nfrom sklearn.model_selection import KFold\n\nscores = np.zeros(5)\ncv = KFold(5, shuffle=True, random_state=0)\nfor i, (idx_train, idx_test) in enumerate(cv.split(df)):\n df_train = df.iloc[idx_train]\n df_test = df.iloc[idx_test]\n \n model = sm.OLS.from_formula('연봉 ~ 나이+ G+ 타석+ 타수+ 득점+ 안타+ 타1+ 타2+ 타3+\\\n 홈런+ 루타+ 타점+ 도루+ 도실+ 볼넷+ 사구+ 고4+ 삼진+ 병살+ 희타+ 희비+ \\\n 타율+ 출루+ 장타+ OPS+ wOBA+ wRC+ twoBLUCK+ threeBLUCK+ ISO+ BBK+ ISOD+ 횟수+\\\n 경험 + BABIP+외국인+ 나이C+ 이적+ 이적C' , data=df_train)\n result = model.fit()\n \n pred = result.predict(df_test)\n rss = ((df_test.연봉 - pred) ** 2).sum()\n tss = ((df_test.연봉 - df_test.연봉.mean())** 2).sum()\n rsquared = 1 - rss / tss\n \n scores[i] = rsquared\n print(\"학습 R2 = {:.8f}, 검증 R2 = {:.8f}\".format(result.rsquared, rsquared))", "학습 R2 = 0.55592879, 검증 R2 = 0.45577089\n학습 R2 = 0.55656713, 검증 R2 = 0.46154012\n학습 R2 = 0.53599672, 검증 R2 = 0.53552159\n학습 R2 = 0.56871605, 검증 R2 = 0.35868853\n학습 R2 = 0.56080194, 검증 R2 = 0.43331498\n" ] ], [ [ "#### 2. 모델2", "_____no_output_____" ] ], [ [ "model = sm.OLS.from_formula('로그연봉 ~ C(시즌) + 포지션 + scale(G) + C(팀) + scale(홈런) + scale(타3)\\\n + scale(타석) + scale(득점) + scale(사구) + scale(고4) + scale(도루)\\\n + scale(삼진) + scale(병살) + scale(희타) + scale(희비) + scale(타율) + 외국인\\\n + scale(OPS) + C(나이C):횟수', data=train)\nresult = model.fit()\nprint(result.summary())", " OLS Regression Results \n==============================================================================\nDep. Variable: 로그연봉 R-squared: 0.647\nModel: OLS Adj. R-squared: 0.608\nMethod: Least Squares F-statistic: 16.67\nDate: Thu, 02 Apr 2020 Prob (F-statistic): 8.64e-72\nTime: 21:35:21 Log-Likelihood: -243.89\nNo. 
Observations: 485 AIC: 585.8\nDf Residuals: 436 BIC: 790.8\nDf Model: 48 \nCovariance Type: nonrobust \n=================================================================================\n coef std err t P>|t| [0.025 0.975]\n---------------------------------------------------------------------------------\nIntercept 9.5062 0.218 43.608 0.000 9.078 9.935\nC(시즌)[T.2011] -0.0214 0.086 -0.249 0.804 -0.191 0.148\nC(시즌)[T.2012] 0.0089 0.089 0.100 0.920 -0.166 0.183\nC(시즌)[T.2013] 0.0247 0.092 0.267 0.790 -0.157 0.206\nC(시즌)[T.2014] 0.1897 0.105 1.801 0.072 -0.017 0.397\nC(시즌)[T.2015] 0.5346 0.108 4.953 0.000 0.322 0.747\nC(시즌)[T.2016] 0.4753 0.118 4.013 0.000 0.243 0.708\nC(시즌)[T.2017] 0.5632 0.114 4.956 0.000 0.340 0.786\nC(시즌)[T.2018] 0.5286 0.113 4.698 0.000 0.307 0.750\n포지션[T.2B] -0.1015 0.093 -1.095 0.274 -0.284 0.081\n포지션[T.3B] -0.0295 0.081 -0.366 0.715 -0.188 0.129\n포지션[T.C] 0.1176 0.088 1.338 0.182 -0.055 0.290\n포지션[T.CF] 0.0156 0.093 0.168 0.866 -0.167 0.198\n포지션[T.DH] 0.2055 0.080 2.575 0.010 0.049 0.362\n포지션[T.LF] -0.2349 0.086 -2.747 0.006 -0.403 -0.067\n포지션[T.RF] 0.0527 0.079 0.666 0.505 -0.103 0.208\n포지션[T.SS] -0.0827 0.091 -0.906 0.365 -0.262 0.097\nC(팀)[T.KIA] 0.3531 0.212 1.666 0.096 -0.063 0.770\nC(팀)[T.LG] 0.5362 0.215 2.496 0.013 0.114 0.958\nC(팀)[T.NC] 0.3713 0.217 1.707 0.088 -0.056 0.799\nC(팀)[T.SK] 0.3995 0.211 1.890 0.059 -0.016 0.815\nC(팀)[T.kt] 0.4113 0.233 1.761 0.079 -0.048 0.870\nC(팀)[T.넥센] 0.3289 0.218 1.510 0.132 -0.099 0.757\nC(팀)[T.두산] 0.3652 0.214 1.708 0.088 -0.055 0.785\nC(팀)[T.롯데] 0.3748 0.215 1.745 0.082 -0.047 0.797\nC(팀)[T.삼성] 0.3810 0.210 1.813 0.071 -0.032 0.794\nC(팀)[T.한화] 0.4520 0.213 2.121 0.034 0.033 0.871\nC(팀)[T.히어로즈] 0.5261 0.326 1.613 0.108 -0.115 1.167\nscale(G) -0.3008 0.073 -4.148 0.000 -0.443 -0.158\nscale(홈런) 0.1319 0.062 2.130 0.034 0.010 0.254\nscale(타3) -0.0188 0.026 -0.723 0.470 -0.070 0.032\nscale(타석) 0.4440 0.117 3.787 0.000 0.214 0.674\nscale(득점) -0.0710 0.080 -0.890 0.374 -0.228 0.086\nscale(사구) 0.0216 0.027 0.803 0.423 -0.031 0.075\nscale(고4) 0.0820 0.027 3.085 0.002 0.030 0.134\nscale(도루) -0.0272 0.033 -0.827 0.409 -0.092 0.037\nscale(삼진) -0.1526 0.041 -3.684 0.000 -0.234 -0.071\nscale(병살) 0.0284 0.030 0.936 0.350 -0.031 0.088\nscale(희타) -0.0098 0.030 -0.331 0.741 -0.068 0.048\nscale(희비) 0.0209 0.027 0.772 0.441 -0.032 0.074\nscale(타율) -0.0364 0.089 -0.408 0.684 -0.212 0.139\n외국인 0.3457 0.095 3.658 0.000 0.160 0.531\nscale(OPS) 0.0476 0.103 0.461 0.645 -0.155 0.250\nC(나이C)[0]:횟수 0.0069 0.129 0.053 0.958 -0.248 0.261\nC(나이C)[1]:횟수 -0.0115 0.049 -0.235 0.814 -0.107 0.084\nC(나이C)[2]:횟수 0.0950 0.019 5.017 0.000 0.058 0.132\nC(나이C)[3]:횟수 0.0874 0.015 5.692 0.000 0.057 0.118\nC(나이C)[4]:횟수 0.0606 0.016 3.767 0.000 0.029 0.092\nC(나이C)[5]:횟수 0.0716 0.027 2.644 0.008 0.018 0.125\n==============================================================================\nOmnibus: 21.071 Durbin-Watson: 1.170\nProb(Omnibus): 0.000 Jarque-Bera (JB): 22.642\nSkew: 0.501 Prob(JB): 1.21e-05\nKurtosis: 3.341 Cond. No. 
103.\n==============================================================================\n\nWarnings:\n[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.\n" ], [ "df = train\nfrom sklearn.model_selection import KFold\n\nscores = np.zeros(5)\ncv = KFold(5, shuffle=True, random_state=0)\nfor i, (idx_train, idx_test) in enumerate(cv.split(df)):\n df_train = df.iloc[idx_train]\n df_test = df.iloc[idx_test]\n \n model = sm.OLS.from_formula('로그연봉 ~ C(시즌) + 포지션 + scale(G) + C(팀) + scale(홈런) + scale(타3)\\\n + scale(타석) + scale(득점) + scale(사구) + scale(고4) + scale(도루)\\\n + scale(삼진) + scale(병살) + scale(희타) + scale(희비) + scale(타율) + 외국인\\\n + scale(OPS) + C(나이C):횟수' , data=df_train)\n result = model.fit()\n \n pred = result.predict(df_test)\n rss = ((df_test.로그연봉 - pred) ** 2).sum()\n tss = ((df_test.로그연봉 - df_test.로그연봉.mean())** 2).sum()\n rsquared = 1 - rss / tss\n \n scores[i] = rsquared\n print(\"학습 R2 = {:.8f}, 검증 R2 = {:.8f}\".format(result.rsquared, rsquared))", "학습 R2 = 0.65949481, 검증 R2 = 0.53668681\n학습 R2 = 0.66369142, 검증 R2 = 0.46238439\n학습 R2 = 0.65373213, 검증 R2 = 0.55063842\n학습 R2 = 0.65665239, 검증 R2 = 0.53377046\n학습 R2 = 0.65596735, 검증 R2 = 0.54717767\n" ] ], [ [ " \n ", "_____no_output_____" ], [ "#### 3. 모델3", "_____no_output_____" ] ], [ [ "model = sm.OLS.from_formula('로그연봉 ~ C(시즌) +C(팀) + C(나이C):scale(횟수) + C(포지션) + scale(G) + scale(홈런) + scale(루타) +\\\n scale(도루) +scale(고4) + scale(타3) + C(외국인) + scale(BABIP) +C(이적C) + scale(ISOD) + \\\n scale(BBK) + scale(타율) + scale(경험)', data=train)\nresult = model.fit()\nprint(result.summary())", " OLS Regression Results \n==============================================================================\nDep. Variable: 로그연봉 R-squared: 0.685\nModel: OLS Adj. R-squared: 0.651\nMethod: Least Squares F-statistic: 19.79\nDate: Thu, 02 Apr 2020 Prob (F-statistic): 4.50e-82\nTime: 21:17:30 Log-Likelihood: -216.09\nNo. 
Observations: 485 AIC: 530.2\nDf Residuals: 436 BIC: 735.2\nDf Model: 48 \nCovariance Type: nonrobust \n=======================================================================================\n coef std err t P>|t| [0.025 0.975]\n---------------------------------------------------------------------------------------\nIntercept 9.9805 0.214 46.722 0.000 9.561 10.400\nC(시즌)[T.2011] -0.0463 0.081 -0.575 0.566 -0.205 0.112\nC(시즌)[T.2012] -0.0234 0.082 -0.284 0.776 -0.185 0.138\nC(시즌)[T.2013] -0.0477 0.087 -0.549 0.584 -0.219 0.123\nC(시즌)[T.2014] 0.1030 0.103 0.999 0.319 -0.100 0.306\nC(시즌)[T.2015] 0.4673 0.108 4.335 0.000 0.255 0.679\nC(시즌)[T.2016] 0.4147 0.118 3.513 0.000 0.183 0.647\nC(시즌)[T.2017] 0.5543 0.115 4.820 0.000 0.328 0.780\nC(시즌)[T.2018] 0.5407 0.119 4.546 0.000 0.307 0.775\nC(팀)[T.KIA] 0.3148 0.205 1.539 0.125 -0.087 0.717\nC(팀)[T.LG] 0.4806 0.205 2.344 0.020 0.078 0.884\nC(팀)[T.NC] 0.3174 0.209 1.516 0.130 -0.094 0.729\nC(팀)[T.SK] 0.3036 0.203 1.494 0.136 -0.096 0.703\nC(팀)[T.kt] 0.3953 0.221 1.788 0.075 -0.039 0.830\nC(팀)[T.넥센] 0.3191 0.209 1.529 0.127 -0.091 0.729\nC(팀)[T.두산] 0.3215 0.207 1.555 0.121 -0.085 0.728\nC(팀)[T.롯데] 0.3133 0.207 1.513 0.131 -0.094 0.720\nC(팀)[T.삼성] 0.2978 0.202 1.471 0.142 -0.100 0.696\nC(팀)[T.한화] 0.3360 0.204 1.645 0.101 -0.065 0.737\nC(팀)[T.히어로즈] 0.4105 0.312 1.315 0.189 -0.203 1.024\nC(포지션)[T.2B] -0.1262 0.086 -1.473 0.142 -0.295 0.042\nC(포지션)[T.3B] 0.0125 0.073 0.171 0.864 -0.132 0.157\nC(포지션)[T.C] 0.0729 0.084 0.863 0.388 -0.093 0.239\nC(포지션)[T.CF] 0.0080 0.086 0.093 0.926 -0.161 0.177\nC(포지션)[T.DH] 0.1919 0.075 2.570 0.010 0.045 0.339\nC(포지션)[T.LF] -0.2754 0.080 -3.451 0.001 -0.432 -0.119\nC(포지션)[T.RF] -0.0486 0.074 -0.653 0.514 -0.195 0.098\nC(포지션)[T.SS] -0.0323 0.084 -0.387 0.699 -0.196 0.132\nC(외국인)[T.1] 0.3564 0.093 3.845 0.000 0.174 0.539\nC(이적C)[T.1] -0.0408 0.046 -0.891 0.373 -0.131 0.049\nC(이적C)[T.2] -0.3295 0.121 -2.728 0.007 -0.567 -0.092\nC(이적C)[T.3] 0.3550 0.172 2.066 0.039 0.017 0.693\nC(나이C)[0]:scale(횟수) 0.4617 0.182 2.533 0.012 0.104 0.820\nC(나이C)[1]:scale(횟수) 0.7467 0.127 5.860 0.000 0.496 0.997\nC(나이C)[2]:scale(횟수) 0.3913 0.067 5.842 0.000 0.260 0.523\nC(나이C)[3]:scale(횟수) 0.2039 0.062 3.275 0.001 0.082 0.326\nC(나이C)[4]:scale(횟수) 0.0257 0.066 0.388 0.698 -0.105 0.156\nC(나이C)[5]:scale(횟수) 0.1430 0.116 1.231 0.219 -0.085 0.371\nscale(G) -0.2415 0.046 -5.212 0.000 -0.333 -0.150\nscale(홈런) -0.1310 0.063 -2.086 0.038 -0.254 -0.008\nscale(루타) 0.4866 0.094 5.171 0.000 0.302 0.671\nscale(도루) -0.0344 0.027 -1.260 0.208 -0.088 0.019\nscale(고4) 0.0785 0.025 3.169 0.002 0.030 0.127\nscale(타3) -0.0463 0.025 -1.869 0.062 -0.095 0.002\nscale(BABIP) -0.0122 0.032 -0.379 0.705 -0.076 0.051\nscale(ISOD) 0.0315 0.025 1.278 0.202 -0.017 0.080\nscale(BBK) -0.0751 0.027 -2.797 0.005 -0.128 -0.022\nscale(타율) -0.0352 0.037 -0.961 0.337 -0.107 0.037\nscale(경험) -0.0679 0.063 -1.072 0.284 -0.192 0.057\n==============================================================================\nOmnibus: 29.803 Durbin-Watson: 1.166\nProb(Omnibus): 0.000 Jarque-Bera (JB): 37.281\nSkew: 0.535 Prob(JB): 8.03e-09\nKurtosis: 3.838 Cond. No. 
77.2\n==============================================================================\n\nWarnings:\n[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.\n" ], [ "df = train\nfrom sklearn.model_selection import KFold\n\nscores = np.zeros(5)\ncv = KFold(5, shuffle=True, random_state=0)\nfor i, (idx_train, idx_test) in enumerate(cv.split(df)):\n df_train = df.iloc[idx_train]\n df_test = df.iloc[idx_test]\n \n model = sm.OLS.from_formula('로그연봉 ~ C(시즌) +C(팀) + C(나이C):scale(횟수) + C(포지션) + scale(G) + scale(홈런) + scale(루타) +\\\n scale(도루) +scale(고4) + scale(타3) + C(외국인) + scale(BABIP) +C(이적C) + scale(ISOD) + \\\n scale(BBK) + scale(타율) + scale(경험)' , data=df_train)\n result = model.fit()\n \n pred = result.predict(df_test)\n rss = ((df_test.로그연봉 - pred) ** 2).sum()\n tss = ((df_test.로그연봉 - df_test.로그연봉.mean())** 2).sum()\n rsquared = 1 - rss / tss\n \n scores[i] = rsquared\n print(\"학습 R2 = {:.8f}, 검증 R2 = {:.8f}\".format(result.rsquared, rsquared))", "학습 R2 = 0.69761969, 검증 R2 = 0.61152107\n학습 R2 = 0.70061104, 검증 R2 = 0.54803942\n학습 R2 = 0.67896484, 검증 R2 = 0.66771108\n학습 R2 = 0.69243782, 검증 R2 = 0.60517417\n학습 R2 = 0.69336666, 검증 R2 = 0.60310650\n" ] ], [ [ "#### 4. 모델 4", "_____no_output_____" ] ], [ [ "model = sm.OLS.from_formula('로그연봉 ~ C(시즌) + C(팀) + C(포지션) + G +\\\n 홈런 + 루타 + 타석 + 득점 +\\\n 도루 + 고4 + 타3 + 희타 + 희비 + 타율 + OPS +\\\n C(외국인) + C(나이C):횟수', data=train)\nresult = model.fit()\nprint(result.summary())", " OLS Regression Results \n==============================================================================\nDep. Variable: 로그연봉 R-squared: 0.635\nModel: OLS Adj. R-squared: 0.597\nMethod: Least Squares F-statistic: 16.56\nDate: Thu, 02 Apr 2020 Prob (F-statistic): 6.02e-70\nTime: 21:38:58 Log-Likelihood: -252.23\nNo. 
Observations: 485 AIC: 598.5\nDf Residuals: 438 BIC: 795.1\nDf Model: 46 \nCovariance Type: nonrobust \n=================================================================================\n coef std err t P>|t| [0.025 0.975]\n---------------------------------------------------------------------------------\nIntercept 9.4750 0.253 37.457 0.000 8.978 9.972\nC(시즌)[T.2011] -0.0473 0.087 -0.542 0.588 -0.219 0.124\nC(시즌)[T.2012] -0.0212 0.090 -0.236 0.814 -0.198 0.156\nC(시즌)[T.2013] -0.0244 0.093 -0.262 0.793 -0.207 0.158\nC(시즌)[T.2014] 0.1264 0.107 1.186 0.236 -0.083 0.336\nC(시즌)[T.2015] 0.4499 0.108 4.172 0.000 0.238 0.662\nC(시즌)[T.2016] 0.4058 0.120 3.388 0.001 0.170 0.641\nC(시즌)[T.2017] 0.4920 0.114 4.328 0.000 0.269 0.715\nC(시즌)[T.2018] 0.4437 0.112 3.950 0.000 0.223 0.664\nC(팀)[T.KIA] 0.4087 0.216 1.892 0.059 -0.016 0.833\nC(팀)[T.LG] 0.5444 0.218 2.492 0.013 0.115 0.974\nC(팀)[T.NC] 0.3786 0.221 1.713 0.087 -0.056 0.813\nC(팀)[T.SK] 0.3992 0.215 1.859 0.064 -0.023 0.821\nC(팀)[T.kt] 0.4654 0.236 1.975 0.049 0.002 0.929\nC(팀)[T.넥센] 0.3628 0.222 1.634 0.103 -0.074 0.799\nC(팀)[T.두산] 0.4419 0.218 2.025 0.043 0.013 0.871\nC(팀)[T.롯데] 0.3894 0.219 1.782 0.075 -0.040 0.819\nC(팀)[T.삼성] 0.4026 0.214 1.880 0.061 -0.018 0.824\nC(팀)[T.한화] 0.5086 0.217 2.340 0.020 0.081 0.936\nC(팀)[T.히어로즈] 0.5406 0.331 1.633 0.103 -0.110 1.191\nC(포지션)[T.2B] -0.0843 0.094 -0.897 0.370 -0.269 0.100\nC(포지션)[T.3B] 0.0211 0.080 0.264 0.792 -0.136 0.178\nC(포지션)[T.C] 0.1551 0.087 1.775 0.077 -0.017 0.327\nC(포지션)[T.CF] 0.0347 0.094 0.369 0.713 -0.150 0.220\nC(포지션)[T.DH] 0.2336 0.081 2.899 0.004 0.075 0.392\nC(포지션)[T.LF] -0.2306 0.087 -2.657 0.008 -0.401 -0.060\nC(포지션)[T.RF] 0.0555 0.080 0.693 0.489 -0.102 0.213\nC(포지션)[T.SS] -0.0562 0.093 -0.607 0.544 -0.238 0.126\nC(외국인)[T.1] 0.3681 0.095 3.870 0.000 0.181 0.555\nG -0.0103 0.002 -4.530 0.000 -0.015 -0.006\n홈런 -0.0004 0.009 -0.045 0.964 -0.018 0.017\n루타 0.0028 0.002 1.360 0.174 -0.001 0.007\n타석 0.0015 0.001 1.815 0.070 -0.000 0.003\n득점 -0.0020 0.003 -0.699 0.485 -0.008 0.004\n도루 -0.0028 0.003 -0.824 0.411 -0.009 0.004\n고4 0.0365 0.011 3.371 0.001 0.015 0.058\n타3 -0.0261 0.018 -1.461 0.145 -0.061 0.009\n희타 0.0019 0.006 0.293 0.769 -0.011 0.015\n희비 0.0155 0.011 1.464 0.144 -0.005 0.036\n타율 -0.3441 1.564 -0.220 0.826 -3.417 2.729\nOPS 0.1804 0.581 0.310 0.756 -0.962 1.322\nC(나이C)[0]:횟수 -0.0027 0.131 -0.021 0.983 -0.260 0.255\nC(나이C)[1]:횟수 -0.0001 0.049 -0.003 0.998 -0.097 0.097\nC(나이C)[2]:횟수 0.1031 0.019 5.373 0.000 0.065 0.141\nC(나이C)[3]:횟수 0.0962 0.015 6.238 0.000 0.066 0.126\nC(나이C)[4]:횟수 0.0674 0.016 4.171 0.000 0.036 0.099\nC(나이C)[5]:횟수 0.0758 0.027 2.768 0.006 0.022 0.130\n==============================================================================\nOmnibus: 20.133 Durbin-Watson: 1.128\nProb(Omnibus): 0.000 Jarque-Bera (JB): 21.622\nSkew: 0.480 Prob(JB): 2.02e-05\nKurtosis: 3.386 Cond. No. 4.14e+04\n==============================================================================\n\nWarnings:\n[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.\n[2] The condition number is large, 4.14e+04. 
This might indicate that there are\nstrong multicollinearity or other numerical problems.\n" ], [ "df = train\nfrom sklearn.model_selection import KFold\n\nscores = np.zeros(5)\ncv = KFold(5, shuffle=True, random_state=0)\nfor i, (idx_train, idx_test) in enumerate(cv.split(df)):\n df_train = df.iloc[idx_train]\n df_test = df.iloc[idx_test]\n \n model = sm.OLS.from_formula('로그연봉 ~ C(시즌) + C(팀) + C(포지션) + G +\\\n 홈런 + 루타 + 타석 + 득점 +\\\n 도루 + 고4 + 타3 + 희타 + 희비 + 타율 + OPS +\\\n C(외국인) + C(나이C):횟수' , data=df_train)\n result = model.fit()\n \n pred = result.predict(df_test)\n rss = ((df_test.로그연봉 - pred) ** 2).sum()\n tss = ((df_test.로그연봉 - df_test.로그연봉.mean())** 2).sum()\n rsquared = 1 - rss / tss\n \n scores[i] = rsquared\n print(\"학습 R2 = {:.8f}, 검증 R2 = {:.8f}\".format(result.rsquared, rsquared))", "학습 R2 = 0.65085263, 검증 R2 = 0.49514884\n학습 R2 = 0.65990360, 검증 R2 = 0.40265988\n학습 R2 = 0.63610197, 검증 R2 = 0.55490774\n학습 R2 = 0.64067558, 검증 R2 = 0.53617170\n학습 R2 = 0.64395462, 검증 R2 = 0.53501300\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
4a68424270da91344a65e92b1f6a718e98a90183
31,240
ipynb
Jupyter Notebook
src/Python/main.ipynb
LuisFranciscoHS/InteractionsClassification
59b1555b95f7d57d27fb01194df041521e574906
[ "Apache-2.0" ]
null
null
null
src/Python/main.ipynb
LuisFranciscoHS/InteractionsClassification
59b1555b95f7d57d27fb01194df041521e574906
[ "Apache-2.0" ]
14
2019-08-03T20:22:40.000Z
2019-09-30T13:52:14.000Z
src/Python/main.ipynb
LuisFranciscoHS/InteractionsClassification
59b1555b95f7d57d27fb01194df041521e574906
[ "Apache-2.0" ]
null
null
null
99.490446
1,451
0.453617
[ [ [ "from tensorflow.keras.datasets import imdb\n(x_train, y_train), (x_test, y_test) = imdb.load_data(path=\"imdb.npz\",\n num_words=None,\n skip_top=0,\n maxlen=None,\n seed=113,\n start_char=1,\n oov_char=2,\n index_from=3)\nprint(x_train.shape, x_test.shape, y_train.shape, y_test.shape)", "(25000,) (25000,) (25000,) (25000,)\n" ], [ "from tensorflow.keras.datasets import mnist\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\nprint(x_train.shape, x_test.shape, y_train.shape, y_test.shape)\ntype(x_train[:,0])", "(60000, 28, 28) (10000, 28, 28) (60000,) (10000,)\n" ], [ "from tensorflow.keras.datasets import cifar10\n\n(x_train, y_train), (x_test, y_test) = cifar10.load_data()\nprint(x_train.shape, x_test.shape, y_train.shape, y_test.shape)\ntype(x_train[:,0])", "Downloading data from https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz\n\r 8192/170498071 [..............................] - ETA: 46:57\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 40960/170498071 [..............................] - ETA: 19:03\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 90112/170498071 [..............................] - ETA: 12:51\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 172032/170498071 [..............................] - ETA: 8:57 \b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 352256/170498071 [..............................] - ETA: 5:28\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 729088/170498071 [..............................] - ETA: 3:09\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 1482752/170498071 [..............................] - ETA: 1:48\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 2973696/170498071 [..............................] - ETA: 56s \b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 2990080/170498071 [..............................] - ETA: 1:00\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 5922816/170498071 [>.............................] - ETA: 31s \b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 6004736/170498071 [>.............................] - ETA: 33s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 8675328/170498071 [>.............................] - ETA: 23s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 9117696/170498071 [>.............................] - ETA: 24s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 10805248/170498071 [>.............................] 
- ETA: 20s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 12034048/170498071 [=>............................] - ETA: 19s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 13852672/170498071 [=>............................] - ETA: 17s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 15179776/170498071 [=>............................] - ETA: 16s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 16949248/170498071 [=>............................] - ETA: 15s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 18259968/170498071 [==>...........................] - ETA: 14s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 19357696/170498071 [==>...........................] - ETA: 14s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 21282816/170498071 [==>...........................] - ETA: 12s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 21405696/170498071 [==>...........................] - ETA: 13s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 23093248/170498071 [===>..........................] - ETA: 12s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 24125440/170498071 [===>..........................] - ETA: 12s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 26157056/170498071 [===>..........................] - ETA: 11s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 27205632/170498071 [===>..........................] - ETA: 11s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 29237248/170498071 [====>.........................] - ETA: 10s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 30285824/170498071 [====>.........................] - ETA: 10s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 32317440/170498071 [====>.........................] - ETA: 10s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 33349632/170498071 [====>.........................] - ETA: 10s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 35446784/170498071 [=====>........................] - ETA: 9s \b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 36429824/170498071 [=====>........................] 
- ETA: 9s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 38666240/170498071 [=====>........................] - ETA: 9s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 39493632/170498071 [=====>........................] - ETA: 9s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 41836544/170498071 [======>.......................] - ETA: 8s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 42524672/170498071 [======>.......................] - ETA: 8s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 44736512/170498071 [======>.......................] - ETA: 8s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 45654016/170498071 [=======>......................] - ETA: 8s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 47702016/170498071 [=======>......................] - ETA: 7s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 48783360/170498071 [=======>......................] - ETA: 7s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 50831360/170498071 [=======>......................] - ETA: 7s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 51912704/170498071 [========>.....................] - ETA: 7s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 53878784/170498071 [========>.....................] - ETA: 7s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 54714368/170498071 [========>.....................] - ETA: 7s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 56958976/170498071 [=========>....................] - ETA: 6s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 57466880/170498071 [=========>....................] - ETA: 6s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 60039168/170498071 [=========>....................] - ETA: 6s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 60596224/170498071 [=========>....................] - ETA: 6s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 63152128/170498071 [==========>...................] - ETA: 6s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 63741952/170498071 [==========>...................] 
- ETA: 6s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 66297856/170498071 [==========>...................] - ETA: 6s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 66887680/170498071 [==========>...................] - ETA: 6s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 69410816/170498071 [===========>..................] - ETA: 5s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 70017024/170498071 [===========>..................] - ETA: 5s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 72196096/170498071 [===========>..................] - ETA: 5s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 73146368/170498071 [===========>..................] - ETA: 5s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 75243520/170498071 [============>.................] - ETA: 5s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 76292096/170498071 [============>.................] - ETA: 5s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 78405632/170498071 [============>.................] - ETA: 5s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 79388672/170498071 [============>.................] - ETA: 5s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 81420288/170498071 [=============>................] - ETA: 4s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 82141184/170498071 [=============>................] - ETA: 4s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 84467712/170498071 [=============>................] - ETA: 4s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 85254144/170498071 [==============>...............] - ETA: 4s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 87728128/170498071 [==============>...............] - ETA: 4s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 88383488/170498071 [==============>...............] - ETA: 4s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 90857472/170498071 [==============>...............] - ETA: 4s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 91496448/170498071 [===============>..............] 
- ETA: 4s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 93921280/170498071 [===============>..............] - ETA: 4s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 94642176/170498071 [===============>..............] - ETA: 4s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 97181696/170498071 [================>.............] - ETA: 3s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 97771520/170498071 [================>.............] - ETA: 3s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r 99770368/170498071 [================>.............] - ETA: 3s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r100917248/170498071 [================>.............] - ETA: 3s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r102981632/170498071 [=================>............] - ETA: 3s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r104046592/170498071 [=================>............] - ETA: 3s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r105930752/170498071 [=================>............] - ETA: 3s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r107110400/170498071 [=================>............] - ETA: 3s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r108994560/170498071 [==================>...........] - ETA: 3s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r109961216/170498071 [==================>...........] - ETA: 3s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r112041984/170498071 [==================>...........] - ETA: 3s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r112828416/170498071 [==================>...........] - ETA: 3s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r114647040/170498071 [===================>..........] - ETA: 2s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r115924992/170498071 [===================>..........] - ETA: 2s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r117628928/170498071 [===================>..........] - ETA: 2s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r118988800/170498071 [===================>..........] 
- ETA: 2s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r120561664/170498071 [====================>.........] - ETA: 2s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r122003456/170498071 [====================>.........] - ETA: 2s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r123625472/170498071 [====================>.........] - ETA: 2s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r125083648/170498071 [=====================>........] - ETA: 2s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r126705664/170498071 [=====================>........] - ETA: 2s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r128131072/170498071 [=====================>........] - ETA: 2s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r128917504/170498071 [=====================>........] - ETA: 2s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r131260416/170498071 [======================>.......] - ETA: 2s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r132046848/170498071 [======================>.......] - ETA: 1s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r134053888/170498071 [======================>.......] - ETA: 1s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r134406144/170498071 [======================>.......] - ETA: 1s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r135880704/170498071 [======================>.......] - ETA: 1s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r137469952/170498071 [=======================>......] - ETA: 1s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r138125312/170498071 [=======================>......] - ETA: 1s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r139886592/170498071 [=======================>......] - ETA: 1s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r140632064/170498071 [=======================>......] - ETA: 1s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r141582336/170498071 [=======================>......] - ETA: 1s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r143663104/170498071 [========================>.....] 
- ETA: 1s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r144252928/170498071 [========================>.....] - ETA: 1s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r146169856/170498071 [========================>.....] - ETA: 1s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r146825216/170498071 [========================>.....] - ETA: 1s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r147775488/170498071 [=========================>....] - ETA: 1s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r149790720/170498071 [=========================>....] - ETA: 1s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r150413312/170498071 [=========================>....] - ETA: 1s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r151543808/170498071 [=========================>....] - ETA: 0s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r152969216/170498071 [=========================>....] - ETA: 0s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r153952256/170498071 [==========================>...] - ETA: 0s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r156098560/170498071 [==========================>...] - ETA: 0s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r156999680/170498071 [==========================>...] - ETA: 0s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r158842880/170498071 [==========================>...] - ETA: 0s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r159973376/170498071 [===========================>..] - ETA: 0s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r160669696/170498071 [===========================>..] - ETA: 0s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r162226176/170498071 [===========================>..] - ETA: 0s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r163127296/170498071 [===========================>..] - ETA: 0s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r164913152/170498071 [============================>.] - ETA: 0s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r166141952/170498071 [============================>.] 
- ETA: 0s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r166338560/170498071 [============================>.] - ETA: 0s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r168026112/170498071 [============================>.] - ETA: 0s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r169287680/170498071 [============================>.] - ETA: 0s\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r170500096/170498071 [==============================] - 9s 0us/step\n(50000, 32, 32, 3) (10000, 32, 32, 3) (50000, 1) (10000, 1)\n" ], [ "from src.Python.datasets import toronto\nfrom tensorflow.keras import layers\nfrom tensorflow.keras import models\nfrom tensorflow.keras import Sequential\n\nX_train, X_test, y_train, y_test = toronto.load_data()\n\nprint(x_train.shape, x_test.shape, y_train.shape, y_test.shape)\n\nmodel = Sequential()\nmodel.add(layers.Dense(60, input_dim=60, activation='relu'))\nmodel.add(layers.Dense(1, activation='sigmoid'))\n\n# Compile model\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n## Make simple neural network with same attributes\n\n## Extend attributes\n\n## Prepare data for neural networks\n\n## Make full size neural networks\n\n# Classification of sets of edges\n\n", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
4a68598d0b5b8796de7c40daaf6d31fcd4961e33
16,345
ipynb
Jupyter Notebook
Cache product catalogue access.ipynb
EarthLab-Luxembourg/satellite_job_access
d702fbf9e07ceb167fdf6c425015c28d09c029ac
[ "Apache-2.0" ]
null
null
null
Cache product catalogue access.ipynb
EarthLab-Luxembourg/satellite_job_access
d702fbf9e07ceb167fdf6c425015c28d09c029ac
[ "Apache-2.0" ]
null
null
null
Cache product catalogue access.ipynb
EarthLab-Luxembourg/satellite_job_access
d702fbf9e07ceb167fdf6c425015c28d09c029ac
[ "Apache-2.0" ]
null
null
null
48.791045
255
0.684368
[ [ [ "# Demonstration notebook to search, list product and download a band", "_____no_output_____" ], [ "This notebook is defined in four different sections:\n1. Import requirements and definition of the parameters\n2. Search for cached products\n3. Listing the bands of one product\n4. Downloading a band", "_____no_output_____" ], [ "## Import requirements and definition of the parameters", "_____no_output_____" ] ], [ [ "import os\nimport os.path as path\nimport requests\nimport json", "_____no_output_____" ] ], [ [ "Access to the Catalogue API\nThis API is listing the EO products that have been downloaded within Max-ICS Cache\nThe cache is populated thanks to user jobs that can be created using the user job api", "_____no_output_____" ] ], [ [ "API_CATALOGUE_BASE_URL = \"https://api.earthlab.lu/priv/sat-job-catalog\"\nAPI_PRODUCT_BANDS_BASE_URL = \"https://api.earthlab.lu/priv/sat-product-job/v1/products/{product_id}/bands\"\nAPI_PRODUCT_BAND_BASE_URL = \"https://api.earthlab.lu/priv/sat-product-job/v1/products/{product_id}/bands/{file_id}\"\nAPI_AUTH_BASE_URL = \"https://max-ics.earthlab.lu/authServer/api/v1/connection\"", "_____no_output_____" ], [ "PROVIDER = \"sentinel-2\"\nGEO_AREA = {\"type\":\"Polygon\",\"coordinates\":[[[-104.852226585911,39.600325260831596],[-97.9901366137382,39.600325260831596],[-97.9901366137382,43.20496589647098],[-104.852226585911,43.20496589647098],[-104.852226585911,39.600325260831596]]]}\nUSERNAME = \"<user>\" ## Please modify\nPASSWORD = \"<pass>\" ## Please modify", "_____no_output_____" ] ], [ [ "Various function to can be used to authenticate, search for products, list bands and download bands", "_____no_output_____" ] ], [ [ "def get_auth_token() -> type:\n \"\"\"Function to get an authentication Bearer\n :return: JWT token\n :rtype: str\"\"\"\n payload = {\"user_name\": USERNAME,\"password\": PASSWORD,\"with_token\": False}\n req_auth = requests.post(API_AUTH_BASE_URL, json=payload, )\n req_auth.raise_for_status()\n bearer = req_auth.headers['Set-Cookie'].split(\";\")[0]\n return \"Bearer \" + bearer.split(\"=\")[1]\n\ndef search_for_cached_products(geo_area: dict, provider: str, limit: int) -> list:\n \"\"\"Function to search for cached product within a certain area\n :param geo_area: GeoJSON for the rectangled searched area\n :type geo_area: dict (GeoJSON)\n :param provider: name of the provider (one of: landsat-8, sentinel-2...)\n :type provider: str\n :param limit: Number of result (max 1000)\n :type limit: int\n :return: List of products\n :rtype: list\n \"\"\"\n ## Filter results within given flattened coordinates as a single array.\n ## It can contains 4 coordinates (bbox) or more than 8 (polygon)\n within = []\n for point in geo_area['coordinates'][0]:\n within.append(point[0])\n within.append(point[1])\n url = API_CATALOGUE_BASE_URL + \"/products?provider=\" + provider \n for coord in within:\n url = url + \"&within=\" + str(coord)\n url = url + \"&limit=\" + str(limit)\n req = requests.get(url=url, headers={\"authorization\": get_auth_token()})\n req.raise_for_status()\n return req.json()\n \n\ndef download_product_bands(product_id: str) -> list:\n \"\"\"Function to get the bands information for one particular product\n :param product_id: ID of the product\n :type product_id: str\n :return: list of products\n :rtype: list\n \"\"\"\n url = API_PRODUCT_BANDS_BASE_URL.format(product_id=product_id)\n req = requests.request(method=\"get\", url=url, headers={\"authorization\": get_auth_token()})\n req.raise_for_status()\n return 
req.json()\n\ndef download_one_band(product_id: str, file_id: str, filename: str, out_path: str = \"./\") -> None:\n \"\"\"Function to download a product from the API\n :param product_id: ID of the product\n :type product_id: str\n :param file_id: ID of the band\n :type file_id: str\n :param filename: Name of the file to save\n :type filename: str\n :param out_path: folder to use for saving the file [Default to: './']\n :type out_path: str\n \"\"\"\n url = API_PRODUCT_BAND_BASE_URL.format(product_id=product_id, file_id=file_id)\n req = requests.get(url=url, headers={\"authorization\": get_auth_token()}, stream=True)\n req.raise_for_status()\n handle = open(os.path.join(out_path, filename), \"wb\")\n for chunk in req.iter_content(chunk_size=8192):\n if chunk: # filter out keep-alive new chunks\n handle.write(chunk)\n", "_____no_output_____" ] ], [ [ "# First making the search within the cached products", "_____no_output_____" ] ], [ [ "product_list = search_for_cached_products(geo_area=GEO_AREA, provider=PROVIDER, limit=1000)\nprint(\"Got \", len(product_list), \" products\")", "Got 921 products\n" ] ], [ [ "# Second listing the bands", "_____no_output_____" ] ], [ [ "one_product_id = product_list[0]['id']\none_product_bands = download_product_bands(product_id=one_product_id)\nprint(\"The first product has \", len(one_product_bands), \" pseudo-bands (files respective to band and resolutions):\")\nprint(\"------------------------------------|---------------------------------------------------------------\")\nprint(\"Filename | Band ID\")\nprint(\"------------------------------------|---------------------------------------------------------------\")\nfor band in one_product_bands:\n print(band['filename'], \" | \", band['id'])", "The first product has 35 pseudo-bands (files respective to band and resolutions):\n------------------------------------|---------------------------------------------------------------\nFilename | Band ID\n------------------------------------|---------------------------------------------------------------\nT14TKL_20200609T172911_B07_60m.jp2 | UzJBX01TSUwyQV8yMDIwMDYwOVQxNzI5MTFfTjAyMTRfUjA1NV9UMTRUS0xfMjAyMDA2MDlUMjE1MDEyLlNBRkUvVDE0VEtMXzIwMjAwNjA5VDE3MjkxMV9CMDdfNjBtLmpwMg==\nT14TKL_20200609T172911_B8A_60m.jp2 | UzJBX01TSUwyQV8yMDIwMDYwOVQxNzI5MTFfTjAyMTRfUjA1NV9UMTRUS0xfMjAyMDA2MDlUMjE1MDEyLlNBRkUvVDE0VEtMXzIwMjAwNjA5VDE3MjkxMV9COEFfNjBtLmpwMg==\nT14TKL_20200609T172911_WVP_60m.jp2 | UzJBX01TSUwyQV8yMDIwMDYwOVQxNzI5MTFfTjAyMTRfUjA1NV9UMTRUS0xfMjAyMDA2MDlUMjE1MDEyLlNBRkUvVDE0VEtMXzIwMjAwNjA5VDE3MjkxMV9XVlBfNjBtLmpwMg==\nT14TKL_20200609T172911_TCI_60m.jp2 | UzJBX01TSUwyQV8yMDIwMDYwOVQxNzI5MTFfTjAyMTRfUjA1NV9UMTRUS0xfMjAyMDA2MDlUMjE1MDEyLlNBRkUvVDE0VEtMXzIwMjAwNjA5VDE3MjkxMV9UQ0lfNjBtLmpwMg==\nT14TKL_20200609T172911_B06_60m.jp2 | UzJBX01TSUwyQV8yMDIwMDYwOVQxNzI5MTFfTjAyMTRfUjA1NV9UMTRUS0xfMjAyMDA2MDlUMjE1MDEyLlNBRkUvVDE0VEtMXzIwMjAwNjA5VDE3MjkxMV9CMDZfNjBtLmpwMg==\nT14TKL_20200609T172911_B03_60m.jp2 | UzJBX01TSUwyQV8yMDIwMDYwOVQxNzI5MTFfTjAyMTRfUjA1NV9UMTRUS0xfMjAyMDA2MDlUMjE1MDEyLlNBRkUvVDE0VEtMXzIwMjAwNjA5VDE3MjkxMV9CMDNfNjBtLmpwMg==\nT14TKL_20200609T172911_B11_60m.jp2 | UzJBX01TSUwyQV8yMDIwMDYwOVQxNzI5MTFfTjAyMTRfUjA1NV9UMTRUS0xfMjAyMDA2MDlUMjE1MDEyLlNBRkUvVDE0VEtMXzIwMjAwNjA5VDE3MjkxMV9CMTFfNjBtLmpwMg==\nT14TKL_20200609T172911_B12_60m.jp2 | UzJBX01TSUwyQV8yMDIwMDYwOVQxNzI5MTFfTjAyMTRfUjA1NV9UMTRUS0xfMjAyMDA2MDlUMjE1MDEyLlNBRkUvVDE0VEtMXzIwMjAwNjA5VDE3MjkxMV9CMTJfNjBtLmpwMg==\nT14TKL_20200609T172911_B01_60m.jp2 | 
UzJBX01TSUwyQV8yMDIwMDYwOVQxNzI5MTFfTjAyMTRfUjA1NV9UMTRUS0xfMjAyMDA2MDlUMjE1MDEyLlNBRkUvVDE0VEtMXzIwMjAwNjA5VDE3MjkxMV9CMDFfNjBtLmpwMg==\nT14TKL_20200609T172911_B02_60m.jp2 | UzJBX01TSUwyQV8yMDIwMDYwOVQxNzI5MTFfTjAyMTRfUjA1NV9UMTRUS0xfMjAyMDA2MDlUMjE1MDEyLlNBRkUvVDE0VEtMXzIwMjAwNjA5VDE3MjkxMV9CMDJfNjBtLmpwMg==\nT14TKL_20200609T172911_B05_60m.jp2 | UzJBX01TSUwyQV8yMDIwMDYwOVQxNzI5MTFfTjAyMTRfUjA1NV9UMTRUS0xfMjAyMDA2MDlUMjE1MDEyLlNBRkUvVDE0VEtMXzIwMjAwNjA5VDE3MjkxMV9CMDVfNjBtLmpwMg==\nT14TKL_20200609T172911_SCL_60m.jp2 | UzJBX01TSUwyQV8yMDIwMDYwOVQxNzI5MTFfTjAyMTRfUjA1NV9UMTRUS0xfMjAyMDA2MDlUMjE1MDEyLlNBRkUvVDE0VEtMXzIwMjAwNjA5VDE3MjkxMV9TQ0xfNjBtLmpwMg==\nT14TKL_20200609T172911_B04_60m.jp2 | UzJBX01TSUwyQV8yMDIwMDYwOVQxNzI5MTFfTjAyMTRfUjA1NV9UMTRUS0xfMjAyMDA2MDlUMjE1MDEyLlNBRkUvVDE0VEtMXzIwMjAwNjA5VDE3MjkxMV9CMDRfNjBtLmpwMg==\nT14TKL_20200609T172911_B09_60m.jp2 | UzJBX01TSUwyQV8yMDIwMDYwOVQxNzI5MTFfTjAyMTRfUjA1NV9UMTRUS0xfMjAyMDA2MDlUMjE1MDEyLlNBRkUvVDE0VEtMXzIwMjAwNjA5VDE3MjkxMV9CMDlfNjBtLmpwMg==\nT14TKL_20200609T172911_AOT_60m.jp2 | UzJBX01TSUwyQV8yMDIwMDYwOVQxNzI5MTFfTjAyMTRfUjA1NV9UMTRUS0xfMjAyMDA2MDlUMjE1MDEyLlNBRkUvVDE0VEtMXzIwMjAwNjA5VDE3MjkxMV9BT1RfNjBtLmpwMg==\nT14TKL_20200609T172911_SCL_20m.jp2 | UzJBX01TSUwyQV8yMDIwMDYwOVQxNzI5MTFfTjAyMTRfUjA1NV9UMTRUS0xfMjAyMDA2MDlUMjE1MDEyLlNBRkUvVDE0VEtMXzIwMjAwNjA5VDE3MjkxMV9TQ0xfMjBtLmpwMg==\nT14TKL_20200609T172911_B11_20m.jp2 | UzJBX01TSUwyQV8yMDIwMDYwOVQxNzI5MTFfTjAyMTRfUjA1NV9UMTRUS0xfMjAyMDA2MDlUMjE1MDEyLlNBRkUvVDE0VEtMXzIwMjAwNjA5VDE3MjkxMV9CMTFfMjBtLmpwMg==\nT14TKL_20200609T172911_B03_20m.jp2 | UzJBX01TSUwyQV8yMDIwMDYwOVQxNzI5MTFfTjAyMTRfUjA1NV9UMTRUS0xfMjAyMDA2MDlUMjE1MDEyLlNBRkUvVDE0VEtMXzIwMjAwNjA5VDE3MjkxMV9CMDNfMjBtLmpwMg==\nT14TKL_20200609T172911_B02_20m.jp2 | UzJBX01TSUwyQV8yMDIwMDYwOVQxNzI5MTFfTjAyMTRfUjA1NV9UMTRUS0xfMjAyMDA2MDlUMjE1MDEyLlNBRkUvVDE0VEtMXzIwMjAwNjA5VDE3MjkxMV9CMDJfMjBtLmpwMg==\nT14TKL_20200609T172911_B05_20m.jp2 | UzJBX01TSUwyQV8yMDIwMDYwOVQxNzI5MTFfTjAyMTRfUjA1NV9UMTRUS0xfMjAyMDA2MDlUMjE1MDEyLlNBRkUvVDE0VEtMXzIwMjAwNjA5VDE3MjkxMV9CMDVfMjBtLmpwMg==\nT14TKL_20200609T172911_B06_20m.jp2 | UzJBX01TSUwyQV8yMDIwMDYwOVQxNzI5MTFfTjAyMTRfUjA1NV9UMTRUS0xfMjAyMDA2MDlUMjE1MDEyLlNBRkUvVDE0VEtMXzIwMjAwNjA5VDE3MjkxMV9CMDZfMjBtLmpwMg==\nT14TKL_20200609T172911_B12_20m.jp2 | UzJBX01TSUwyQV8yMDIwMDYwOVQxNzI5MTFfTjAyMTRfUjA1NV9UMTRUS0xfMjAyMDA2MDlUMjE1MDEyLlNBRkUvVDE0VEtMXzIwMjAwNjA5VDE3MjkxMV9CMTJfMjBtLmpwMg==\nT14TKL_20200609T172911_AOT_20m.jp2 | UzJBX01TSUwyQV8yMDIwMDYwOVQxNzI5MTFfTjAyMTRfUjA1NV9UMTRUS0xfMjAyMDA2MDlUMjE1MDEyLlNBRkUvVDE0VEtMXzIwMjAwNjA5VDE3MjkxMV9BT1RfMjBtLmpwMg==\nT14TKL_20200609T172911_B8A_20m.jp2 | UzJBX01TSUwyQV8yMDIwMDYwOVQxNzI5MTFfTjAyMTRfUjA1NV9UMTRUS0xfMjAyMDA2MDlUMjE1MDEyLlNBRkUvVDE0VEtMXzIwMjAwNjA5VDE3MjkxMV9COEFfMjBtLmpwMg==\nT14TKL_20200609T172911_TCI_20m.jp2 | UzJBX01TSUwyQV8yMDIwMDYwOVQxNzI5MTFfTjAyMTRfUjA1NV9UMTRUS0xfMjAyMDA2MDlUMjE1MDEyLlNBRkUvVDE0VEtMXzIwMjAwNjA5VDE3MjkxMV9UQ0lfMjBtLmpwMg==\nT14TKL_20200609T172911_B07_20m.jp2 | UzJBX01TSUwyQV8yMDIwMDYwOVQxNzI5MTFfTjAyMTRfUjA1NV9UMTRUS0xfMjAyMDA2MDlUMjE1MDEyLlNBRkUvVDE0VEtMXzIwMjAwNjA5VDE3MjkxMV9CMDdfMjBtLmpwMg==\nT14TKL_20200609T172911_WVP_20m.jp2 | UzJBX01TSUwyQV8yMDIwMDYwOVQxNzI5MTFfTjAyMTRfUjA1NV9UMTRUS0xfMjAyMDA2MDlUMjE1MDEyLlNBRkUvVDE0VEtMXzIwMjAwNjA5VDE3MjkxMV9XVlBfMjBtLmpwMg==\nT14TKL_20200609T172911_B04_20m.jp2 | UzJBX01TSUwyQV8yMDIwMDYwOVQxNzI5MTFfTjAyMTRfUjA1NV9UMTRUS0xfMjAyMDA2MDlUMjE1MDEyLlNBRkUvVDE0VEtMXzIwMjAwNjA5VDE3MjkxMV9CMDRfMjBtLmpwMg==\nT14TKL_20200609T172911_WVP_10m.jp2 | 
UzJBX01TSUwyQV8yMDIwMDYwOVQxNzI5MTFfTjAyMTRfUjA1NV9UMTRUS0xfMjAyMDA2MDlUMjE1MDEyLlNBRkUvVDE0VEtMXzIwMjAwNjA5VDE3MjkxMV9XVlBfMTBtLmpwMg==\nT14TKL_20200609T172911_B02_10m.jp2 | UzJBX01TSUwyQV8yMDIwMDYwOVQxNzI5MTFfTjAyMTRfUjA1NV9UMTRUS0xfMjAyMDA2MDlUMjE1MDEyLlNBRkUvVDE0VEtMXzIwMjAwNjA5VDE3MjkxMV9CMDJfMTBtLmpwMg==\nT14TKL_20200609T172911_AOT_10m.jp2 | UzJBX01TSUwyQV8yMDIwMDYwOVQxNzI5MTFfTjAyMTRfUjA1NV9UMTRUS0xfMjAyMDA2MDlUMjE1MDEyLlNBRkUvVDE0VEtMXzIwMjAwNjA5VDE3MjkxMV9BT1RfMTBtLmpwMg==\nT14TKL_20200609T172911_TCI_10m.jp2 | UzJBX01TSUwyQV8yMDIwMDYwOVQxNzI5MTFfTjAyMTRfUjA1NV9UMTRUS0xfMjAyMDA2MDlUMjE1MDEyLlNBRkUvVDE0VEtMXzIwMjAwNjA5VDE3MjkxMV9UQ0lfMTBtLmpwMg==\nT14TKL_20200609T172911_B03_10m.jp2 | UzJBX01TSUwyQV8yMDIwMDYwOVQxNzI5MTFfTjAyMTRfUjA1NV9UMTRUS0xfMjAyMDA2MDlUMjE1MDEyLlNBRkUvVDE0VEtMXzIwMjAwNjA5VDE3MjkxMV9CMDNfMTBtLmpwMg==\nT14TKL_20200609T172911_B04_10m.jp2 | UzJBX01TSUwyQV8yMDIwMDYwOVQxNzI5MTFfTjAyMTRfUjA1NV9UMTRUS0xfMjAyMDA2MDlUMjE1MDEyLlNBRkUvVDE0VEtMXzIwMjAwNjA5VDE3MjkxMV9CMDRfMTBtLmpwMg==\nT14TKL_20200609T172911_B08_10m.jp2 | UzJBX01TSUwyQV8yMDIwMDYwOVQxNzI5MTFfTjAyMTRfUjA1NV9UMTRUS0xfMjAyMDA2MDlUMjE1MDEyLlNBRkUvVDE0VEtMXzIwMjAwNjA5VDE3MjkxMV9CMDhfMTBtLmpwMg==\n" ] ], [ [ "Getting more information about the band", "_____no_output_____" ] ], [ [ "## Example of band information:\nprint(\"Example of information for \", band['filename'])\nprint(\"Name: \", band['details']['name'])\nprint(\"Start of wavelength: \", band['details']['start_wavelength_nm'], \" nm\")\nprint(\"End of wavelength: \", band['details']['end_wavelength_nm'], \" nm\")\nprint(\"Resolution: \", band['details']['pixel_size_m'], \" m/px\")", "Example of information for T14TKL_20200609T172911_B08_10m.jp2\nName: NIR\nStart of wavelength: 784.5 nm\nEnd of wavelength: 899.5 nm\nResolution: 10 m/px\n" ] ], [ [ "# Downoading the band as file", "_____no_output_____" ] ], [ [ "## Example of code to download the mentioned band\ndownload_one_band(product_id=one_product_id, file_id=band['id'], filename=band['filename'])", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a686b0d5385b717f9160664abcffd69f8e93085
99,723
ipynb
Jupyter Notebook
FERMAT-XiR.ipynb
skariel/Lensing
4c90e61b1694c393f665a04dd0659999a1c8e001
[ "Unlicense" ]
null
null
null
FERMAT-XiR.ipynb
skariel/Lensing
4c90e61b1694c393f665a04dd0659999a1c8e001
[ "Unlicense" ]
null
null
null
FERMAT-XiR.ipynb
skariel/Lensing
4c90e61b1694c393f665a04dd0659999a1c8e001
[ "Unlicense" ]
null
null
null
246.22963
53,698
0.921312
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
4a687587095a485fb9046482a2aab97c34b1ad8c
535,158
ipynb
Jupyter Notebook
FAI02_old/Lesson9/neural-style-GPU.ipynb
WNoxchi/Kawkasos
42c5070a8fa4a5e2d6386dc19d385e82a1d73fb2
[ "MIT" ]
7
2017-07-28T06:17:29.000Z
2021-03-19T08:43:07.000Z
FAI02_old/Lesson9/neural-style-GPU.ipynb
WNoxchi/Kawkasos
42c5070a8fa4a5e2d6386dc19d385e82a1d73fb2
[ "MIT" ]
null
null
null
FAI02_old/Lesson9/neural-style-GPU.ipynb
WNoxchi/Kawkasos
42c5070a8fa4a5e2d6386dc19d385e82a1d73fb2
[ "MIT" ]
1
2018-06-17T12:08:25.000Z
2018-06-17T12:08:25.000Z
1,386.419689
525,838
0.946291
[ [ [ "Wayne H Nixalo - 09 Aug 2017\n\nThis JNB is an attempt to do the neural artistic style transfer and super-resolution examples done in class, on a GPU using PyTorch for speed.\n\nLesson NB: [neural-style-pytorch](https://github.com/fastai/courses/blob/master/deeplearning2/neural-style-pytorch.ipynb)", "_____no_output_____" ], [ "## Neural Style Transfer\n\nStyle Transfer / Super Resolution Implementation in PyTorch", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport importlib\nimport os, sys; sys.path.insert(1, os.path.join('../utils'))\nfrom utils2 import *", "Using TensorFlow backend.\n/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/sklearn/cross_validation.py:44: DeprecationWarning: This module was deprecated in version 0.18 in favor of the model_selection module into which all the refactored classes and functions are moved. Also note that the interface of the new CV iterators are different from that of this module. This module will be removed in 0.20.\n \"This module will be removed in 0.20.\", DeprecationWarning)\n" ], [ "import torch, torch.nn as nn, torch.nn.functional as F, torch.optim as optim\nfrom torch.autograd import Variable\nfrom torch.utils.serialization import load_lua\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms, models, datasets", "_____no_output_____" ] ], [ [ "### Setup", "_____no_output_____" ] ], [ [ "path = '../data/nst/'\nfnames = pickle.load(open(path+'fnames.pkl','rb'))", "_____no_output_____" ], [ "img = Image.open(path + fnames[0]); img", "_____no_output_____" ], [ "rn_mean = np.array([123.68, 116.779, 103.939], dtype=np.float32).reshape((1,1,1,3))\npreproc = lambda x: (x - rn_mean)[:,:,:,::-1]\n\nimg_arr = preproc(np.expand_dims(np.array(img),0))\nshp = img_arr.shape\n\ndeproc = lambda x: x[:,:,:,::-1] + rn_mena", "_____no_output_____" ] ], [ [ "### Create Model", "_____no_output_____" ] ], [ [ "def download_convert_vgg16_model():\n model_url = 'http://cs.stanford.edu/people/jcjohns/fast-neural-style/models/vgg16.t7'\n file = get_file(model_url, cache_subdir='models')\n vgglua = load_lua(file).parameters()\n vgg = models.VGGFeature()\n for (src, dst) in zip(vgglua[0], vgg.parameters()): dst[:] = src[:]\n torch.save(vgg.state_dict(), path + 'vgg16_feature.pth')", "_____no_output_____" ], [ "url = 'https://s3-us-west-2.amazonaws.com/jcjohns-models/'\nfname = 'vgg16-00b39a1b.pth'\nfile = get_file(fname, url+fname, cache_subdir='models')", "Downloading data from https://s3-us-west-2.amazonaws.com/jcjohns-models/vgg16-00b39a1b.pth\n" ], [ "vgg = models.vgg.vgg16()\nvgg.load_state_dict(torch.load(file))\noptimizer = optim.Adam(vgg.parameters())", "_____no_output_____" ], [ "vgg.cuda();", "_____no_output_____" ], [ "arr_lr = bcolz.open(path + 'trn_resized_72.bc')[:]\narr_hr = bcolz.open(path + 'trn_resized_288.bc')[:]\n\narr = bcolz.open(dpath + 'trn_resized.bc')[:]", "_____no_output_____" ], [ "x = Variable(arr[0])\ny = model(x)", "_____no_output_____" ], [ "url = 'http://www.files.fast.ai/models/'\nfname = 'imagenet_class_index.json'\nfpath = get_file(fname, url + fname, cache_subdir='models')", "_____no_output_____" ], [ "class ResidualBlock(nn.Module):\n def __init__(self, num):\n super(ResideualBlock, self).__init__()\n self.c1 = nn.Conv2d(num, num, kernel_size=3, stride=1, padding=1)\n self.c2 = nn.Conv2d(num, num, kernel_size=3, stride=1, padding=1)\n self.b1 = nn.BatchNorm2d(num)\n self.b2 = nn.BatchNorm2d(num)\n \n def forward(self, x):\n h = F.relu(self.b1(self.c1(x)))\n h = self.b2(self.c2(h))\n 
return h + x\n\nclass FastStyleNet(nn.Module):\n def __init__(self):\n super(FastStyleNet, self).__init__()\n self.cs = [nn.Conv2d(3, 32, kernel_size=9, stride=1, padding=4),\n nn.Conv2d(32, 64, kernel_size=4, stride=2, padding=1),\n nn.Conv2d(64, 128, kernel_size=4, stride=2, padding1)]\n self.b1s = [nn.BatchNorm2d(i) for i in [32, 64, 128]]\n self.rs = [ResidualBlock(128) for i in range(5)]\n self.ds = [nn.ConvTranspose2d(64, 32, kernel_size=4, stride=2, padding=1),\n nn.ConvTranspose2d(64, 32, kernel_size=4, stride=2, padding=1)]\n self.b2s = [nn.BatchNorm2d(i) for i in [64, 32]]\n self.d3 = nn.Conv2d(32, 3, kernel_size=9, stride=1, padding=4)\n \n def forward(self, h):\n for i in range(3): h = F.relu(self.b1s[i](self.cs[i](x)))\n for r in self.rs: h = r(h)\n for i in range(2): h = F.relu(self.b2s[i](self.ds[i](x)))\n return self.d3(h)", "_____no_output_____" ] ], [ [ "### Loss Functions and Processing", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
4a6878ec1eaa3afe4c4d69ccd392030f3455ab07
17,159
ipynb
Jupyter Notebook
content/lessons/04/Class-Coding-Lab/CCL-Conditionals.ipynb
cspelz-su/Final-Project
01c4703ea69eb1f215800e4b21939e4836bc4c8c
[ "MIT" ]
null
null
null
content/lessons/04/Class-Coding-Lab/CCL-Conditionals.ipynb
cspelz-su/Final-Project
01c4703ea69eb1f215800e4b21939e4836bc4c8c
[ "MIT" ]
null
null
null
content/lessons/04/Class-Coding-Lab/CCL-Conditionals.ipynb
cspelz-su/Final-Project
01c4703ea69eb1f215800e4b21939e4836bc4c8c
[ "MIT" ]
null
null
null
35.972746
528
0.578472
[ [ [ "# In-Class Coding Lab: Conditionals\n\nThe goals of this lab are to help you to understand:\n\n- Relational and Logical Operators \n- Boolean Expressions\n- The if statement\n- Try / Except statement\n- How to create a program from a complex idea.\n", "_____no_output_____" ], [ "# Understanding Conditionals\n\nConditional statements permit the non-linear execution of code. Take the following example, which detects whether the input integer is odd or even:", "_____no_output_____" ] ], [ [ "number = int(input(\"Enter an integer: \"))\nif number %2==0:\n print(\"%d is even\" % (number))\nelse:\n print(\"%d is odd\" % (number))", "Enter an integer: 5\n5 is odd\n" ] ], [ [ "###### Make sure to run the cell more than once, inputting both an odd and even integers to try it out. After all, we don't know if the code really works until we test out both options. \n\nOn line 2, you see `number %2 == 0` this is a Boolean expression at the center of the logic of this program. The expression says **number when divided by 2 has a reminder (%) equal to (==) zero**. The key to deciphering this is knowing how the `%` and `==` operators work. Understanding the basics, such as these, areessential to problem solving with programming, for once you understand the basics programming becomes an exercise in assembling them together into a workable solution.\n\nThe `if` statement evaluates this Boolean expression and when the expression is `True`, Python executes all of the code indented underneath the `if`. In the event the Boolean expression is `False`, Python executes the code indented under the `else`.\n\n\n## Now Try It\n\nWrite a similar program to input a integer and print \"Zero or Positive\" when the number is greater than or equal to zero, and \"Negative\" otherwise.\n\nTo accomplish this you **must** write a Boolean expression for **number greater than or equal to zero**, which is left up to the reader.", "_____no_output_____" ] ], [ [ "# TODO write your program here:\nnumber = int(input(\"Enter an integer: \"))\nif number >=0:\n print(\"%d is greater than or equal to zero\" % (number))\nelse:\n print(\"%d is not greater than or equal to zero\" % (number))", "Enter an integer: 5\n5 is greater than or equal to zero\n" ] ], [ [ "# Rock, Paper Scissors\n\nIn this part of the lab we'll build out a game of Rock, Paper, Scissors. If you're not familiar with the game, I suggest reading this: [https://en.wikipedia.org/wiki/Rock%E2%80%93paper%E2%80%93scissor](https://en.wikipedia.org/wiki/Rock%E2%80%93paper%E2%80%93scissors) Knowledge of the game will help you understand the lab much better.\n\nThe objective of the lab is to teach you how to use conditionals but also get you thinking of how to solve problems with programming. We've said before its non-linear, with several attempts before you reach the final solution. You'll experience this first-hand in this lab as we figure things out one piece at a time and add them to our program.", "_____no_output_____" ] ], [ [ "## Here's our initial To-Do list, we've still got lots to figure out.\n# 1. computer opponent select one of \"rock\", \"paper\" or \"scissors\" at random\n# 2. you input one of \"rock\", \"paper\" or \"scissors\"\n# 3. play the game and determine a winnner... (not sure how to do this yet.)\n", "_____no_output_____" ] ], [ [ "## Randomizing the Computer's Selection \nLet's start by coding the TO-DO list. 
First we need to make the computer select from \"rock\", \"paper\" or \"scissors\" at random.\n\n\nTo accomplish this, we need to use python's `random` library, which is documented here: [https://docs.python.org/3/library/random.html](https://docs.python.org/3/library/random.html) \nIt would appear we need to use the `choice()` function, which takes a sequence of choices and returns one at random. Let's try it out.", "_____no_output_____" ] ], [ [ "import random\nchoices = ['rock','paper','scissors']\ncomputer = random.choice(choices)\ncomputer", "_____no_output_____" ] ], [ [ "Run the cell a couple of times. It should make a random selection from `choices` each time you run it.\n\nHow did I figure this out? Well I started with a web search and then narrowed it down from the Python documentation. You're not there yet, but at some point in the course you will be. When you get there you will be able to teach yourself just about anything!", "_____no_output_____" ], [ "## Getting input and guarding against stupidity\n\nWith step one out of the way, its time to move on to step 2. Getting input from the user.", "_____no_output_____" ] ], [ [ "# 1. computer opponent select one of \"rock\", \"paper\" or \"scissors\" at random\nimport random\nchoices = ['rock','paper','scissors']\ncomputer = random.choice(choices)\n\n# 2. you input one of \"rock\", \"paper\" or \"scissors\"\nyou = input(\"Enter your choice: rock, paper, or scissors: \")\nprint(\"You chose %s and the computer chose %s\" % (you,computer))\n", "Enter your choice: rock, paper, or scissors: rock\nYou chose rock and the computer chose rock\n" ] ], [ [ "This is taking shape, but if you re-run the example and enter `pizza` you'll notice a problem. \n\nWe should guard against the situation when someone enters something other than 'rock', 'paper' or 'scissors' This is where our first conditional comes in to play.\n\n### In operator\n\nThe `in` operator returns a Boolean based on whether a value is in a list of values. Let's try it:\n", "_____no_output_____" ] ], [ [ "# TODO Try these:\n'rock' in choices, 'mike' in choices", "_____no_output_____" ] ], [ [ "### You Do It!\nNow modify the code below to only print your and the computer's selections when your input is one of the valid choices. Replace `TODO` on line `8` with a correct Boolean expression to verify what you entered is one of the valid choices.", "_____no_output_____" ] ], [ [ "# 1. computer opponent select one of \"rock\", \"paper\" or \"scissors\" at random\nimport random\nchoices = ['rock','paper','scissors']\ncomputer = random.choice(choices)\n\n# 2. you input one of \"rock\", \"paper\" or \"scissors\"\nyou = input(\"Enter your choice: rock, paper, or scissors: \")\nif (you in choices):\n print(\"You chose %s and the computer chose %s\" % (you,computer))\n # 3. play the game and determine a winnner... (not sure how to do this yet.)\nelse: \n print(\"You didn't enter 'rock', 'paper' or 'scissors'!!!\")", "Enter your choice: rock, paper, or scissors: rock\nYou chose rock and the computer chose rock\n" ] ], [ [ "## Playing the game\n\nWith the input figured out, it's time to work our final step, playing the game. The game itself has some simple rules:\n\n- rock beats scissors (rock smashes scissors)\n- scissors beats paper (scissors cuts paper)\n- paper beats rock (paper covers rock)\n\nSo for example:\n\n- If you choose rock and the computer chooses paper, you lose because paper covers rock. 
\n- Likewise if you select rock and the computer choose scissors, you win because rock smashes scissors.\n- If you both choose rock, it's a tie.\n\n## It's too complicated!\n\nIt still might seem too complicated to program this game, so let's use a process called **problem simplification** where we solve an easier version of the problem, then as our understanding grows, we increase the complexity until we solve the entire problem.\n\nOne common way we simplify a problem is to constrain our input. If we force us to always choose 'rock', the program becomes a little easier to write. \n\n", "_____no_output_____" ] ], [ [ "# 1. computer opponent select one of \"rock\", \"paper\" or \"scissors\" at random\nimport random\nchoices = ['rock','paper','scissors']\ncomputer = random.choice(choices)\n\n# 2. you input one of \"rock\", \"paper\" or \"scissors\"\n# for now, make this 'rock'\nyou = 'rock' #input(\"Enter your choice: rock, paper, or scissors: \")\nif (you in choices): \n print(\"You chose %s and the computer chose %s\" % (you,computer))\n \n # 3. play the game and determine a winnner (assuming rock only for user)\n if (you == 'rock' and computer == 'scissors'):\n print(\"You win! Rock smashes scissors.\")\n elif (you == 'rock' and computer=='paper'):\n print(\"You lose! Paper covers rock.\")\n else:\n print(\"It's a tie!\") \nelse: \n print(\"You didn't enter 'rock', 'paper' or 'scissors'!!!\")", "You chose rock and the computer chose scissors\nYou win! Rock smashes scissors.\n" ] ], [ [ "Run the code in the cell above enough times to verify it works. (You win, you lose and you tie.) That will ensure the code you have works as intended.\n\n## Paper: Making the program a bit more complex.\n\nWith the rock logic out of the way, its time to focus on paper. We will assume you always type `paper` and then add the conditional logic to our existing code handle it.\n\nAt this point you might be wondering should I make a separate `if` statement or should I chain the conditions off the current if with `elif` ? Since this is part of the same input, it should be an extension of the existing `if` statement. You should **only** introduce an additional conditional if you're making a separate decision, for example asking the user if they want to play again. Since this is part of the same decision (did you enter 'rock', 'paper' or 'scissors' it should be in the same `if...elif` ladder.\n\n\n### You Do It\n\nIn the code below, I've added the logic to address your input of 'paper' You only have to replace the `TODO` in the `print()` statements with the appropriate message. ", "_____no_output_____" ] ], [ [ "# 1. computer opponent select one of \"rock\", \"paper\" or \"scissors\" at random\nimport random\nchoices = ['rock','paper','scissors']\ncomputer = random.choice(choices)\n\n# 2. you input one of \"rock\", \"paper\" or \"scissors\"\n# for now, make this 'rock'\nyou = 'paper' #input(\"Enter your choice: rock, paper, or scissors: \")\nif (you in choices): \n print(\"You chose %s and the computer chose %s\" % (you,computer))\n\n # 3. play the game and determine a winnner (assuming paper only for user)\n if (you == 'rock' and computer == 'scissors'):\n print(\"You win! Rock smashes scissors.\")\n elif (you == 'rock' and computer=='paper'):\n print(\"You lose! Paper covers rock.\")\n elif (you == 'paper' and computer =='rock'):\n print(\"You win! Paper covers rock\")\n elif (you == 'paper' and computer == 'scissors'):\n print(\"You lose! 
Scissors cut paper\")\n    else:\n        print(\"It's a tie!\") \nelse: \n    print(\"You didn't enter 'rock', 'paper' or 'scissors'!!!\")", "You chose paper and the computer chose paper\nIt's a tie!\n" ] ], [ [ "## The final program\n\nWith the 'rock' and 'paper' cases out of the way, we only need to add 'scissors' logic. We leave this part to you as your final exercise. \n\nSimilar to the 'paper' example, you will need to add two `elif` statements to handle winning and losing when you select 'scissors', and you should also include the appropriate output messages.\n\n", "_____no_output_____" ] ], [ [ "# 1. computer opponent select one of \"rock\", \"paper\" or \"scissors\" at random\nimport random\nchoices = ['rock','paper','scissors']\ncomputer = random.choice(choices)\n\n# 2. you input one of \"rock\", \"paper\" or \"scissors\"\nyou = input(\"Enter your choice: rock, paper, or scissors: \")\nif (you in choices): \n    print(\"You chose %s and the computer chose %s\" % (you,computer))\n\n    # 3. play the game and determine a winner \n    if (you == 'rock' and computer == 'scissors'):\n        print(\"You win! Rock smashes scissors.\")\n    elif (you == 'rock' and computer=='paper'):\n        print(\"You lose! Paper covers rock.\")\n    elif (you == 'paper' and computer == 'rock'):\n        print(\"TODO\")\n    elif (you == 'paper' and computer == 'scissors'):\n        print(\"TODO\")\n    elif (you == 'scissors' and computer == 'rock'):\n        print(\"You lose! Rock smashes scissors.\")\n    elif (you == 'scissors' and computer == 'paper'):\n        print(\"You win! Scissors cut paper.\")\n    else:\n        print(\"It's a tie!\") \nelse: \n    print(\"You didn't enter 'rock', 'paper' or 'scissors'!!!\")", "Enter your choice: rock, paper, or scissors: rock\nYou chose rock and the computer chose paper\nYou lose! Paper covers rock.\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a688027dad14ea7765210715f750016f07e72f6
24,936
ipynb
Jupyter Notebook
FeatureVectorsComp.ipynb
gabilodeau/INF6804
126defd9397beafbf1c97ddeec6effe699da235d
[ "MIT" ]
5
2019-06-03T21:17:07.000Z
2022-01-30T19:43:56.000Z
FeatureVectorsComp.ipynb
gabilodeau/INF6804
126defd9397beafbf1c97ddeec6effe699da235d
[ "MIT" ]
null
null
null
FeatureVectorsComp.ipynb
gabilodeau/INF6804
126defd9397beafbf1c97ddeec6effe699da235d
[ "MIT" ]
5
2018-10-22T20:43:07.000Z
2022-01-19T03:29:15.000Z
24,936
24,936
0.869145
[ [ [ "<a href=\"https://colab.research.google.com/github/gabilodeau/INF6804/blob/master/FeatureVectorsComp.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "INF6804 Vision par ordinateur\n\nPolytechnique Montréal\n\nDistances entre histogrammes (L1, L2, MDPA, Bhattacharyya)", "_____no_output_____" ] ], [ [ "import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics.pairwise import cosine_similarity", "_____no_output_____" ] ], [ [ "Fonction pour calculer la distance MDPA", "_____no_output_____" ] ], [ [ "def distMDPA(V1, V2):\n Dist=0;\n for i in range(0,len(V1)):\n dint=0;\n for j in range(0,i):\n dint=dint+V1[j]-V2[j]\n Dist=Dist+abs(dint)\n return Dist;", "_____no_output_____" ] ], [ [ "Création de 5 vecteurs. On comparera avec Vecteur1 comme base.", "_____no_output_____" ] ], [ [ "Vecteur1 = np.array([3.0, 4.0, 3.0, 1.0, 6.0])\nVecteur2 = np.array([2.0, 5.0, 3.0, 1.0, 6.0])\nVecteur3 = np.array([2.0, 4.0, 3.0, 1.0, 7.0])\nVecteur4 = np.array([1.0, 5.0, 4.0, 1.0, 6.0])\nVecteur5 = np.array([3.0, 5.0, 2.0, 2.0, 5.0])", "_____no_output_____" ] ], [ [ "Distance ou norme L1. Les résultats seront affichés sur un graphique.", "_____no_output_____" ] ], [ [ "dist1 = cv2.norm(Vecteur1, Vecteur2, cv2.NORM_L1)\ndist2 = cv2.norm(Vecteur1, Vecteur3, cv2.NORM_L1)\ndist3 = cv2.norm(Vecteur1, Vecteur4, cv2.NORM_L1)\ndist4 = cv2.norm(Vecteur1, Vecteur5, cv2.NORM_L1)\n#Pour affichage...\nx = [0, 0.1, 0.2, 0.3]\ncolor = ['r','g','b','k']\ndist = [dist1, dist2, dist3, dist4]", "_____no_output_____" ] ], [ [ "Distance ou norme L2.", "_____no_output_____" ] ], [ [ "dist1 = cv2.norm(Vecteur1, Vecteur2, cv2.NORM_L2)\ndist2 = cv2.norm(Vecteur1, Vecteur3, cv2.NORM_L2)\ndist3 = cv2.norm(Vecteur1, Vecteur4, cv2.NORM_L2)\ndist4 = cv2.norm(Vecteur1, Vecteur5, cv2.NORM_L2)\nx = x + [1, 1.1, 1.2, 1.3]\ndist = dist + [dist1, dist2, dist3, dist4]\ncolor = color + ['r','g','b','k']", "_____no_output_____" ] ], [ [ "Distance MDPA (Maximum distance of pair assignments).", "_____no_output_____" ] ], [ [ "dist1 = distMDPA(Vecteur1, Vecteur2)\ndist2 = distMDPA(Vecteur1, Vecteur3)\ndist3 = distMDPA(Vecteur1, Vecteur4)\ndist4 = distMDPA(Vecteur1, Vecteur5)\nx = x + [2, 2.1, 2.2, 2.3]\ndist = dist + [dist1, dist2, dist3, dist4]\ncolor = color + ['r','g','b','k']", "_____no_output_____" ] ], [ [ "Distance de Bhattacharyya avec les valeurs normalisées entre 0 et 1. 
", "_____no_output_____" ] ], [ [ "Vecteur1 = Vecteur1/np.sum(Vecteur1)\nVecteur2 = Vecteur2/np.sum(Vecteur2)\nVecteur3 = Vecteur3/np.sum(Vecteur3)\nVecteur4 = Vecteur4/np.sum(Vecteur3)\ndist1 = cv2.compareHist(Vecteur1.transpose().astype('float32'), Vecteur2.transpose().astype('float32'), cv2.HISTCMP_BHATTACHARYYA) \ndist2 = cv2.compareHist(Vecteur1.transpose().astype('float32'), Vecteur3.transpose().astype('float32'), cv2.HISTCMP_BHATTACHARYYA) \ndist3 = cv2.compareHist(Vecteur1.transpose().astype('float32'), Vecteur4.transpose().astype('float32'), cv2.HISTCMP_BHATTACHARYYA)\ndist4 = cv2.compareHist(Vecteur1.transpose().astype('float32'), Vecteur5.transpose().astype('float32'), cv2.HISTCMP_BHATTACHARYYA) \nx = x + [3, 3.1, 3.2, 3.3]\ndist = dist + [dist1, dist2, dist3, dist4]\ncolor = color + ['r','g','b', 'k']\n\n\n", "_____no_output_____" ] ], [ [ "Similarité cosinus.", "_____no_output_____" ] ], [ [ "dist1 = cosine_similarity(Vecteur1.reshape(1, -1), Vecteur2.reshape(1, -1)) \ndist2 = cosine_similarity(Vecteur1.reshape(1, -1), Vecteur3.reshape(1, -1)) \ndist3 = cosine_similarity(Vecteur1.reshape(1, -1), Vecteur4.reshape(1, -1))\ndist4 = cosine_similarity(Vecteur1.reshape(1, -1), Vecteur5.reshape(1, -1)) \n\nx = x + [4, 4.1, 4.2, 4.3]\ndist = dist + [dist1, dist2, dist3, dist4]\ncolor = color + ['r','g','b', 'k']", "_____no_output_____" ] ], [ [ "Affichage des distances.", "_____no_output_____" ] ], [ [ "plt.scatter(x, dist, c = color)\nplt.text(0,0, 'Distance L1')\nplt.text(0.8,1, 'Distance L2')\nplt.text(1.6,0, 'Distance MDPA')\nplt.text(2.6,0.5, 'Bhattacharyya')\nplt.text(3.8,0.3, 'Similarité\\n cosinus')\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a688622cf7586046166ff913890bb239921df5c
48,703
ipynb
Jupyter Notebook
Modulo1/SolucionGraficaPL.ipynb
valetsg/SimulacionM2018
027520ae9e2b5b2fdc0294b13474fe19d1cc73a2
[ "MIT" ]
null
null
null
Modulo1/SolucionGraficaPL.ipynb
valetsg/SimulacionM2018
027520ae9e2b5b2fdc0294b13474fe19d1cc73a2
[ "MIT" ]
null
null
null
Modulo1/SolucionGraficaPL.ipynb
valetsg/SimulacionM2018
027520ae9e2b5b2fdc0294b13474fe19d1cc73a2
[ "MIT" ]
null
null
null
135.286111
16,516
0.884504
[ [ [ "import matplotlib.pyplot as plt\n%matplotlib inline\nimport numpy as np", "_____no_output_____" ] ], [ [ "\\begin{equation}\n\\begin{array}{ll}\n\\min_{x_1,x_2} & -x_1-x_2 \\\\\n\\text{s. a. } & 50x_1+24x_2\\leq 2400 \\\\\n & 30x_1+33x_2\\leq 2100 \\\\\n & -x_1\\leq -45 \\\\\n & -x_2\\leq -5,\n\\end{array}\n\\end{equation}", "_____no_output_____" ] ], [ [ "def x2_v1(x1):\n return (2400 - 50*x1)/24\ndef x2_v2(x1):\n return (2100 - 30*x1)/33", "_____no_output_____" ], [ "x1 = np.linspace(43, 47)", "_____no_output_____" ], [ "x1 = np.linspace(0, 100)", "_____no_output_____" ], [ "plt.plot(x1, x2_v1(x1), 'magenta', label = \"Eq1\") #desigualdad 1\nplt.plot(x1, x2_v2(x1), label = \"Eq2\") # desigualdad 2\n#plt.plot([45, 45], [0, 25], 'k')\n#plt.plot([43, 47], [5, 5], 'r')\n#plt.xlim(xmin = 44, xmax = 46)\n#plt.ylim(ymin = 4.5, ymax = 6.5)\n#plt.scatter([45], [6.25], color = 'red')\n#plt.fill_between(np.array([45, 45.6]), x2_v1(np.array([45, 45.6])),\n# 5*np.ones(2), alpha = .2, color = 'orange')\n\nplt.hlines(5, 0, 100, label = \"Eq4\")\nplt.vlines(45, -100, 100, color = \"r\", label = \"Eq3\")\nplt.legend()\nplt.xlabel(\"$x_1$\", fontsize = 18)\nplt.ylabel(\"$x_2$\", fontsize = 18)\nplt.show()", "_____no_output_____" ], [ "plt.plot(x1, x2_v1(x1), 'magenta', label = \"Eq1\") #desigualdad 1\nplt.plot(x1, x2_v2(x1), label = \"Eq2\") # desigualdad 2\nplt.hlines(5, 0, 100, label = \"Eq4\")\nplt.vlines(45, -100, 100, color = \"r\", label = \"Eq3\")\nplt.legend()\nplt.xlabel(\"$x_1$\", fontsize = 18)\nplt.ylabel(\"$x_2$\", fontsize = 18)\nplt.xlim(xmin = 44, xmax = 70)\nplt.ylim(ymin = 2, ymax = 48)\nplt.show()", "_____no_output_____" ], [ "plt.plot(x1, x2_v1(x1), 'magenta', label = \"Eq1\") #desigualdad 1\nplt.plot(x1, x2_v2(x1), label = \"Eq2\") # desigualdad 2\nplt.hlines(5, 0, 100, label = \"Eq4\")\nplt.vlines(45, -100, 100, color = \"r\", label = \"Eq3\")\nplt.scatter([45], [6.25], color = 'k', s = 105)\n#plt.fill_between(np.array([45, 45.6]), x2_v1(np.array([45, 45.6])),\n# 5*np.ones(2), alpha = .2, color = 'orange')\nplt.legend()\nplt.xlabel(\"$x_1$\", fontsize = 18)\nplt.ylabel(\"$x_2$\", fontsize = 18)\nplt.xlim(xmin = 44, xmax = 47)\nplt.ylim(ymin = 2, ymax = 10)\nplt.grid()\nplt.show()", "_____no_output_____" ], [ "import scipy.optimize as opt", "_____no_output_____" ], [ "c = np.array([-1, -1]) ## Coeficientes de la función objetivo\nA = np.array([[50, 24], [30, 33]])\nb = np.array([2400, 2100])\nx1_bound = (45, None)\nx2_bound = (5, None)", "_____no_output_____" ], [ "c", "_____no_output_____" ], [ "A", "_____no_output_____" ], [ "B = np.array([[2, 1, 0],[0, 1, -2],[1, 0, 4]]); B", "_____no_output_____" ], [ "opt.linprog?", "_____no_output_____" ], [ "res = opt.linprog(c, A_ub = A, b_ub = b,\n bounds=(x1_bound, x2_bound), options={'disp':True})", "Optimization terminated successfully.\n Current function value: -51.250000 \n Iterations: 4\n" ], [ "res", "_____no_output_____" ], [ "res.x", "_____no_output_____" ] ], [ [ "## Actividad", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
4a689f832fd3ac7dfdd8ca0b435d14062ab02165
9,562
ipynb
Jupyter Notebook
ihaskell-display/ihaskell-widgets/Examples/00 Introduction to Widgets.ipynb
brandon-leapyear/IHaskell
0832764259967449a27fd595bc027d0aea67e10d
[ "MIT" ]
1,972
2015-01-02T17:57:26.000Z
2022-03-15T19:06:25.000Z
ihaskell-display/ihaskell-widgets/Examples/00 Introduction to Widgets.ipynb
brandon-leapyear/IHaskell
0832764259967449a27fd595bc027d0aea67e10d
[ "MIT" ]
742
2015-01-02T16:54:02.000Z
2022-03-14T18:24:42.000Z
ihaskell-display/ihaskell-widgets/Examples/00 Introduction to Widgets.ipynb
brandon-leapyear/IHaskell
0832764259967449a27fd595bc027d0aea67e10d
[ "MIT" ]
284
2015-01-02T16:57:35.000Z
2022-03-01T11:36:28.000Z
23.378973
230
0.556474
[ [ [ "# The IPython widgets, now in IHaskell !!", "_____no_output_____" ], [ "It is highly recommended that users new to jupyter/ipython take the *User Interface Tour* from the toolbar above (Help -> User Interface Tour).", "_____no_output_____" ], [ "> This notebook introduces the [IPython widgets](https://github.com/ipython/ipywidgets), as implemented in [IHaskell](https://github.com/gibiansky/IHaskell). The `Button` widget is also demonstrated as a live action example.", "_____no_output_____" ], [ "### The Widget Hierarchy\n\nThese are all the widgets available from IPython/Jupyter.", "_____no_output_____" ], [ "#### Uncategorized Widgets\n\n+ Button\n+ Image*Widget*\n+ Output*Widget*\n\n#### Box Widgets\n\n+ Box\n+ FlexBox\n+ Accordion\n+ Tab*Widget*\n\n#### Boolean Widgets\n\n+ CheckBox\n+ ToggleButton\n\n#### Integer Widgets\n\n+ IntText\n+ BoundedIntText\n+ IntProgress\n+ IntSlider\n+ IntRangeSlider\n\n#### Float Widgets\n\n+ FloatText\n+ BoundedFloatText\n+ FloatProgress\n+ FloatSlider\n+ FloatRangeSlider\n\n#### Selection Widgets\n\n+ Selection\n+ Dropdown\n+ RadioButtons\n+ Select\n+ SelectMultiple\n+ ToggleButtons\n\n#### String Widgets\n\n+ HTML*Widget*\n+ Latex*Widget*\n+ TextArea\n+ Text*Widget*", "_____no_output_____" ], [ "### Using Widgets", "_____no_output_____" ], [ "#### Necessary Extensions and Imports\n\nAll the widgets and related functions are available from a single module, `IHaskell.Display.Widgets`. It is strongly recommended that users use the `OverloadedStrings` extension, as widgets make extensive use of `Text`.", "_____no_output_____" ] ], [ [ "{-# LANGUAGE OverloadedStrings #-}\nimport IHaskell.Display.Widgets", "_____no_output_____" ] ], [ [ "The module can be imported unqualified. Widgets with common names, such as `Text`, `Image` etc. have a `-Widget` suffix to prevent name collisions.", "_____no_output_____" ], [ "#### Widget interface\n\nEach widget has different properties, but the surface level API is the same.\n\nEvery widget has:\n\n1. A constructor:\n An `IO <widget>` value/function of the form `mk<widget_name>`.\n2. A set of properties, which can be manipulated using `setField` and `getField`.\n\nThe `setField` and `getField` functions have nasty type signatures, but they can be used by just intuitively understanding them.", "_____no_output_____" ] ], [ [ ":t setField", "_____no_output_____" ] ], [ [ "The `setField` function takes three arguments:\n\n1. A widget\n2. A `Field`\n3. A value for the `Field`", "_____no_output_____" ] ], [ [ ":t getField", "_____no_output_____" ] ], [ [ "The `getField` function takes a `Widget` and a `Field` and returns the value of that `Field` for the `Widget`.", "_____no_output_____" ], [ "Another utility function is `properties`, which shows all properties of a widget.", "_____no_output_____" ] ], [ [ ":t properties", "_____no_output_____" ] ], [ [ "#### Displaying Widgets\n\nIHaskell automatically displays anything *displayable* given to it directly.", "_____no_output_____" ] ], [ [ "-- Showables\n1 + 2\n\"abc\"", "_____no_output_____" ] ], [ [ "Widgets can either be displayed this way, or explicitly using the `display` function from `IHaskell.Display`.", "_____no_output_____" ] ], [ [ "import IHaskell.Display\n:t display", "_____no_output_____" ] ], [ [ "#### Multiple displays\n\nA widget can be displayed multiple times. All these *views* are representations of a single object, and thus are linked.\n\nWhen a widget is created, a model representing it is created in the frontend. 
This model is used by all the views, and any modification to it propagates to all of them.", "_____no_output_____" ], [ "#### Closing widgets\n\nWidgets can be closed using the `closeWidget` function.", "_____no_output_____" ] ], [ [ ":t closeWidget", "_____no_output_____" ] ], [ [ "### Our first widget: `Button`", "_____no_output_____" ], [ "Let's play with buttons as a starting example:\n\nAs noted before, all widgets have a constructor of the form `mk<Widget>`. Thus, to create a `Button`, we use `mkButton`.", "_____no_output_____" ] ], [ [ "button <- mkButton -- Construct a Button\n:t button", "_____no_output_____" ] ], [ [ "Widgets can be displayed by just entering them into a cell.", "_____no_output_____" ] ], [ [ "button -- Display the button", "_____no_output_____" ] ], [ [ "To view a widget's properties, we use the `properties` function. It also shows the type represented by the `Field`, which generally are not visible in type signatures due to high levels of type-hackery.", "_____no_output_____" ] ], [ [ "-- The button widget has many properties.\nproperties button", "_____no_output_____" ] ], [ [ "Let's try making the button widget wider.", "_____no_output_____" ] ], [ [ "import qualified IHaskell.Display.Widgets.Layout as L\nbtnLayout <- getField button Layout\nsetField btnLayout L.Width $ Just \"100%\"", "_____no_output_____" ] ], [ [ "There is a lot that can be customized. For example:", "_____no_output_____" ] ], [ [ "setField button Description \"Click Me (._.\\\")\"\nsetField button ButtonStyle SuccessButton\nsetField btnLayout L.Border $ Just \"ridge 2px\"\nsetField btnLayout L.Padding $ Just \"10\"\nsetField btnLayout L.Height $ Just \"7em\"", "_____no_output_____" ] ], [ [ "The button widget also provides a click handler. We can make it do anything, except console input. Universally, no widget event can trigger console input.", "_____no_output_____" ] ], [ [ "setField button ClickHandler $ putStrLn \"fO_o\"\nbutton -- Displaying again for convenience", "_____no_output_____" ] ], [ [ "Now try clicking the button, and see the output.\n\n> Note: If you display to stdout using Jupyter Lab, it will be displayed in a log entry, not as the cell output.\n\nWe can't do console input, though, but you can always use another widget! See the other notebooks with examples for more information", "_____no_output_____" ] ], [ [ "setField button ClickHandler $ getLine >>= putStrLn", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a68bc526cde4a72542cf0c57df92d7f5af52bed
29,276
ipynb
Jupyter Notebook
examples/VARLiNGAM.ipynb
lenetherunaway/lingam
a27113c797f0218624fedc4f3790a37638038c0e
[ "MIT" ]
null
null
null
examples/VARLiNGAM.ipynb
lenetherunaway/lingam
a27113c797f0218624fedc4f3790a37638038c0e
[ "MIT" ]
null
null
null
examples/VARLiNGAM.ipynb
lenetherunaway/lingam
a27113c797f0218624fedc4f3790a37638038c0e
[ "MIT" ]
null
null
null
31.71831
893
0.450779
[ [ [ "# VARLiNGAM", "_____no_output_____" ], [ "## Import and settings\nIn this example, we need to import `numpy`, `pandas`, and `graphviz` in addition to `lingam`.", "_____no_output_____" ] ], [ [ "import os\nos.environ[\"PATH\"] += os.pathsep + '/Users/elena/opt/anaconda3/lib/python3.7/site-packages/graphviz'", "_____no_output_____" ], [ "from sklearn.preprocessing import StandardScaler", "_____no_output_____" ], [ "import numpy as np\nimport pandas as pd\nimport graphviz\nimport lingam\nfrom lingam.utils import make_dot, print_causal_directions, print_dagc\n\nprint([np.__version__, pd.__version__, graphviz.__version__, lingam.__version__])\n\nnp.set_printoptions(precision=3, suppress=True)\nnp.random.seed(8)", "['1.17.2', '0.25.1', '0.14', '1.2.1']\n" ] ], [ [ "## Test data\nWe create test data consisting of 5 variables.", "_____no_output_____" ] ], [ [ "from sklearn.preprocessing import StandardScaler", "_____no_output_____" ], [ "df = pd.read_csv('/Users/elena/Documents/Диплом/Data/russia.csv')", "_____no_output_____" ], [ "df['lnGDP'] = np.log(df['gdp'])\ndf['lnCO2'] = np.log(df['x5'])\ndf['lnEn'] = np.log(df['x11'])", "_____no_output_____" ], [ "df['dlnGDP']=df['lnGDP'].diff()\ndf['dlnCO2'] = df['lnCO2'].diff()\ndf['dlnEn'] = df['lnEn'].diff()\ndf['dlnTr'] = df['lntr'].diff()", "_____no_output_____" ], [ "X_raw = df[['temp', 'dlnGDP', 'dlnCO2', 'dlnEn', 'dlnTr']]", "_____no_output_____" ], [ "X_raw", "_____no_output_____" ], [ "standard_scaler = StandardScaler(with_std=False)\nX = standard_scaler.fit_transform(X_raw)", "_____no_output_____" ], [ "#X = np.array(df[['temp', 'lnGDP', 'lnCO2', 'lnEn']])", "_____no_output_____" ], [ "# B0 = [\n# [0,-0.12,0,0,0],\n# [0,0,0,0,0],\n# [-0.41,0.01,0,-0.02,0],\n# [0.04,-0.22,0,0,0],\n# [0.15,0,-0.03,0,0],\n# ]\n# B1 = [\n# [-0.32,0,0.12,0.32,0],\n# [0,-0.35,-0.1,-0.46,0.4],\n# [0,0,0.37,0,0.46],\n# [-0.38,-0.1,-0.24,0,-0.13],\n# [0,0,0,0,0],\n# ]\ncausal_order = [3, 2, 1, 0, 0]\n\n# data generated from B0 and B1\n#X = pd.read_csv('data/sample_data_var_lingam.csv')", "_____no_output_____" ] ], [ [ "## Causal Discovery\nTo run causal discovery, we create a `VARLiNGAM` object and call the `fit` method.", "_____no_output_____" ] ], [ [ "model = lingam.VARLiNGAM()\nmodel.fit(X[1:])", "_____no_output_____" ] ], [ [ "Using the `causal_order_` properties, we can see the causal ordering as a result of the causal discovery.", "_____no_output_____" ] ], [ [ "model.causal_order_", "_____no_output_____" ] ], [ [ "Also, using the `adjacency_matrices_` properties, we can see the adjacency matrix as a result of the causal discovery.", "_____no_output_____" ] ], [ [ "model.adjacency_matrices_", "_____no_output_____" ], [ "# B0\nmodel.adjacency_matrices_[0]", "_____no_output_____" ], [ "# B1\nmodel.adjacency_matrices_[1]", "_____no_output_____" ] ], [ [ "We can draw a causal graph by utility funciton.", "_____no_output_____" ] ], [ [ "labels = ['temp(t)', 'lnGDP(t)', 'lnCO2(t)', 'lnEn(t)', 'lnTr(t)']\nmake_dot(np.hstack(model.adjacency_matrices_), ignore_shape=True, lower_limit=0.05, labels=labels)", "_____no_output_____" ] ], [ [ "## Bootstrap", "_____no_output_____" ], [ "### Bootstrapping\nWe call `bootstrap()` method instead of `fit()`. 
Here, the second argument specifies the number of bootstrap sampling.", "_____no_output_____" ] ], [ [ "model = lingam.VARLiNGAM()\nresult = model.bootstrap(X[1:], 100)", "_____no_output_____" ], [ "labels = ['temp(t)', 'dlnGDP(t)', 'dlnCO2(t)', 'dlnEn(t)', 'dlnTr']", "_____no_output_____" ] ], [ [ "Since `BootstrapResult` object is returned, we can get the ranking of the causal directions extracted by `get_causal_direction_counts()` method. In the following sample code, `n_directions` option is limited to the causal directions of the top 8 rankings, and `min_causal_effect` option is limited to causal directions with a coefficient of 0.3 or more.", "_____no_output_____" ] ], [ [ "cdc = result.get_causal_direction_counts(n_directions=20, min_causal_effect=0.3, split_by_causal_effect_sign=True)", "_____no_output_____" ], [ "cdc", "_____no_output_____" ] ], [ [ "We can check the result by utility function.", "_____no_output_____" ] ], [ [ "print_causal_directions(cdc, 100, labels=labels)", "_____no_output_____" ] ], [ [ "Also, using the `get_directed_acyclic_graph_counts()` method, we can get the ranking of the DAGs extracted. In the following sample code, `n_dags` option is limited to the dags of the top 3 rankings, and `min_causal_effect` option is limited to causal directions with a coefficient of 0.2 or more.", "_____no_output_____" ] ], [ [ "dagc = result.get_directed_acyclic_graph_counts(n_dags=3, min_causal_effect=0.2, split_by_causal_effect_sign=True)", "_____no_output_____" ] ], [ [ "We can check the result by utility function.", "_____no_output_____" ] ], [ [ "print_dagc(dagc, 100, labels=labels)", "DAG[0]: 36.0%\n\ttemp(t) <--- dlnGDP(t) (b>0)\n\ttemp(t) <--- temp(t-1) (b<0)\n\ttemp(t) <--- dlnGDP(t-1) (b>0)\n\ttemp(t) <--- dlnEn(t-1) (b<0)\n\tdlnGDP(t) <--- dlnCO2(t-1) (b<0)\n\tdlnGDP(t) <--- dlnEn(t-1) (b>0)\n\tdlnCO2(t) <--- dlnEn(t) (b>0)\n\tdlnEn(t) <--- dlnGDP(t) (b>0)\n\tdlnEn(t) <--- dlnGDP(t-1) (b<0)\n\tdlnEn(t) <--- dlnCO2(t-1) (b<0)\n\tdlnEn(t) <--- dlnEn(t-1) (b>0)\nDAG[1]: 23.0%\n\ttemp(t) <--- temp(t-1) (b<0)\n\ttemp(t) <--- dlnGDP(t-1) (b>0)\n\tdlnGDP(t) <--- dlnCO2(t-1) (b<0)\n\tdlnGDP(t) <--- dlnEn(t-1) (b>0)\n\tdlnCO2(t) <--- dlnEn(t) (b>0)\n\tdlnEn(t) <--- dlnGDP(t-1) (b<0)\n\tdlnEn(t) <--- dlnCO2(t-1) (b<0)\n\tdlnEn(t) <--- dlnEn(t-1) (b>0)\nDAG[2]: 23.0%\n\ttemp(t) <--- temp(t-1) (b<0)\n\ttemp(t) <--- dlnGDP(t-1) (b>0)\n\tdlnGDP(t) <--- dlnCO2(t-1) (b<0)\n\tdlnGDP(t) <--- dlnEn(t-1) (b>0)\n\tdlnCO2(t) <--- dlnEn(t) (b>0)\n\tdlnEn(t) <--- dlnGDP(t) (b>0)\n\tdlnEn(t) <--- dlnGDP(t-1) (b<0)\n\tdlnEn(t) <--- dlnCO2(t-1) (b<0)\n\tdlnEn(t) <--- dlnEn(t-1) (b>0)\n" ] ], [ [ "Using the `get_probabilities()` method, we can get the probability of bootstrapping.", "_____no_output_____" ] ], [ [ "prob = result.get_probabilities(min_causal_effect=0.1)\nprint('Probability of B0:\\n', prob[0])\nprint('Probability of B1:\\n', prob[1])", "Probability of B0:\n [[0. 0.45 0.03 0.07]\n [0.31 0. 0.19 0.35]\n [0.02 0.24 0. 0.56]\n [0.06 0.24 0.44 0. ]]\nProbability of B1:\n [[1. 1. 0.29 0.45]\n [0.71 0.87 0.78 0.9 ]\n [0.15 0.53 0.57 0.77]\n [0.03 0.8 0.98 0.99]]\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a68cdcc4df511c49122cb76d627e99a312903b2
2,803
ipynb
Jupyter Notebook
Inventory Management System/IMS.ipynb
harsha-kaithal/Skill-India-AI-ML-Scholarship
5af204f9bc7e4d1fd898847f9ee359ce1b426fe1
[ "Apache-2.0" ]
305
2021-08-23T14:11:49.000Z
2022-03-24T17:47:32.000Z
Inventory Management System/IMS.ipynb
Shrijayanth-Suresh/Skill-India-AI-ML-Scholarship
8d5851ebe12e452cc38a789ee4e67b6951d95d1c
[ "Apache-2.0" ]
1
2021-09-04T14:28:51.000Z
2021-09-04T14:28:51.000Z
Inventory Management System/IMS.ipynb
Shrijayanth-Suresh/Skill-India-AI-ML-Scholarship
8d5851ebe12e452cc38a789ee4e67b6951d95d1c
[ "Apache-2.0" ]
462
2021-08-23T14:15:46.000Z
2022-03-25T06:54:21.000Z
22.97541
103
0.407064
[ [ [ "fd = open(\"Records.txt\",\"r\")\ntxt = fd.read()\nfd.close()", "_____no_output_____" ], [ "products = txt.split(\"\\n\")\n\nui_prod = str(input(\"Enter the product_Id: \"))\nui_quant = input(\"Enter the quantity: \")\n\nfor product in products:\n prod = product.split(\",\")\n \n \n if(prod[0] == ui_prod):\n print(\"***********************\")\n print(\"Product ID: \", prod[0])\n print(\"Name: \", prod[1])\n print(\"Price: \",prod[2])\n print(\"Quant: \",ui_quant)\n print(\"------------------------\")\n print(\"Billing Amout: \", ui_quant * int(prod[2]))\n print(\"***********************\")\n ", "Enter the product_Id: 1002\nEnter the quantity: 3\n***********************\n('Product ID: ', '1002')\n('Name: ', 'Cake')\n('Price: ', '300')\n('Quant: ', 3)\n------------------------\n('Billing Amout: ', 900)\n***********************\n" ], [ "new_record = []\n\nfor product in products:\n prod = product.split(\",\")\n if(ui_prod == prod[0]):\n prod[3] = str(int(prod[3]) - ui_quant)\n \n new_record.append(prod[0] + \",\" + prod[1] + \",\" + prod[2] + \",\" + prod[3] + \"\\n\")\n \nnew_record[-1] = new_record[-1][:-1]", "_____no_output_____" ], [ "fd = open(\"Records.txt\", 'w')\n\nfor i in new_record:\n fd.write(i)\n \nfd.close()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
4a68d40023fa5e27685e34537aa36b43a3522b61
50,867
ipynb
Jupyter Notebook
notebooks/contanimate_DNS.ipynb
alistairwgillespie/deep_dga_detection
0dfda4a26113dad179266b4afafda29ca00f6ae3
[ "MIT" ]
2
2020-10-18T19:29:46.000Z
2020-11-30T09:36:58.000Z
notebooks/contanimate_DNS.ipynb
alistairwgillespie/deep_dga_detection
0dfda4a26113dad179266b4afafda29ca00f6ae3
[ "MIT" ]
1
2021-03-20T01:50:12.000Z
2021-03-20T01:50:12.000Z
notebooks/contanimate_DNS.ipynb
alistairwgillespie/deep_dga_detection
0dfda4a26113dad179266b4afafda29ca00f6ae3
[ "MIT" ]
2
2020-07-07T07:46:20.000Z
2021-02-03T16:01:27.000Z
35.348853
228
0.366603
[ [ [ "# Contanimate DNS Data", "_____no_output_____" ] ], [ [ "\"\"\"\nMake dataset pipeline\n\"\"\"\nimport pandas as pd\nimport numpy as np\nimport os\nfrom collections import Counter\nimport math", "_____no_output_____" ], [ "import torch\nfrom torch.utils.data import DataLoader\nfrom torch.nn.utils.rnn import pad_sequence\nfrom dga.models.dga_classifier import DGAClassifier\nfrom dga.datasets.domain_dataset import DomainDataset", "_____no_output_____" ], [ "!pip install tldextract", "Requirement already satisfied: tldextract in c:\\users\\gilleal\\appdata\\local\\continuum\\anaconda3\\envs\\threat_science\\lib\\site-packages (2.2.3)\nRequirement already satisfied: requests-file>=1.4 in c:\\users\\gilleal\\appdata\\local\\continuum\\anaconda3\\envs\\threat_science\\lib\\site-packages (from tldextract) (1.5.1)\nRequirement already satisfied: requests>=2.1.0 in c:\\users\\gilleal\\appdata\\local\\continuum\\anaconda3\\envs\\threat_science\\lib\\site-packages (from tldextract) (2.24.0)\nRequirement already satisfied: idna in c:\\users\\gilleal\\appdata\\local\\continuum\\anaconda3\\envs\\threat_science\\lib\\site-packages (from tldextract) (2.10)\nRequirement already satisfied: six in c:\\users\\gilleal\\appdata\\local\\continuum\\anaconda3\\envs\\threat_science\\lib\\site-packages (from requests-file>=1.4->tldextract) (1.15.0)\nRequirement already satisfied: certifi>=2017.4.17 in c:\\users\\gilleal\\appdata\\local\\continuum\\anaconda3\\envs\\threat_science\\lib\\site-packages (from requests>=2.1.0->tldextract) (2020.6.20)\nRequirement already satisfied: chardet<4,>=3.0.2 in c:\\users\\gilleal\\appdata\\local\\continuum\\anaconda3\\envs\\threat_science\\lib\\site-packages (from requests>=2.1.0->tldextract) (3.0.4)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in c:\\users\\gilleal\\appdata\\local\\continuum\\anaconda3\\envs\\threat_science\\lib\\site-packages (from requests>=2.1.0->tldextract) (1.25.10)\n" ], [ "import tldextract", "_____no_output_____" ], [ "df = pd.read_csv(\"../data/raw/dns.csv\")\n\na_aaaa_df = df.loc[(df.qtype_name == 'A') | (df.qtype_name == 'AAAA')]\n\n# Take subset by nxdomain response\nnxdomain_df = a_aaaa_df.loc[(df['rcode_name'] == 'NXDOMAIN')]\n\n# Drop subset from full records \na_aaaa_df = a_aaaa_df[a_aaaa_df['rcode_name'] != 'NXDOMAIN'] ", "_____no_output_____" ], [ "# Load known DGAs\nmal_df = pd.read_csv(\"../data/processed/validation.csv\")\nmal_df = mal_df.loc[mal_df['label'] == 1]", "_____no_output_____" ], [ "# Inject dga domains randomly\nnxdomain_df['query'] = np.random.choice(list(mal_df['domain'].values), len(nxdomain_df))", "_____no_output_____" ], [ "# Put dataset back together\na_aaaa_df = pd.concat([a_aaaa_df, nxdomain_df])\n# a_aaaa_df['domain_name'] = a_aaaa_df['query'].str.replace('www.', '')\n\na_aaaa_df.drop(['QR', 'AA', 'TC', 'RD', 'Z', 'answers'], axis=1, inplace=True)\na_aaaa_df.sort_values(by=['ts'])\n# a_aaaa_df['domain_name'].unique()\na_aaaa_df = a_aaaa_df.reset_index(drop=True)", "_____no_output_____" ], [ "def extract_domain(url):\n return tldextract.extract(url).domain\n\na_aaaa_df['domain'] = a_aaaa_df['query'].apply(extract_domain)", "_____no_output_____" ], [ "def extract_tld(url):\n return tldextract.extract(url).suffix\n\na_aaaa_df['tld'] = a_aaaa_df['query'].apply(extract_tld)", "_____no_output_____" ], [ "a_aaaa_df['domain_name'] = a_aaaa_df['domain'] + '.' 
+ a_aaaa_df['tld']", "_____no_output_____" ], [ "a_aaaa_df.head()", "_____no_output_____" ], [ "model_dir = '../models/'\nmodel_info = {}\nmodel_info_path = os.path.join(model_dir, '1595825381_dga_model_info.pth')\n\nwith open(model_info_path, 'rb') as f:\n model_info = torch.load(f)\n\nprint(\"model_info: {}\".format(model_info))\n\n# Determine the device and construct the model.\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nmodel = DGAClassifier(input_features=model_info['input_features'],\n hidden_dim=model_info['hidden_dim'],\n n_layers=model_info['n_layers'],\n output_dim=model_info['output_dim'],\n embedding_dim=model_info['embedding_dim'],\n batch_size=model_info['batch_size'])\n\n# Load the stored model parameters.\nmodel_path = os.path.join(model_dir, '1595825381_dga_model.pth')\nwith open(model_path, 'rb') as f:\n model.load_state_dict(torch.load(f))\n\n# set to eval mode, could use no_grad\nmodel.to(device).eval()", "model_info: {'input_features': 68, 'hidden_dim': 30, 'n_layers': 2, 'embedding_dim': 5, 'batch_size': 32, 'output_dim': 1}\n" ], [ "def entropy(s):\n p, lns = Counter(s), float(len(s))\n return -sum( count/lns * math.log(count/lns, 2) for count in p.values())", "_____no_output_____" ], [ "def pad_collate_pred(batch):\n x_lens = [len(x) for x in batch]\n xx_pad = pad_sequence(batch, batch_first=True, padding_value=0)\n return xx_pad, x_lens", "_____no_output_____" ], [ "def get_predict_loader(batch_size, df):\n print(\"Getting test and train data loaders.\")\n dataset = DomainDataset(df, train=False)\n predict_dl = DataLoader(dataset, batch_size=batch_size, shuffle=False, collate_fn=pad_collate_pred)\n return predict_dl", "_____no_output_____" ], [ "def get_prediction(df):\n predict_dl = get_predict_loader(1000, df)\n classes = {0: 'Benign', 1: 'DGA'}\n model.eval()\n predictions = []\n\n with torch.no_grad():\n for batch_num, (x_padded, x_lens) in enumerate(predict_dl):\n output = model(x_padded, x_lens)\n y_hat = torch.round(output.data)\n predictions += [classes[int(key)] for key in y_hat.flatten().numpy()]\n\n return predictions", "_____no_output_____" ], [ "a_aaaa_df = a_aaaa_df[~a_aaaa_df['domain_name'].str.contains('\\(')].reset_index(drop=True)", "_____no_output_____" ], [ "a_aaaa_df = a_aaaa_df[~a_aaaa_df['domain_name'].str.contains(',')].reset_index(drop=True)", "_____no_output_____" ], [ "a_aaaa_df[['domain_name']]", "_____no_output_____" ], [ "a_aaaa_df['dga'] = get_prediction(a_aaaa_df[['domain_name']])", "Getting test and train data loaders.\n" ], [ "a_aaaa_df['entropy'] = a_aaaa_df['domain_name'].apply(entropy)", "_____no_output_____" ], [ "print(a_aaaa_df.shape)\na_aaaa_df.head(25)", "(301032, 22)\n" ], [ "a_aaaa_df.to_csv('../data/processed/demo_dns_logs.csv', index=False)", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a68f04c24903bc14acd0a080fe97a4dd7dc01d3
2,759
ipynb
Jupyter Notebook
lessons/02-Variables/WMC2-String-Formatting.ipynb
IST256/fall2021
3bf2c7a480c53e6d45733c8ba292601ea98094b1
[ "Apache-2.0" ]
null
null
null
lessons/02-Variables/WMC2-String-Formatting.ipynb
IST256/fall2021
3bf2c7a480c53e6d45733c8ba292601ea98094b1
[ "Apache-2.0" ]
null
null
null
lessons/02-Variables/WMC2-String-Formatting.ipynb
IST256/fall2021
3bf2c7a480c53e6d45733c8ba292601ea98094b1
[ "Apache-2.0" ]
null
null
null
18.393333
59
0.422254
[ [ [ "# Watch Me Code 2: String Formatting", "_____no_output_____" ] ], [ [ "name = \"Mike\"\nage = 45\nsalary = 15.75\n", "_____no_output_____" ], [ "# string formatting\nprint(\"Hello there %s. How are you? \" % (name))", "Hello there Mike. How are you? \n" ], [ "# formatting redux\nprint(\"%s makes %f per hour.\" % (name, salary))", "Mike makes 15.750000 per hour.\n" ], [ "# let's use spacing\nprint(\"%s makes %.2f per hour.\" % (name, salary))", "Mike makes 15.75 per hour.\n" ], [ "# right alignment \nprint(\"-\" * 10) # print 10 dashes\nprint(\"%10d\" %(age))", "----------\n 45\n" ], [ "# left alignment\nprint(\"-\" * 10)\nprint(\"%-10d\" % (age))", "----------\n45 \n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
4a690c0965d9fbb94c48980f28d9f98adfc9a91e
165,605
ipynb
Jupyter Notebook
lessons/ETLPipelines/14_outlierspart2_exercise/14_outliers_exercise-solution.ipynb
s-arora-1987/DSND_Term2
3268d52d2271de59695f17b5bbfd618781295748
[ "MIT" ]
null
null
null
lessons/ETLPipelines/14_outlierspart2_exercise/14_outliers_exercise-solution.ipynb
s-arora-1987/DSND_Term2
3268d52d2271de59695f17b5bbfd618781295748
[ "MIT" ]
null
null
null
lessons/ETLPipelines/14_outlierspart2_exercise/14_outliers_exercise-solution.ipynb
s-arora-1987/DSND_Term2
3268d52d2271de59695f17b5bbfd618781295748
[ "MIT" ]
null
null
null
624.924528
115,888
0.944084
[ [ [ "# Eliminating Outliers\n\nEliminating outliers is a big topic. There are many different ways to eliminate outliers. A data engineer's job isn't necessarily to decide what counts as an outlier and what does not. A data scientist would determine that. The data engineer would code the algorithms that eliminate outliers from a data set based on any criteria that a data scientist has decided.\n\nIn this exercise, you'll write code to eliminate outliers based on the Tukey rule.\n\nRun the code cell below to read in the data and visualize the data.", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline \n\n# read in the projects data set and do basic wrangling \ngdp = pd.read_csv('../data/gdp_data.csv', skiprows=4)\ngdp.drop(['Unnamed: 62', 'Country Code', 'Indicator Name', 'Indicator Code'], inplace=True, axis=1)\npopulation = pd.read_csv('../data/population_data.csv', skiprows=4)\npopulation.drop(['Unnamed: 62', 'Country Code', 'Indicator Name', 'Indicator Code'], inplace=True, axis=1)\n\n\n# Reshape the data sets so that they are in long format\ngdp_melt = gdp.melt(id_vars=['Country Name'], \n var_name='year', \n value_name='gdp')\n\n# Use back fill and forward fill to fill in missing gdp values\ngdp_melt['gdp'] = gdp_melt.sort_values('year').groupby('Country Name')['gdp'].fillna(method='ffill').fillna(method='bfill')\n\npopulation_melt = population.melt(id_vars=['Country Name'], \n var_name='year', \n value_name='population')\n\n# Use back fill and forward fill to fill in missing population values\npopulation_melt['population'] = population_melt.sort_values('year').groupby('Country Name')['population'].fillna(method='ffill').fillna(method='bfill')\n\n# merge the population and gdp data together into one data frame\ndf_country = gdp_melt.merge(population_melt, on=('Country Name', 'year'))\n\n# filter data for the year 2016\ndf_2016 = df_country[df_country['year'] == '2016']\n\n# filter out values that are not countries\nnon_countries = ['World',\n 'High income',\n 'OECD members',\n 'Post-demographic dividend',\n 'IDA & IBRD total',\n 'Low & middle income',\n 'Middle income',\n 'IBRD only',\n 'East Asia & Pacific',\n 'Europe & Central Asia',\n 'North America',\n 'Upper middle income',\n 'Late-demographic dividend',\n 'European Union',\n 'East Asia & Pacific (excluding high income)',\n 'East Asia & Pacific (IDA & IBRD countries)',\n 'Euro area',\n 'Early-demographic dividend',\n 'Lower middle income',\n 'Latin America & Caribbean',\n 'Latin America & the Caribbean (IDA & IBRD countries)',\n 'Latin America & Caribbean (excluding high income)',\n 'Europe & Central Asia (IDA & IBRD countries)',\n 'Middle East & North Africa',\n 'Europe & Central Asia (excluding high income)',\n 'South Asia (IDA & IBRD)',\n 'South Asia',\n 'Arab World',\n 'IDA total',\n 'Sub-Saharan Africa',\n 'Sub-Saharan Africa (IDA & IBRD countries)',\n 'Sub-Saharan Africa (excluding high income)',\n 'Middle East & North Africa (excluding high income)',\n 'Middle East & North Africa (IDA & IBRD countries)',\n 'Central Europe and the Baltics',\n 'Pre-demographic dividend',\n 'IDA only',\n 'Least developed countries: UN classification',\n 'IDA blend',\n 'Fragile and conflict affected situations',\n 'Heavily indebted poor countries (HIPC)',\n 'Low income',\n 'Small states',\n 'Other small states',\n 'Not classified',\n 'Caribbean small states',\n 'Pacific island small states']\n\n# remove non countries from the data\ndf_2016 = df_2016[~df_2016['Country 
Name'].isin(non_countries)]\n\n\n# plot the data\nx = list(df_2016['population'])\ny = list(df_2016['gdp'])\ntext = df_2016['Country Name']\n\nfig, ax = plt.subplots(figsize=(15,10))\nax.scatter(x, y)\nplt.title('GDP vs Population')\nplt.xlabel('GDP')\nplt.ylabel('Population')\nfor i, txt in enumerate(text):\n ax.annotate(txt, (x[i],y[i]))", "_____no_output_____" ] ], [ [ "# Exercise\n\nWrite a function that uses the Tukey rule to eliminate outliers from an array of data.", "_____no_output_____" ] ], [ [ "# TODO: Write a function that uses the Tukey rule to detect outliers in a dataframe column \n# and then removes that entire row from the data frame. For example, if the United States \n# is detected to be a GDP outlier, then remove the entire row of United States data.\n# The function inputs should be a data frame and a column name.\n# The output is a data_frame with the outliers eliminated\n\n# HINT: Re-use code from the previous exercise\n\ndef tukey_rule(data_frame, column_name):\n data = data_frame[column_name]\n Q1 = data.quantile(0.25)\n Q3 = data.quantile(0.75)\n\n IQR = Q3 - Q1\n\n max_value = Q3 + 1.5 * IQR\n min_value = Q1 - 1.5 * IQR\n\n return data_frame[(data_frame[column_name] < max_value) & (data_frame[column_name] > min_value)]\n", "_____no_output_____" ] ], [ [ "Now use the function to eliminate population outliers and then gdp outliers from the dataframe. Store results in the df_outlier_removed variable.", "_____no_output_____" ] ], [ [ "# TODO: Use the tukey_rule() function to make a new data frame with gdp and population outliers removed\n# Put the results in the df_outlier_removed variable\n\ndf_outlier_removed = df_2016.copy()\n\nfor column in ['population', 'gdp']:\n df_outlier_removed = tukey_rule(df_outlier_removed, column)", "_____no_output_____" ] ], [ [ "Run the code cell below to plot the results.", "_____no_output_____" ] ], [ [ "# plot the data\nx = list(df_outlier_removed['population'])\ny = list(df_outlier_removed['gdp'])\ntext = df_outlier_removed['Country Name']\n\nfig, ax = plt.subplots(figsize=(15,10))\nax.scatter(x, y)\nplt.title('GDP vs Population')\nplt.xlabel('GDP')\nplt.ylabel('Population')\nfor i, txt in enumerate(text):\n ax.annotate(txt, (x[i],y[i]))", "_____no_output_____" ], [ "# ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
4a69335473f1478425e157a92b322babba69335a
45,885
ipynb
Jupyter Notebook
Exercise 1 Solution.ipynb
ColCarroll/pydata_nyc2017
8470c6be217b3092c2008943c402c8222c38e1b9
[ "MIT" ]
16
2017-11-29T20:10:28.000Z
2020-08-11T09:59:04.000Z
Exercise 1 Solution.ipynb
ColCarroll/pydata_nyc2017
8470c6be217b3092c2008943c402c8222c38e1b9
[ "MIT" ]
null
null
null
Exercise 1 Solution.ipynb
ColCarroll/pydata_nyc2017
8470c6be217b3092c2008943c402c8222c38e1b9
[ "MIT" ]
8
2017-11-29T20:36:55.000Z
2021-11-11T14:09:58.000Z
314.280822
22,738
0.926011
[ [ [ "%matplotlib inline", "_____no_output_____" ] ], [ [ "# Exercise 1\n\nYou can fit the length of the day reasonably well with a sine curve. The file `day_length.csv` has two years of day length data for Times Square (collected using `astropy` and `astroplan`). You will want to fit the data using the formula\n\n$$\n\\text{length_of_day} = A \\sin\\left(\\frac{2\\pi}{365} (\\text{day_of_year} - 81) \\right) + B\n$$\n\nNotice this is a 1-d linear model, with your feature being that sine term!\n\n1. Read the data with `pd.read_csv`, and plot the `length_of_day` column using `plt.plot`.\n2. Create a 730 x 1 `features` matrix using the above formula.\n3. Use `LinearRegression` to fit the above model. Find `A` (`.coef_`) and `B` (`.intercept_`)\n4. Plot your prediction along with the actual data.\n5. (bonus) Why might the model be a little wrong?\n6. (bonus) Can you engineer a better feature?", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.linear_model import LinearRegression", "_____no_output_____" ], [ "df = pd.read_csv('day_length.csv')\nplt.plot(df.length_of_day);", "_____no_output_____" ], [ "features = np.atleast_2d(np.sin(2 * np.pi / 365 * (df.day_of_year - 81))).T", "_____no_output_____" ], [ "reg = LinearRegression().fit(features, df.length_of_day)\nprint(\"A = {:,.1f}\\nB = {:,.1f}\".format(reg.coef_[0], reg.intercept_))", "A = 10,219.3\nB = 43,372.4\n" ], [ "plt.plot(df.length_of_day)\nplt.plot(reg.predict(features));", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
4a6946821fdd05219a44565442815edf1a0e8dbf
3,813
ipynb
Jupyter Notebook
notebooks/2.06.input_output.ipynb
sairamkonuru/aaic
e5b3feaf55b8834be2245437fecc3946ea8ad6d6
[ "Apache-2.0" ]
null
null
null
notebooks/2.06.input_output.ipynb
sairamkonuru/aaic
e5b3feaf55b8834be2245437fecc3946ea8ad6d6
[ "Apache-2.0" ]
null
null
null
notebooks/2.06.input_output.ipynb
sairamkonuru/aaic
e5b3feaf55b8834be2245437fecc3946ea8ad6d6
[ "Apache-2.0" ]
null
null
null
19.160804
99
0.47679
[ [ [ "# Python Input and Output", "_____no_output_____" ], [ "# Python Output", "_____no_output_____" ], [ "We use the print() function to output data to the standard output device", "_____no_output_____" ] ], [ [ "print(\"Hello World\")", "Hello World\n" ], [ "a = 10\nprint(\"The value of a is\", a) #python 3\nprint \"The value of a is \" + str(a)", "('The value of a is', 10)\nThe value of a is 10\n" ] ], [ [ "# Output Formatting", "_____no_output_____" ] ], [ [ "a = 10; b = 20 #multiple statements in single line.\n\nprint(\"The value of a is {} and b is {}\".format(a, b)) #default", "The value of a is 10 and b is 20\n" ], [ "a = 10; b = 20 #multiple statements in single line\n\nprint(\"The value of b is {1} and a is {0}\".format(a, b)) #specify position of arguments", "The value of b is 20 and a is 10\n" ], [ "#we can use keyword arguments to format the string\nprint(\"Hello {name}, {greeting}\".format(name=\"satish\", greeting=\"Good Morning\"))", "Hello satish, Good Morning\n" ], [ "#we can combine positional arguments with keyword arguments\nprint('The story of {0}, {1}, and {other}'.format('Bill', 'Manfred',\n other='Georg'))", "The story of Bill, Manfred, and Georg\n" ] ], [ [ "# Python Input", "_____no_output_____" ], [ "want to take the input from the user. In Python, we have the input() function to allow this. ", "_____no_output_____" ] ], [ [ "num = input(\"Enter a number: \")\nprint num", "Enter a number: 10\n10\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ] ]
4a6952a9e1e65d85fd87c29422d830b31bf747c5
46,430
ipynb
Jupyter Notebook
nativesql/0-WholeRun-RowBased.ipynb
oap-project/solution-navigator
22f173a3307f9c87662301785c8d1edab3ad7fa8
[ "Apache-2.0" ]
null
null
null
nativesql/0-WholeRun-RowBased.ipynb
oap-project/solution-navigator
22f173a3307f9c87662301785c8d1edab3ad7fa8
[ "Apache-2.0" ]
null
null
null
nativesql/0-WholeRun-RowBased.ipynb
oap-project/solution-navigator
22f173a3307f9c87662301785c8d1edab3ad7fa8
[ "Apache-2.0" ]
4
2021-04-27T05:26:59.000Z
2021-12-14T01:56:17.000Z
38.213992
135
0.438854
[ [ [ "###################### Query 1 #####################\n\nfrom pyspark.sql import SparkSession\nspark = SparkSession\\\n .builder\\\n .master('yarn-client')\\\n .appName(\"TPCH_Q1\")\\\n .getOrCreate()\ndf = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/lineitem\")\ndf.createOrReplaceTempView(\"lineitem\")\nquery = \"select \\\n l_returnflag, \\\n l_linestatus, \\\n sum(l_quantity) as sum_qty, \\\n sum(l_extendedprice) as sum_base_price, \\\n sum(l_extendedprice * (1 - l_discount)) as sum_disc_price, \\\n sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge, \\\n avg(l_quantity) as avg_qty, \\\n avg(l_extendedprice) as avg_price, \\\n avg(l_discount) as avg_disc, \\\n count(*) as count_order \\\n from \\\n lineitem \\\n where \\\n l_shipdate <= date '1998-09-02' \\\n group by \\\n l_returnflag, l_linestatus \\\n order by \\\n l_returnflag, l_linestatus\"\nsqlDF = spark.sql(query)\nprint(\"RowBased Process\")\n%time sqlDF.show()\n%time sqlDF.show()\nspark.stop()", "_____no_output_____" ], [ "###################### Query 2 #####################\n\nfrom pyspark.sql import SparkSession\nspark = SparkSession\\\n .builder\\\n .master('yarn-client')\\\n .appName(\"TPCH_Q2\")\\\n .getOrCreate()\np_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/part\")\np_df.createOrReplaceTempView(\"part\")\ns_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/supplier\")\ns_df.createOrReplaceTempView(\"supplier\")\nps_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/partsupp\")\nps_df.createOrReplaceTempView(\"partsupp\")\nn_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/nation\")\nn_df.createOrReplaceTempView(\"nation\")\nr_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/region\")\nr_df.createOrReplaceTempView(\"region\")\nquery = \"select \\\n s_acctbal, \\\n s_name, \\\n n_name, \\\n p_partkey, \\\n ps_supplycost, \\\n p_mfgr, \\\n s_address, \\\n s_phone, \\\n s_comment \\\n from \\\n part, \\\n supplier, \\\n partsupp, \\\n nation, \\\n region \\\n where \\\n p_partkey = ps_partkey \\\n and s_suppkey = ps_suppkey \\\n and p_size = 15 \\\n and p_type like '%BRASS' \\\n and s_nationkey = n_nationkey \\\n and n_regionkey = r_regionkey \\\n and r_name = 'EUROPE' \\\n and ps_supplycost = ( \\\n select \\\n min(ps_supplycost) \\\n from \\\n partsupp, \\\n supplier, \\\n nation, \\\n region \\\n where \\\n p_partkey = ps_partkey \\\n and s_suppkey = ps_suppkey \\\n and s_nationkey = n_nationkey \\\n and n_regionkey = r_regionkey \\\n and r_name = 'EUROPE') \\\n order by \\\n s_acctbal desc, n_name, s_name, p_partkey\"\nsqlDF = spark.sql(query)\nprint(\"RowBased Process\")\n%time sqlDF.show(100)\n#%time sqlDF.show(100)\nspark.stop()", "_____no_output_____" ], [ "###################### Query 3 #####################\n\nfrom pyspark.sql import SparkSession\nspark = SparkSession\\\n .builder\\\n .master('yarn-client')\\\n .appName(\"TPCH_Q3\")\\\n .getOrCreate()\nc_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/customer\")\nc_df.createOrReplaceTempView(\"customer\")\no_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/orders\")\no_df.createOrReplaceTempView(\"orders\")\nl_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/lineitem\")\nl_df.createOrReplaceTempView(\"lineitem\")\nquery = \"select \\\n l_orderkey, \\\n sum(l_extendedprice * (1 - l_discount)) as revenue, \\\n o_orderdate, \\\n o_shippriority \\\n from \\\n customer, orders, lineitem \\\n where \\\n c_mktsegment = 'BUILDING' \\\n and 
c_custkey = o_custkey \\\n and l_orderkey = o_orderkey \\\n and o_orderdate < date '1995-03-15' \\\n and l_shipdate > date '1995-03-15' \\\n group by \\\n l_orderkey, o_orderdate, o_shippriority \\\n order by \\\n revenue desc, o_orderdate\"\nsqlDF = spark.sql(query)\nprint(\"RowBased Process\")\n%time sqlDF.show(10)\n%time sqlDF.show(10)\nspark.stop()", "_____no_output_____" ], [ "###################### Query 4 #####################\n\nfrom pyspark.sql import SparkSession\nspark = SparkSession\\\n .builder\\\n .master('yarn-client')\\\n .appName(\"TPCH_Q4\")\\\n .getOrCreate()\norders_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/orders\")\ndf = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/lineitem\")\norders_df.createOrReplaceTempView(\"orders\")\ndf.createOrReplaceTempView(\"lineitem\")\nquery = \"select \\\n o_orderpriority, \\\n count(*) as order_count \\\n from \\\n orders \\\n where \\\n o_orderdate >= date '1993-07-01' \\\n and o_orderdate < date '1993-10-01' \\\n and exists \\\n (select * \\\n from \\\n lineitem \\\n where \\\n l_orderkey = o_orderkey \\\n and l_commitdate < l_receiptdate) \\\n group by \\\n o_orderpriority \\\n order by \\\n o_orderpriority\"\nsqlDF = spark.sql(query)\nprint(\"RowBased Process\")\n%time sqlDF.show()\n%time sqlDF.show()\nspark.stop()", "_____no_output_____" ], [ "###################### Query 5 #####################\n\nfrom pyspark.sql import SparkSession\nspark = SparkSession\\\n .builder\\\n .master('yarn-client')\\\n .appName(\"TPCH_Q5\")\\\n .getOrCreate()\nc_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/customer\")\nc_df.createOrReplaceTempView(\"customer\")\no_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/orders\")\no_df.createOrReplaceTempView(\"orders\")\nl_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/lineitem\")\nl_df.createOrReplaceTempView(\"lineitem\")\ns_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/supplier\")\ns_df.createOrReplaceTempView(\"supplier\")\nn_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/nation\")\nn_df.createOrReplaceTempView(\"nation\")\nr_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/region\")\nr_df.createOrReplaceTempView(\"region\")\nquery = \"select \\\n n_name, \\\n sum(l_extendedprice * (1 - l_discount)) as revenue \\\n from \\\n customer, \\\n orders, \\\n lineitem, \\\n supplier, \\\n nation, \\\n region \\\n where \\\n c_custkey = o_custkey \\\n and l_orderkey = o_orderkey \\\n and l_suppkey = s_suppkey \\\n and c_nationkey = s_nationkey \\\n and s_nationkey = n_nationkey \\\n and n_regionkey = r_regionkey \\\n and r_name = 'ASIA' \\\n and o_orderdate >= date '1994-01-01' \\\n and o_orderdate < date '1995-01-01' \\\n group by \\\n n_name \\\n order by \\\n revenue desc \"\nsqlDF = spark.sql(query)\nprint(\"RowBased Process\")\n%time sqlDF.show()\n%time sqlDF.show()\nspark.stop()", "_____no_output_____" ], [ "###################### Query 6 #####################\n\nfrom pyspark.sql import SparkSession\nspark = SparkSession\\\n .builder\\\n .master('yarn-client')\\\n .appName(\"TPCH_Q6\")\\\n .getOrCreate()\ndf = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/lineitem\")\ndf.createOrReplaceTempView(\"lineitem\")\nquery = \"select \\\n sum(l_extendedprice * l_discount) as revenue \\\n from \\\n lineitem \\\n where \\\n l_shipdate >= date '1994-01-01' \\\n and l_shipdate < date '1995-01-01' \\\n and l_discount between 0.05 and 0.07 \\\n and l_quantity < 24\"\nsqlDF = 
spark.sql(query)\nprint(\"RowBased Process\")\n%time sqlDF.show()\n%time sqlDF.show()\nspark.stop()", "_____no_output_____" ], [ "###################### Query 7 #####################\n\nfrom pyspark.sql import SparkSession\nspark = SparkSession\\\n .builder\\\n .master('yarn-client')\\\n .appName(\"TPCH_Q7\")\\\n .getOrCreate()\ns_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/supplier\")\ns_df.createOrReplaceTempView(\"supplier\")\nl_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/lineitem\")\nl_df.createOrReplaceTempView(\"lineitem\")\no_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/orders\")\no_df.createOrReplaceTempView(\"orders\")\nc_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/customer\")\nc_df.createOrReplaceTempView(\"customer\")\nn_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/nation\")\nn_df.createOrReplaceTempView(\"nation\")\nquery = \"select \\\n supp_nation, \\\n cust_nation, \\\n l_year, \\\n sum(volume) as revenue \\\n from \\\n (select \\\n n1.n_name as supp_nation, \\\n n2.n_name as cust_nation, \\\n year(l_shipdate) as l_year, \\\n l_extendedprice * (1 - l_discount) as volume \\\n from \\\n supplier, \\\n lineitem, \\\n orders, \\\n customer, \\\n nation n1, \\\n nation n2 \\\n where \\\n s_suppkey = l_suppkey \\\n and o_orderkey = l_orderkey \\\n and c_custkey = o_custkey \\\n and s_nationkey = n1.n_nationkey \\\n and c_nationkey = n2.n_nationkey \\\n and (\\\n (n1.n_name = 'FRANCE' and n2.n_name = 'GERMANY') \\\n or (n1.n_name = 'GERMANY' and n2.n_name = 'FRANCE') \\\n ) \\\n and l_shipdate between date '1995-01-01' and date '1996-12-31' \\\n ) as shipping \\\n group by \\\n supp_nation, \\\n cust_nation, \\\n l_year \\\n order by \\\n supp_nation, \\\n cust_nation, \\\n l_year\"\nsqlDF = spark.sql(query)\nprint(\"RowBased Process\")\n%time sqlDF.show()\n%time sqlDF.show()\nspark.stop()", "_____no_output_____" ], [ "###################### Query 8 #####################\n\nfrom pyspark.sql import SparkSession\nspark = SparkSession\\\n .builder\\\n .master('yarn-client')\\\n .appName(\"TPCH_Q8\")\\\n .getOrCreate()\np_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/part\")\np_df.createOrReplaceTempView(\"part\")\ns_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/supplier\")\ns_df.createOrReplaceTempView(\"supplier\")\nl_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/lineitem\")\nl_df.createOrReplaceTempView(\"lineitem\")\no_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/orders\")\no_df.createOrReplaceTempView(\"orders\")\nc_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/customer\")\nc_df.createOrReplaceTempView(\"customer\")\nn_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/nation\")\nn_df.createOrReplaceTempView(\"nation\")\nr_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/region\")\nr_df.createOrReplaceTempView(\"region\")\nquery = \"select \\\n o_year, \\\n sum(case when nation = ':1' then volume else 0 end) / sum(volume) as mkt_share \\\n from \\\n (select \\\n extract(year from o_orderdate) as o_year, \\\n l_extendedprice * (1 - l_discount) as volume, \\\n n2.n_name as nation \\\n from \\\n part, \\\n supplier, \\\n lineitem, \\\n orders, \\\n customer, \\\n nation n1, \\\n nation n2, \\\n region \\\n where \\\n p_partkey = l_partkey \\\n and s_suppkey = l_suppkey \\\n and l_orderkey = o_orderkey \\\n and o_custkey = c_custkey \\\n and c_nationkey = n1.n_nationkey \\\n and n1.n_regionkey = r_regionkey \\\n and 
r_name = ':2' \\\n and s_nationkey = n2.n_nationkey \\\n and o_orderdate between date '1995-01-01' and date '1996-12-31' \\\n and p_type = ':3' \\\n ) as all_nations \\\n group by \\\n o_year \\\n order by \\\n o_year\"\nsqlDF = spark.sql(query)\nprint(\"RowBased Process\")\n%time sqlDF.show()\n%time sqlDF.show()\nspark.stop()", "_____no_output_____" ], [ "###################### Query 9 #####################\n\nfrom pyspark.sql import SparkSession\nspark = SparkSession\\\n .builder\\\n .master('yarn-client')\\\n .appName(\"TPCH_Q9\")\\\n .getOrCreate()\np_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/part\")\np_df.createOrReplaceTempView(\"part\")\ns_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/supplier\")\ns_df.createOrReplaceTempView(\"supplier\")\nl_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/lineitem\")\nl_df.createOrReplaceTempView(\"lineitem\")\nps_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/partsupp\")\nps_df.createOrReplaceTempView(\"partsupp\")\no_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/orders\")\no_df.createOrReplaceTempView(\"orders\")\nn_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/nation\")\nn_df.createOrReplaceTempView(\"nation\")\nquery = \"select \\\n nation, \\\n o_year, \\\n sum(amount) as sum_profit \\\n from \\\n (select \\\n n_name as nation, \\\n year(o_orderdate) as o_year, \\\n l_extendedprice * (1 - l_discount) - ps_supplycost * l_quantity as amount \\\n from \\\n part, \\\n supplier, \\\n lineitem, \\\n partsupp, \\\n orders, \\\n nation \\\n where \\\n s_suppkey = l_suppkey \\\n and ps_suppkey = l_suppkey \\\n and ps_partkey = l_partkey \\\n and p_partkey = l_partkey \\\n and o_orderkey = l_orderkey \\\n and s_nationkey = n_nationkey \\\n and p_name like '%green%'\\\n ) as profit \\\n group by \\\n nation, \\\n o_year \\\n order by \\\n nation, o_year desc\"\nsqlDF = spark.sql(query)\nprint(\"RowBased Process\")\n%time sqlDF.show()\n%time sqlDF.show()\nspark.stop()", "_____no_output_____" ], [ "###################### Query 10 #####################\n\nfrom pyspark.sql import SparkSession\nspark = SparkSession\\\n .builder\\\n .master('yarn-client')\\\n .appName(\"TPCH_Q10\")\\\n .getOrCreate()\nc_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/customer\")\nc_df.createOrReplaceTempView(\"customer\")\no_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/orders\")\no_df.createOrReplaceTempView(\"orders\")\nl_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/lineitem\")\nl_df.createOrReplaceTempView(\"lineitem\")\nn_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/nation\")\nn_df.createOrReplaceTempView(\"nation\")\nquery = \"select \\\n c_custkey, \\\n c_name, \\\n sum(l_extendedprice * (1 - l_discount)) as revenue, \\\n c_acctbal, \\\n n_name, \\\n c_address, \\\n c_phone, \\\n c_comment \\\n from \\\n customer, \\\n orders, \\\n lineitem, \\\n nation \\\n where \\\n c_custkey = o_custkey \\\n and l_orderkey = o_orderkey \\\n and o_orderdate >= date '1993-10-01' \\\n and o_orderdate < date '1994-01-01' \\\n and l_returnflag = 'R' \\\n and c_nationkey = n_nationkey \\\n group by \\\n c_custkey, \\\n c_name, \\\n c_acctbal, \\\n c_phone, \\\n n_name, \\\n c_address, \\\n c_comment \\\n order by \\\n revenue desc\"\nsqlDF = spark.sql(query)\nprint(\"RowBased Process\")\n%time sqlDF.show(20)\n%time sqlDF.show(20)\nspark.stop()", "_____no_output_____" ], [ "###################### Query 11 #####################\n\nfrom pyspark.sql 
import SparkSession\nspark = SparkSession\\\n .builder\\\n .master('yarn-client')\\\n .appName(\"TPCH_Q11\")\\\n .getOrCreate()\np_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/partsupp\")\np_df.createOrReplaceTempView(\"partsupp\")\ns_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/supplier\")\ns_df.createOrReplaceTempView(\"supplier\")\nn_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/nation\")\nn_df.createOrReplaceTempView(\"nation\")\nquery = \"select \\\n ps_partkey, \\\n sum(ps_supplycost * ps_availqty) as value \\\n from \\\n partsupp, \\\n supplier, \\\n nation \\\n where \\\n ps_suppkey = s_suppkey \\\n and s_nationkey = n_nationkey \\\n and n_name = 'GERMANY' \\\n group by \\\n ps_partkey \\\n having \\\n sum(ps_supplycost * ps_availqty) > \\\n (select \\\n sum(ps_supplycost * ps_availqty) * 0.0001000000 \\\n from \\\n partsupp, \\\n supplier, \\\n nation \\\n where \\\n ps_suppkey = s_suppkey \\\n and s_nationkey = n_nationkey \\\n and n_name = 'GERMANY') \\\n order by \\\n value desc\"\nsqlDF = spark.sql(query)\nprint(\"RowBased Process\")\n%time sqlDF.show()\n%time sqlDF.show()\nspark.stop()", "_____no_output_____" ], [ "###################### Query 12 #####################\n\nfrom pyspark.sql import SparkSession\nspark = SparkSession\\\n .builder\\\n .master('yarn-client')\\\n .appName(\"TPCH_Q12\")\\\n .getOrCreate()\no_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/orders\")\no_df.createOrReplaceTempView(\"orders\")\nl_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/lineitem\")\nl_df.createOrReplaceTempView(\"lineitem\")\nquery = \"select \\\n l_shipmode, \\\n sum(case when o_orderpriority = '1-URGENT' or o_orderpriority = '2-HIGH' then 1 else 0 end) as high_line_count, \\\n sum(case when o_orderpriority <> '1-URGENT' and o_orderpriority <> '2-HIGH' then 1 else 0 end) as low_line_count \\\n from \\\n orders, \\\n lineitem \\\n where \\\n o_orderkey = l_orderkey \\\n and l_shipmode in ('MAIL', 'SHIP') \\\n and l_commitdate < l_receiptdate \\\n and l_shipdate < l_commitdate \\\n and l_receiptdate >= date '1994-01-01' \\\n and l_receiptdate < date '1995-01-01' \\\n group by \\\n l_shipmode \\\n order by \\\n l_shipmode\"\nsqlDF = spark.sql(query)\nprint(\"RowBased Process\")\n%time sqlDF.show()\n%time sqlDF.show()\nspark.stop()", "_____no_output_____" ], [ "###################### Query 13 #####################\n\nfrom pyspark.sql import SparkSession\nspark = SparkSession\\\n .builder\\\n .master('yarn-client')\\\n .appName(\"TPCH_Q13\")\\\n .getOrCreate()\nc_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/customer\")\nc_df.createOrReplaceTempView(\"customer\")\no_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/orders\")\no_df.createOrReplaceTempView(\"orders\")\nquery = \"select \\\n c_count, \\\n count(*) as custdist \\\n from \\\n (select \\\n c_custkey, \\\n count(o_orderkey) as c_count \\\n from \\\n customer \\\n left outer join \\\n orders \\\n on \\\n c_custkey = o_custkey \\\n and o_comment not like '%special%requests%' \\\n group by \\\n c_custkey) as c_orders \\\n group by \\\n c_count \\\n order by \\\n custdist desc, c_count desc\"\nsqlDF = spark.sql(query)\nprint(\"RowBased Process\")\n%time sqlDF.show()\n%time sqlDF.show()\nspark.stop()", "_____no_output_____" ], [ "###################### Query 14 #####################\n\nfrom pyspark.sql import SparkSession\nspark = SparkSession\\\n .builder\\\n .master('yarn-client')\\\n .appName(\"TPCH_Q14\")\\\n .getOrCreate()\nl_df = 
spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/lineitem\")\nl_df.createOrReplaceTempView(\"lineitem\")\np_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/part\")\np_df.createOrReplaceTempView(\"part\")\nquery = \"select \\\n 100.00 * sum( \\\n case when p_type like 'PROMO%' then l_extendedprice * (1 - l_discount) else 0 end \\\n ) / sum(l_extendedprice * (1 - l_discount)) \\\n as promo_revenue \\\n from \\\n lineitem, \\\n part \\\n where \\\n l_partkey = p_partkey \\\n and l_shipdate >= date '1995-09-01' \\\n and l_shipdate < date '1995-10-01'\"\nsqlDF = spark.sql(query)\nprint(\"RowBased Process\")\n%time sqlDF.show()\n%time sqlDF.show()\nspark.stop()", "_____no_output_____" ], [ "###################### Query 15 #####################\n\nfrom pyspark.sql import SparkSession\nspark = SparkSession\\\n .builder\\\n .master('yarn-client')\\\n .appName(\"TPCH_Q15\")\\\n .getOrCreate()\ns_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/supplier\")\ns_df.createOrReplaceTempView(\"supplier\")\nl_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/lineitem\")\nl_df.createOrReplaceTempView(\"lineitem\")\nquery = \"with revenue0 as \\\n (select \\\n l_suppkey as supplier_no, \\\n sum(l_extendedprice * (1 - l_discount)) as total_revenue \\\n from \\\n lineitem \\\n where \\\n l_shipdate >= date '1996-01-01' \\\n and l_shipdate < date '1996-04-01' \\\n group by \\\n l_suppkey) \\\n select \\\n s_suppkey, \\\n s_name, \\\n s_address, \\\n s_phone, \\\n total_revenue \\\n from \\\n supplier, \\\n revenue0 \\\n where \\\n s_suppkey = supplier_no \\\n and total_revenue = \\\n (select \\\n max(total_revenue) \\\n from \\\n revenue0) \\\n order by \\\n s_suppkey\"\nsqlDF = spark.sql(query)\nprint(\"RowBased Process\")\n%time sqlDF.show()\n%time sqlDF.show()\nspark.stop()", "_____no_output_____" ], [ "###################### Query 16 #####################\n\nfrom pyspark.sql import SparkSession\nspark = SparkSession\\\n .builder\\\n .master('yarn-client')\\\n .appName(\"TPCH_Q16\")\\\n .getOrCreate()\nc_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/partsupp\")\nc_df.createOrReplaceTempView(\"partsupp\")\nl_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/part\")\nl_df.createOrReplaceTempView(\"part\")\no_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/supplier\")\no_df.createOrReplaceTempView(\"supplier\")\nquery = \"select \\\n p_brand, \\\n p_type, \\\n p_size, \\\n count(distinct ps_suppkey) as supplier_cnt \\\n from \\\n partsupp, \\\n part \\\n where \\\n p_partkey = ps_partkey \\\n and p_brand <> 'Brand#45' \\\n and p_type not like 'MEDIUM POLISHED%' \\\n and p_size in (49, 14, 23, 45, 19, 3, 36, 9) \\\n and ps_suppkey not in \\\n (select \\\n s_suppkey \\\n from \\\n supplier \\\n where \\\n s_comment like '%Customer%Complaints%') \\\n group by \\\n p_brand, \\\n p_type, \\\n p_size \\\n order by \\\n supplier_cnt desc, p_brand, p_type, p_size\"\nsqlDF = spark.sql(query)\nprint(\"RowBased Process\")\n%time sqlDF.show()\n%time sqlDF.show()\nspark.stop()", "_____no_output_____" ], [ "###################### Query 17 #####################\n\nfrom pyspark.sql import SparkSession\nspark = SparkSession\\\n .builder\\\n .master('yarn-client')\\\n .appName(\"TPCH_Q17\")\\\n .getOrCreate()\nl_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/lineitem\")\nl_df.createOrReplaceTempView(\"lineitem\")\np_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/part\")\np_df.createOrReplaceTempView(\"part\")\nquery = \"select 
\\\n sum(l_extendedprice) / 7.0 as avg_yearly \\\n from \\\n lineitem, \\\n part \\\n where \\\n p_partkey = l_partkey \\\n and p_brand = 'Brand#23' \\\n and p_container = 'MED BOX' \\\n and l_quantity < ( \\\n select \\\n 0.2 * avg(l_quantity) \\\n from \\\n lineitem \\\n where \\\n l_partkey = p_partkey)\"\nsqlDF = spark.sql(query)\nprint(\"RowBased Process\")\n%time sqlDF.show()\n%time sqlDF.show()\nspark.stop()", "_____no_output_____" ], [ "###################### Query 18 #####################\n\nfrom pyspark.sql import SparkSession\nspark = SparkSession\\\n .builder\\\n .master('yarn-client')\\\n .appName(\"TPCH_Q18\")\\\n .getOrCreate()\nc_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/customer\")\nc_df.createOrReplaceTempView(\"customer\")\nl_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/lineitem\")\nl_df.createOrReplaceTempView(\"lineitem\")\no_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/orders\")\no_df.createOrReplaceTempView(\"orders\")\nquery = \"select \\\n c_name, \\\n c_custkey, \\\n o_orderkey, \\\n o_orderdate, \\\n o_totalprice, \\\n sum(l_quantity) \\\n from \\\n customer, \\\n orders, \\\n lineitem \\\n where \\\n o_orderkey in ( \\\n select \\\n l_orderkey \\\n from \\\n lineitem \\\n group by \\\n l_orderkey \\\n having \\\n sum(l_quantity) > 300 )\\\n and c_custkey = o_custkey \\\n and o_orderkey = l_orderkey \\\n group by \\\n c_name, \\\n c_custkey, \\\n o_orderkey, \\\n o_orderdate, \\\n o_totalprice \\\n order by \\\n o_totalprice desc, o_orderdate\"\nsqlDF = spark.sql(query)\nprint(\"RowBased Process\")\n%time sqlDF.show(100)\n%time sqlDF.show(100)\nspark.stop()", "_____no_output_____" ], [ "###################### Query 19 #####################\n\nfrom pyspark.sql import SparkSession\nspark = SparkSession\\\n .builder\\\n .master('yarn-client')\\\n .appName(\"TPCH_Q19\")\\\n .getOrCreate()\nl_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/lineitem\")\nl_df.createOrReplaceTempView(\"lineitem\")\np_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/part\")\np_df.createOrReplaceTempView(\"part\")\nquery = \"select \\\n sum(l_extendedprice* (1 - l_discount)) as revenue \\\n from \\\n lineitem, \\\n part where ( \\\n p_partkey = l_partkey \\\n and p_brand = 'Brand#12' \\\n and p_container in ('SM CASE', 'SM BOX', 'SM PACK', 'SM PKG') \\\n and l_quantity >= 1 \\\n and l_quantity <= 1 + 10 \\\n and p_size between 1 and 5 \\\n and l_shipmode in ('AIR', 'AIR REG') \\\n and l_shipinstruct = 'DELIVER IN PERSON' \\\n ) or ( \\\n p_partkey = l_partkey \\\n and p_brand = 'Brand#23' \\\n and p_container in ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK') \\\n and l_quantity >= 10 \\\n and l_quantity <= 10 + 10 \\\n and p_size between 1 and 10 \\\n and l_shipmode in ('AIR', 'AIR REG') \\\n and l_shipinstruct = 'DELIVER IN PERSON' \\\n ) or ( \\\n p_partkey = l_partkey \\\n and p_brand = 'Brand#34' \\\n and p_container in ('LG CASE', 'LG BOX', 'LG PACK', 'LG PKG') \\\n and l_quantity >= 20 \\\n and l_quantity <= 20 + 10 \\\n and p_size between 1 and 15 \\\n and l_shipmode in ('AIR', 'AIR REG') \\\n and l_shipinstruct = 'DELIVER IN PERSON' \\\n )\"\nsqlDF = spark.sql(query)\nprint(\"RowBased Process\")\n%time sqlDF.show(20)\n%time sqlDF.show(20)\nspark.stop()", "_____no_output_____" ], [ "###################### Query 20 #####################\n\nfrom pyspark.sql import SparkSession\nspark = SparkSession\\\n .builder\\\n .master('yarn-client')\\\n .appName(\"TPCH_Q20\")\\\n .getOrCreate()\ns_df = 
spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/supplier\")\ns_df.createOrReplaceTempView(\"supplier\")\nn_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/nation\")\nn_df.createOrReplaceTempView(\"nation\")\nps_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/partsupp\")\nps_df.createOrReplaceTempView(\"partsupp\")\np_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/part\")\np_df.createOrReplaceTempView(\"part\")\nl_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/lineitem\")\nl_df.createOrReplaceTempView(\"lineitem\")\nquery = \"select \\\n s_name, \\\n s_address \\\n from \\\n supplier, \\\n nation \\\n where \\\n s_suppkey in \\\n (select \\\n ps_suppkey \\\n from \\\n partsupp \\\n where \\\n ps_partkey in \\\n (select \\\n p_partkey \\\n from \\\n part \\\n where \\\n p_name like 'forest%') \\\n and ps_availqty > \\\n (select \\\n 0.5 * sum(l_quantity) \\\n from \\\n lineitem \\\n where \\\n l_partkey = ps_partkey \\\n and l_suppkey = ps_suppkey \\\n and l_shipdate >= date '1994-01-01' \\\n and l_shipdate < date '1995-01-01')\\\n ) \\\n and s_nationkey = n_nationkey \\\n and n_name = 'CANADA' \\\n order by \\\n s_name\"\nsqlDF = spark.sql(query)\nprint(\"RowBased Process\")\n%time sqlDF.show()\n%time sqlDF.show()\nspark.stop()", "_____no_output_____" ], [ "###################### Query 21 #####################\n\nfrom pyspark.sql import SparkSession\nspark = SparkSession\\\n .builder\\\n .master('yarn-client')\\\n .appName(\"TPCH_Q21\")\\\n .getOrCreate()\ns_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/supplier\")\ns_df.createOrReplaceTempView(\"supplier\")\nl_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/lineitem\")\nl_df.createOrReplaceTempView(\"lineitem\")\no_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/orders\")\no_df.createOrReplaceTempView(\"orders\")\nn_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/nation\")\nn_df.createOrReplaceTempView(\"nation\")\np_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/part\")\np_df.createOrReplaceTempView(\"part\")\nquery = \"select \\\n s_name, \\\n count(*) as numwait \\\n from \\\n supplier, \\\n lineitem l1, \\\n orders, \\\n nation \\\n where \\\n s_suppkey = l1.l_suppkey \\\n and o_orderkey = l1.l_orderkey \\\n and o_orderstatus = 'F' \\\n and l1.l_receiptdate > l1.l_commitdate \\\n and exists ( \\\n select \\\n * \\\n from \\\n lineitem l2 \\\n where \\\n l2.l_orderkey = l1.l_orderkey \\\n and l2.l_suppkey <> l1.l_suppkey \\\n ) and not exists ( \\\n select \\\n * \\\n from \\\n lineitem l3 \\\n where \\\n l3.l_orderkey = l1.l_orderkey \\\n and l3.l_suppkey <> l1.l_suppkey \\\n and l3.l_receiptdate > l3.l_commitdate\\\n ) and s_nationkey = n_nationkey \\\n and n_name = 'SAUDI ARABIA' \\\n group by \\\n s_name \\\n order by \\\n numwait desc, s_name\"\nsqlDF = spark.sql(query)\nprint(\"RowBased Process\")\n%time sqlDF.show()\n%time sqlDF.show()\nspark.stop()", "_____no_output_____" ], [ "###################### Query 22 #####################\n\nfrom pyspark.sql import SparkSession\nspark = SparkSession\\\n .builder\\\n .master('yarn-client')\\\n .appName(\"TPCH_Q22\")\\\n .getOrCreate()\nc_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/customer\")\nc_df.createOrReplaceTempView(\"customer\")\no_df = spark.read.format(\"parquet\").load(\"/orin_tpchnp_100/orders\")\no_df.createOrReplaceTempView(\"orders\")\nquery = \"select \\\n cntrycode, \\\n count(*) as numcust, \\\n sum(c_acctbal) as totacctbal \\\n from 
\\\n ( \\\n select \\\n substring(c_phone, 1, 2) as cntrycode, \\\n c_acctbal \\\n from \\\n customer \\\n where \\\n substring(c_phone, 1, 2) in \\\n ('13', '31', '23', '29', '30', '18', '17') \\\n and c_acctbal > ( \\\n select \\\n avg(c_acctbal) \\\n from \\\n customer \\\n where \\\n c_acctbal > 0.00 \\\n and substring(c_phone, 1, 2) in \\\n ('13', '31', '23', '29', '30', '18', '17') \\\n ) \\\n and not exists ( \\\n select \\\n * \\\n from \\\n orders \\\n where \\\n o_custkey = c_custkey \\\n ) \\\n ) as custsale \\\n group by \\\n cntrycode \\\n order by \\\n cntrycode\"\nsqlDF = spark.sql(query)\nprint(\"RowBased Process\")\n%time sqlDF.show()\n%time sqlDF.show()\nspark.stop()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a695308159924b5912de1919713dfe7ffe3a383
7,136
ipynb
Jupyter Notebook
01_INRIX_data_preprocessing_cdc16/INRIX_data_preprocessing_01_create_road_seg_inr_capac.ipynb
jingzbu/InverseVITraffic
c0d33d91bdd3c014147d58866c1a2b99fb8a9608
[ "MIT" ]
null
null
null
01_INRIX_data_preprocessing_cdc16/INRIX_data_preprocessing_01_create_road_seg_inr_capac.ipynb
jingzbu/InverseVITraffic
c0d33d91bdd3c014147d58866c1a2b99fb8a9608
[ "MIT" ]
null
null
null
01_INRIX_data_preprocessing_cdc16/INRIX_data_preprocessing_01_create_road_seg_inr_capac.ipynb
jingzbu/InverseVITraffic
c0d33d91bdd3c014147d58866c1a2b99fb8a9608
[ "MIT" ]
null
null
null
39.425414
98
0.59361
[ [ [ "%run ../Python_files/util.py\n\n##### read in raw data\n\nimport openpyxl\n\ndata_folder = '/home/jzh/Dropbox/Research/\\\nData-driven_estimation_inverse_optimization/INRIX/Raw_data/'\n\n# load filtered INRIX attribute table raw data\nwb_INRIX = openpyxl.load_workbook(data_folder + 'filtered_INRIX_attribute_table.xlsx')\n\n# load filtered capacity attribute table raw data\nwb_capac = openpyxl.load_workbook(data_folder + 'filtered_capacity_attribute_table.xlsx')\n\n# load lookup table raw data\nwb_lookup = openpyxl.load_workbook(data_folder + 'roadinv_id_to_tmc_lookup.xlsx') \n\n# get sheet name from workbook\nsheet_INRIX_name = wb_INRIX.sheetnames[0].encode('utf-8')\nsheet_capac_name = wb_capac.sheetnames[0].encode('utf-8')\nsheet_lookup_name = wb_lookup.sheetnames[0].encode('utf-8')\n\n# get sheet of filtered INRIX attribute table raw data\nsheet_INRIX = wb_INRIX.get_sheet_by_name(sheet_INRIX_name)\n\n# get sheet of filtered capacity attribute table raw data\nsheet_capac = wb_capac.get_sheet_by_name(sheet_capac_name)\n\n# get sheet of lookup table raw data\nsheet_lookup = wb_lookup.get_sheet_by_name(sheet_lookup_name)\n\n\n##### extract attributes of interest from INRIX sheet\n\ntmc_list = []\nroad_num_list = []\nshape_length_list = []\n\nfor i in xrange(2, 1 + sheet_INRIX.max_row):\n tmc_list.append(sheet_INRIX.cell(row=i, column=2).value.encode('utf-8'))\n road_num_list.append(sheet_INRIX.cell(row=i, column=3).value.encode('utf-8'))\n shape_length_list.append(sheet_INRIX.cell(row=i, column=13).value)\n\nassert(len(tmc_list) == len(road_num_list) and \\\n len(road_num_list) == len(shape_length_list)) \n\n\n##### extract attributes of interest from capacity sheet\n\nroad_invent_list = []\nlength_list = []\nroute_num_list = []\nAB_AM_capac_list = [] \nAB_MD_capac_list = []\nAB_PM_capac_list = []\nAB_NT_capac_list = []\n\nfor i in xrange(2, 1 + sheet_capac.max_row):\n road_invent_list.append(sheet_capac.cell(row=i, column=26).value) \n length_list.append(sheet_capac.cell(row=i, column=2).value)\n route_num_list.append(sheet_capac.cell(row=i, column=8).value)\n # take the period capacity factor into consideration\n AB_AM_capac_list.append((1.0/2.5)*sheet_capac.cell(row=i, column=18).value)\n AB_MD_capac_list.append((1.0/4.75)*sheet_capac.cell(row=i, column=20).value)\n AB_PM_capac_list.append((1.0/2.5)*sheet_capac.cell(row=i, column=22).value)\n AB_NT_capac_list.append((1.0/7.0)*sheet_capac.cell(row=i, column=24).value)\n\nassert(len(road_invent_list) == len(length_list) and \\\n len(length_list) == len(route_num_list) and \\\n len(route_num_list) == len(AB_AM_capac_list) and \\\n len(AB_AM_capac_list) == len(AB_MD_capac_list) and \\\n len(AB_MD_capac_list) == len(AB_PM_capac_list) and \\\n len(AB_PM_capac_list) == len(AB_NT_capac_list)) \n\n\n##### extract attributes of interest from lookup sheet\n\nroad_inv_ID_lookup_list = []\ntmc_lookup_list = []\n\nfor i in xrange(2, 1 + sheet_lookup.max_row):\n road_inv_ID_lookup_list.append(sheet_lookup.cell(row=i, column=1).value) \n tmc_lookup_list.append(str(sheet_lookup.cell(row=i, column=4).value))\n\nassert(len(road_inv_ID_lookup_list) == len(tmc_lookup_list)) \n\n\n# instantiation of RoadSegInr class\nroad_seg_inr = RoadSegInr(tmc_list, road_num_list, shape_length_list)\n\n# instantiation of RoadSegCapac class\nroad_seg_capac = RoadSegCapac(road_invent_list, length_list, route_num_list, \\\n AB_AM_capac_list, AB_MD_capac_list, \\\n AB_PM_capac_list, AB_NT_capac_list)\n\n# instantiation of LookUp class\nlook_up = 
LookUp(road_inv_ID_lookup_list, tmc_lookup_list)\n\n# make a dictionary from look_up\ntmc_roadInv_dict = {i:j for (i, j) in zip(look_up.tmc, look_up.road_inv_ID)}\n\n# make dictionaries from road_seg_capac\nroadInv_capac_dict_AM = {i:j for (i, j) in zip(road_seg_capac.road_invent, \\\n road_seg_capac.AB_AM_capac)}\nroadInv_capac_dict_MD = {i:j for (i, j) in zip(road_seg_capac.road_invent, \\\n road_seg_capac.AB_MD_capac)}\nroadInv_capac_dict_PM = {i:j for (i, j) in zip(road_seg_capac.road_invent, \\\n road_seg_capac.AB_PM_capac)}\nroadInv_capac_dict_NT = {i:j for (i, j) in zip(road_seg_capac.road_invent, \\\n road_seg_capac.AB_NT_capac)}\n\ncapac_AM = []\ncapac_MD = []\ncapac_PM = []\ncapac_NT = []\n\nfor i in range(len(road_seg_inr.tmc)):\n capac_AM.append(roadInv_capac_dict_AM[tmc_roadInv_dict[road_seg_inr.tmc[i]]])\n capac_MD.append(roadInv_capac_dict_MD[tmc_roadInv_dict[road_seg_inr.tmc[i]]])\n capac_PM.append(roadInv_capac_dict_PM[tmc_roadInv_dict[road_seg_inr.tmc[i]]])\n capac_NT.append(roadInv_capac_dict_NT[tmc_roadInv_dict[road_seg_inr.tmc[i]]])\n\n# instantiation of RoadSegInrCapac class\nroad_seg_inr_capac = RoadSegInrCapac(tmc_list, road_num_list, shape_length_list, \\\n capac_AM, capac_MD, capac_PM, capac_NT)\n\nzdump(road_seg_inr_capac, '../temp_files/road_seg_inr_capac.pkz')", "No dicts found; please check load_dicts...\n" ] ] ]
[ "code" ]
[ [ "code" ] ]
4a6963abbdfa571e8da189a709ecba7a6e856a3e
41,642
ipynb
Jupyter Notebook
README.ipynb
pwwang/dpipe
4efafbb1b13f8a70cc692943473d716b66e9e947
[ "MIT" ]
null
null
null
README.ipynb
pwwang/dpipe
4efafbb1b13f8a70cc692943473d716b66e9e947
[ "MIT" ]
null
null
null
README.ipynb
pwwang/dpipe
4efafbb1b13f8a70cc692943473d716b66e9e947
[ "MIT" ]
null
null
null
27.163731
432
0.383603
[ [ [ "# pipda\n\nA framework for data piping in python\n\nInspired by [siuba][1], [dfply][2], [plydata][3] and [dplython][4], but with simple yet powerful APIs to mimic the `dplyr` and `tidyr` packages in python\n\n## Installation\n```shell\npip install -U pipda\n```\n\n## Usage\n\nCheckout [plyrda][6] for more detailed usages.\n\n### Verbs\n\nVerbs are functions next to the piping sign (i.e. `>>`) receiving the data directly.\n", "_____no_output_____" ] ], [ [ "try:\n import pandas\nexcept ImportError:\n !pip install pandas", "_____no_output_____" ], [ "import pandas as pd\nfrom pipda import (\n register_verb, \n register_func, \n register_operator, \n evaluate_expr,\n Operator, \n Symbolic,\n Context\n)\n\nf = Symbolic()\n\ndf = pd.DataFrame({\n 'x': [0, 1, 2, 3],\n 'y': ['zero', 'one', 'two', 'three']\n})\n\ndf\n", "_____no_output_____" ], [ "@register_verb(pd.DataFrame)\ndef head(data, n=5):\n return data.head(n)\n\ndf >> head(2)", "_____no_output_____" ], [ "@register_verb(pd.DataFrame, context=Context.EVAL)\ndef mutate(data, **kwargs):\n data = data.copy()\n for key, val in kwargs.items():\n data[key] = val\n return data\n\ndf >> mutate(z=1)", "_____no_output_____" ], [ "df >> mutate(z=f.x)", "_____no_output_____" ], [ "# Verbs that don't compile f.a to data, but just the column name\n@register_verb(pd.DataFrame, context=Context.SELECT)\ndef select(data, *columns):\n return data.loc[:, columns]\n\n# f.x won't be compiled as df.x but just 'x'\ndf >> mutate(z=2*f.x) >> select(f.x, f.z)\n", "_____no_output_____" ], [ "# Compile the args inside the verb\n@register_verb(pd.DataFrame, context=Context.PENDING)\ndef mutate_existing(data, column, value):\n column = evaluate_expr(column, data, Context.SELECT)\n value = evaluate_expr(value, data, Context.EVAL)\n data = data.copy()\n data[column] = value\n return data\n\n# First f.x compiled as column name, and second as Series data\ndf2 = df >> mutate_existing(f.x, 10 * f.x)\ndf2", "_____no_output_____" ], [ "# Evaluate the arguments by yourself\n@register_verb(pd.DataFrame, context=Context.PENDING)\ndef mutate_existing2(data, column, value):\n column = evaluate_expr(column, data, Context.SELECT)\n value = evaluate_expr(value, df2, Context.EVAL)\n data = data.copy()\n data[column] = value\n return data\n\ndf >> mutate_existing2(f.x, 2 * f.x)", "_____no_output_____" ], [ "# register for multiple types\n@register_verb(int, context=Context.EVAL)\ndef add(data, other):\n return data + other\n\n# add is actually a singledispatch generic function\[email protected](float, context=Context.EVAL)\ndef _(data, other):\n return data * other\n\n1 >> add(1)\n", "_____no_output_____" ], [ "1.1 >> add(1.0)\n", "_____no_output_____" ], [ "# As it's a singledispatch generic function, we can do it for multiple types\n# with the same logic\n@register_verb(context=Context.EVAL)\ndef mul(data, other):\n raise NotImplementedError # not invalid until types registered\n\[email protected](int, context=Context.EVAL)\[email protected](float, context=Context.EVAL) \n# or you could do @mul.register((int, float), context=Context.EVAL)\n# context is also supported\ndef _(data, other):\n return data * other\n\n3 >> mul(2)", "_____no_output_____" ], [ "3.2 >> mul(2)", "_____no_output_____" ] ], [ [ "### Functions used in verb arguments", "_____no_output_____" ] ], [ [ "@register_func(context=Context.EVAL)\ndef if_else(data, cond, true, false):\n cond.loc[cond.isin([True]), ] = true\n cond.loc[cond.isin([False]), ] = false\n return cond\n\n# The function is then also a 
singledispatch generic function\n\ndf >> mutate(z=if_else(f.x>1, 20, 10))", "_____no_output_____" ], [ "# function without data argument\n@register_func(None)\ndef length(strings):\n return [len(s) for s in strings]\n\ndf >> mutate(z=length(f.y))", "_____no_output_____" ], [ "# register existing functions\nfrom numpy import vectorize\nlen = register_func(None, context=Context.EVAL, func=vectorize(len))\n\n# original function still works\nprint(len('abc'))\n\ndf >> mutate(z=len(f.y))", "3\n" ] ], [ [ "### Operators\nYou may also redefine the behavior of the operators", "_____no_output_____" ] ], [ [ "@register_operator\nclass MyOperators(Operator):\n def xor(self, a, b):\n \"\"\"Inteprete X ^ Y as pow(X, Y).\"\"\"\n return a ** b\n\ndf >> mutate(z=f.x ^ 2)", "_____no_output_____" ] ], [ [ "### Context\n\nThe context defines how a reference (`f.A`, `f['A']`, `f.A.B` is evaluated)", "_____no_output_____" ] ], [ [ "from pipda import ContextBase\n\nclass MyContext(ContextBase):\n name = 'my'\n def getattr(self, parent, ref):\n # double it to distinguish getattr\n return getattr(parent, ref)\n def getitem(self, parent, ref):\n return parent[ref] * 2\n @property\n def ref(self):\n # how we evaluate the ref in f[ref]\n return self \n \n \n@register_verb(context=MyContext())\ndef mutate_mycontext(data, **kwargs):\n for key, val in kwargs.items():\n data[key] = val\n return data\n\ndf >> mutate_mycontext(z=f.x + f['x'])\n ", "_____no_output_____" ], [ "# when ref in f[ref] is also needed to be evaluated\ndf = df >> mutate(zero=0, one=1, two=2, three=3)\ndf", "_____no_output_____" ], [ "df >> mutate_mycontext(m=f[f.y][:1].values[0])\n# f.y returns ['zero', 'one', 'two', 'three']\n# f[f.y] gets [[0, 2, 4, 6], [0, 2, 4, 6], [0, 2, 4, 6], [0, 2, 4, 6]]\n# f[f.y][:1].values gets [[0, 4, 8, 16]]\n# f[f.y][:1].values[0] returns [0, 8, 16, 32]\n# Notes that each subscription ([]) will double the values", "_____no_output_____" ] ], [ [ "### Caveats\n\n- You have to use `and_` and `or_` for bitwise and/or (`&`/`|`) operators, as `and` and `or` are python keywords.\n\n- Limitations:\n\n Any limitations apply to `executing` to detect the AST node will apply to `pipda`. It may not work in some circumstances where other AST magics apply.\n\n- Calling registered verbs/functions regularly:\n\n The piping syntax (`>>`) is recommended with `pipda`. Because everything is determined with this syntax.\n\n However, `pipda` tries to support regular calling. The ambiguity can come from the situations where the arguments passed in can shift one position right (such that they fit the piping calling), and first value passed in can also be dispatched and fit in the second argument.\n\n For example:\n\n ```python\n @register_verb(int)\n def add(a: int, b: int = 1):\n return a + b\n ```\n\n If you call it like this `add(2)`, then we have no idea if this is calling `add(2, b=1)`, or `add(b=2)` and it's waiting for the data (`a`) to be piped in. 
In such a case, the function is called in the former way, but a warning will be showing.\n\n To avoid this, as it states in the warning message, according to the reasons of the ambiguity, we should make sure that the values passed in cannot be shifted one position right (given values for all arguments would do it):\n\n ```python\n add(2, 1) # or add(2, b=1)\n ```\n\n or try not to use optional arguments while defining the function;\n\n or make sure the first value cannot be dispatched:\n\n ```python\n @register_verb(int)\n def add(a: int, b: float = 1.0):\n return a + b\n\n add(2.0)\n ```\n In such a case, it is for sure that it is called like `add(b=2.0)` and wait for `a` to be piped in.\n\n You can even have a different type annotation for the second argument, even the same value can be accepted:\n\n ```python\n @register_verb(int)\n def add(a: int, b: Optional[int] = 1):\n return a + b\n\n add(2)\n ```\n\n This will force it to call `add(2, b=1)`, but this definitely has some side effects:\n ```python\n verb(data, add(2))\n ```\n Here `add(2)` is intended to be called like `add(b=2)`, but unexpectedly, it will call like `add(2, b=1)`. Using the piping syntax will perfectly solve this:\n ```python\n data >> verb(add(2))\n ```\n since we know the function called in a verb is supposed to wait for the data to be piped in.\n\n See also: [Piping vs regular calling][7]\n\n- Use another piping sign\n\n ```python\n from pipda import register_piping\n register_piping('^')\n\n # register verbs and functions\n df ^ verb1(...) ^ verb2(...)\n ```\n\n Allowed signs are: `+`, `-`, `*`, `@`, `/`, `//`, `%`, `**`, `<<`, `>>`, `&`, `^` and `|`.\n\n## How it works\n### The verbs\n```R\ndata %>% verb(arg1, ..., key1=kwarg1, ...)\n```\nThe above is a typical `dplyr`/`tidyr` data piping syntax.\n\nThe counterpart R syntax we expect is:\n```python\ndata >> verb(arg1, ..., key1=kwarg1, ...)\n```\nTo implement that, we need to defer the execution of the `verb` by turning it into a `Verb` object, which holds all information of the function to be executed later. The `Verb` object won't be executed until the `data` is piped in. It all thanks to the [`executing`][5] package to let us determine the ast nodes where the function is called. So that we are able to determine whether the function is called in a piping mode.\n\nIf an argument is referring to a column of the data and the column will be involved in the later computation, the it also needs to be deferred. For example, with `dplyr` in `R`:\n```R\ndata %>% mutate(z=a)\n```\nis trying add a column named `z` with the data from column `a`.\n\nIn python, we want to do the same with:\n```python\ndata >> mutate(z=f.a)\n```\nwhere `f.a` is a `Reference` object that carries the column information without fetching the data while python sees it immmediately.\n\nHere the trick is `f`. Like other packages, we introduced the `Symbolic` object, which will connect the parts in the argument and make the whole argument an `Expression` object. This object is holding the execution information, which we could use later when the piping is detected. \n\n### The functions\nThen what if we want to use some functions in the arguments of the `verb`?\nFor example:\n```python\ndata >> select(starts_with('a'))\n```\nto select the columns with names start with `'a'`.\n\nNo doubt that we need to defer the execution of the function, too. 
The trick is that we let the function return a `function` object as well, and evaluate it as the argument of the verb.\n\n### The operators\n`pipda` also opens opportunities to change the behavior of the operators in verb/function arguments. This allows us to mimic something like this:\n```python\ndata >> select(-f.a) # select all columns but `a`\n```\n\nTo do that, we turn it into an `Operator` object. Just like a `Verb` or a `Function` object, the execution is deferred. By default, the operators we use are from the python standard library `operator`. `operator.neg` in the above example.\n\nYou can also define your own by subclassing the `Operator` class, and then register it to replace the default one by decorating it with `register_operator`.\n\n\n[1]: https://github.com/machow/siuba\n[2]: https://github.com/kieferk/dfply\n[3]: https://github.com/has2k1/plydata\n[4]: https://github.com/dodger487/dplython\n[5]: https://github.com/alexmojaki/executing\n[6]: https://github.com/pwwang/plyrda\n[7]: https://pwwang.github.io/datar/piping_vs_regular/\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ] ]
4a696459601fccc5ce12f72cc9ae5dcab7910917
2,513
ipynb
Jupyter Notebook
notebooks/lecture_python_day4.ipynb
LindsayRAbrams/2021-02-23-NOAA-online
dbff979f9c65444b3024cbd963a4fb11bf7d2d2a
[ "CC-BY-4.0" ]
null
null
null
notebooks/lecture_python_day4.ipynb
LindsayRAbrams/2021-02-23-NOAA-online
dbff979f9c65444b3024cbd963a4fb11bf7d2d2a
[ "CC-BY-4.0" ]
null
null
null
notebooks/lecture_python_day4.ipynb
LindsayRAbrams/2021-02-23-NOAA-online
dbff979f9c65444b3024cbd963a4fb11bf7d2d2a
[ "CC-BY-4.0" ]
2
2021-02-26T22:03:25.000Z
2021-03-13T23:40:27.000Z
22.845455
76
0.469558
[ [ [ "%%writefile ./code/test_sys.py\nimport sys\n\nprint('Print the first command line argument', sys.argv[0])\n\nfor i in sys.argv:\n print(i)", "Writing ./code/test_sys.py\n" ], [ "%%writefile ./code/my_reading.py\nimport sys\nimport numpy as np\n\ndef compute_mean(data):\n \n mean = np.mean(data)\n print('The computed mean is:', mean)\ndef compute_max(data):\n pass\ndef compute_min(data):\n pass\ndef load_data(filename):\n \n data = np.loadtxt(fname=filename, delimiter=',')\n assert isinstance(data, np.ndarray)\n return data\ndef main():\n \n action = sys.argv[1]\n \n assert action in ['--mean', '--min', '--max'], 'Invalid action'\n for filename in sys.argv[2:]:\n assert isinstance(filename, str) \n assert filename[-4:] == '.csv'\n for filename in sys.argv[2:]: \n data = load_data(filename)\n \n if action == '--mean':\n result = compute_mean(data)\n elif action == '--max':\n result = compute_max(data)\n elif action == '--min':\n result = compute_min(data)\nif __name__ == '__main__':\n main()\n\n\n", "Overwriting ./code/my_reading.py\n" ] ] ]
[ "code" ]
[ [ "code", "code" ] ]
4a696c5b11356c917b7e4af4e44f7f964d74a5ad
557,341
ipynb
Jupyter Notebook
fxa/5_Machine_Learning/7_Best_Confs_MDS.ipynb
jRicciL/ml_and_ensemble_docking
d2bf7010d6df34710e860b0c01f2746b4dc8e09a
[ "MIT" ]
7
2021-05-11T18:39:26.000Z
2022-01-28T14:41:49.000Z
fxa/5_Machine_Learning/7_Best_Confs_MDS.ipynb
jRicciL/ml_and_ensemble_docking
d2bf7010d6df34710e860b0c01f2746b4dc8e09a
[ "MIT" ]
null
null
null
fxa/5_Machine_Learning/7_Best_Confs_MDS.ipynb
jRicciL/ml_and_ensemble_docking
d2bf7010d6df34710e860b0c01f2746b4dc8e09a
[ "MIT" ]
3
2021-12-04T13:42:19.000Z
2022-01-28T14:41:50.000Z
446.945469
180,227
0.927608
[ [ [ "# Visualize the best RFE conformations using cMDS plots", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport sys\nsys.path.append('../..')\nfrom helper_modules.run_or_load import *\nfrom helper_modules.MDS import *", "_____no_output_____" ] ], [ [ "### Load protein related data", "_____no_output_____" ] ], [ [ "prot_name = 'fxa'", "_____no_output_____" ], [ "DIR = '../1_Download_and_prepare_protein_ensembles'\npath_to_file = f'{DIR}/TABLA_MTDATA_FXA_136_crys_LIGS_INFO.json'\ndf_prot = pd.read_json(path_to_file)\ndf_prot.head(3)", "_____no_output_____" ] ], [ [ "### Load the dimensionality reduction results", "_____no_output_____" ] ], [ [ "df_dims = pd.read_pickle('../3_Protein_Ensembles_Analysis/df_PROTEINS_DIMS_reduced_TABLE.obj')\n\n# Update the df with the mds axis\n# Pocket shape\ndf_prot['vol_x'] = df_dims['mds_vol_pkt_x']\ndf_prot['vol_y'] = df_dims['mds_vol_pkt_y']\n# secondary structure residues RMSD\ndf_prot['secres_x'] = df_dims['mds_sec_x']\ndf_prot['secres_y'] = df_dims['mds_sec_y']\n# pocket residues RMSD\ndf_prot['pkt_x'] = df_dims['mds_pkt_x']\ndf_prot['pkt_y'] = df_dims['mds_pkt_y']\n\ndf_prot.head(3)", "_____no_output_____" ] ], [ [ "### Load POVME3 results and single-conformation docking performances (AUC-ROC)", "_____no_output_____" ] ], [ [ "# Extra features to get volume or surface area\ndf_extra = pd.read_pickle(f'../4_Ensemble_docking_results/TABLE_Confs_Features_and_performances_fxa.pkl')\n\n# Adding to the main df\ndf_prot['volume'] = df_extra['Pk. Volume']\ndf_prot['surf_area'] = df_extra['Pk. SASA']\n\n# ROC-AUC single performance\ndf_prot['AUC-ROC'] = df_extra['AUC-ROC']\n\ndf_prot.head(3)", "_____no_output_____" ] ], [ [ "### Load *Recursive Feature Elimination* results", "_____no_output_____" ] ], [ [ "# Open RFE_estimator\n# Open RFE_estimator\ndataset = 'MERGED'\nmodel_name = 'XGB_tree'\nsplit = 'random'\nfilename = f'./cachedir/rfe_selectors/RFE_xgb_{prot_name}.joblib'\n# Load the RFE selector (computed in the previos notebook)\nrfe_selector = joblib.load(filename)\n# Create a dataframe with the protein rankings\ndf_ranks = pd.DataFrame({\n 'pdb_id' : df_prot.index, \n 'rfe_ranking': rfe_selector.ranking_\n })\ndf_ranks = df_ranks.sort_values('rfe_ranking').set_index('pdb_id')\n\n# Update the df with the rank values\ndf_prot = df_prot.merge(df_ranks, left_index=True, right_index=True)\\\n .sort_values('rfe_ranking')\n\ndf_prot.head(3)", "/Users/joelricci/miniconda/envs/mds/lib/python3.7/site-packages/sklearn/base.py:315: UserWarning: Trying to unpickle estimator RFE from version 0.23.2 when using version 0.24.2. This might lead to breaking code or invalid results. 
Use at your own risk.\n UserWarning)\n" ] ], [ [ "## cMDS plots\nWe will use `ggplot2` for plotting", "_____no_output_____" ] ], [ [ "%load_ext rpy2.ipython", "_____no_output_____" ] ], [ [ "Just a few modifications for visualization purposes.", "_____no_output_____" ] ], [ [ "# To be able to plot confs with no inhibitors => NA == 10\ndf_prot['Inhib_mass_num'] = pd.to_numeric(df_prot['Inhib_mass']).\\\n fillna(10) ** 2\ndf_prot['volume.T'] = (df_prot['volume']/100) ** 1.5\n\ndf_selected = df_prot.sort_values('rfe_ranking').head(16)\nx = 'vol_x'\ny = 'vol_y'\nsize='volume.T'", "_____no_output_____" ] ], [ [ "#### Create the dataframe for plotting", "_____no_output_____" ] ], [ [ "# This is the final table for plotting\ndf_volpk = df_prot[['rfe_ranking', 'vol_x', 'vol_y', 'volume']]\ndf_volpk = df_volpk.rename({'vol_x': 'x', 'vol_y': 'y'}, axis = 1)\ndf_volpk", "_____no_output_____" ], [ "%%R -i df_volpk -i prot_name -w 4. -h 4. --units in -r 200\n\nsource('../../R_scripts/plot_cMDS.R')\nprot_name <- prot_name\n\np <- plot_cMDS(df_volpk)\n\n# Save the picture\nspace <- 'povme'\nmethodology <- 'MDS_plots/'\nsave_path = '~/Documents/Doctorado/Paper_doctorado/Response_to_reviewers/Figuras_mayor_review/raw_imgs/'\nfilename <- paste0(save_path, methodology,\n paste(prot_name, space, 'MDS.pdf', sep='_'))\nggsave(filename, plot=p, width=4., height= 4.) \nprint(p)", "R[write to console]: ── \u001b[1mAttaching packages\u001b[22m ─────────────────────────────────────── tidyverse 1.3.0 ──\n\nR[write to console]: \u001b[32m✔\u001b[39m \u001b[34mtibble \u001b[39m 3.0.4 \u001b[32m✔\u001b[39m \u001b[34mdplyr \u001b[39m 1.0.2\n\u001b[32m✔\u001b[39m \u001b[34mtidyr \u001b[39m 1.1.2 \u001b[32m✔\u001b[39m \u001b[34mstringr\u001b[39m 1.4.0\n\u001b[32m✔\u001b[39m \u001b[34mreadr \u001b[39m 1.4.0 \u001b[32m✔\u001b[39m \u001b[34mforcats\u001b[39m 0.5.1\n\u001b[32m✔\u001b[39m \u001b[34mpurrr \u001b[39m 0.3.4 \n\nR[write to console]: ── \u001b[1mConflicts\u001b[22m ────────────────────────────────────────── tidyverse_conflicts() ──\n\u001b[31m✖\u001b[39m \u001b[34mdplyr\u001b[39m::\u001b[32mfilter()\u001b[39m masks \u001b[34mstats\u001b[39m::filter()\n\u001b[31m✖\u001b[39m \u001b[34mdplyr\u001b[39m::\u001b[32mlag()\u001b[39m masks \u001b[34mstats\u001b[39m::lag()\n\n" ] ], [ [ "## Swarplot with the AUC-ROC values per conformation\n- The following plot show the distribution of the protein conformations regarding its AUC-ROC value computed from their individual docking results.", "_____no_output_____" ] ], [ [ "import matplotlib\nimport seaborn as sns\nimport matplotlib.ticker as ticker\nfrom matplotlib.colors import LinearSegmentedColormap\n\ntop_confs = 8\n\n# Define the colormap\ncmap = LinearSegmentedColormap.from_list(\n name ='test', \n colors = [\"red\", \"orange\", \"#374E55\"],\n N = top_confs\n)\nmatplotlib.cm.register_cmap(\"mycolormap\", cmap)\nsns.set(font_scale = 1.1, style = 'whitegrid')\n\n# Filter the \ndf_ = df_prot.copy()\n# Get the top 16\ndf_['top_mask'] = [2 if i <= top_confs else \n 1 for i in df_['rfe_ranking']]\ndf_ = df_[['AUC-ROC', 'top_mask', 'rfe_ranking']]\\\n .melt(id_vars=('top_mask', \n 'rfe_ranking'))\n\nfig, ax = plt.subplots(figsize=(2.2, 4.45))\n# Blue dots (all conformations)\nnp.random.seed(2)\nsns.swarmplot(y = 'value', \n x = 'variable', \n data = df_, \n size = 4.6,\n ax = ax,\n color = '#87DADE')\n# Plot the top RFE 16 conformations\ndf_top = df_.query('top_mask == 2') \nnp.random.seed(2)\nsns.swarmplot(y = 'value', \n x = 'variable', \n data = df_top, \n size = 5,\n ax = 
ax,\n hue ='rfe_ranking',\n edgecolor = 'black', \n linewidth = 0.5,\n palette = 'mycolormap')\n# Axis and labels\nax.set_yticks(np.arange(0.5, 0.70, .05))\nax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%0.2f'))\nax.yaxis.tick_left()\nax.get_legend().remove()\nax.tick_params(length = 2, color = 'black', axis = 'y')\nax.grid(True, linewidth = 0.7)\nax.tick_params(axis=\"y\",direction=\"in\", pad=-27)\nax.set(xlabel = 'Protein conformations', ylabel = '')\nfor axis in ['top','bottom','left','right']:\n ax.spines[axis].set_linewidth(0.55)\n ax.spines[axis].set_edgecolor('black')\nplt.savefig(f'{prot_name}_swarm_auc.pdf')\n\n# Save the picture\nplt.show()", "_____no_output_____" ], [ "top_confs = 8\n\n# Define the colormap\ncmap = LinearSegmentedColormap.from_list(\n name ='test', \n colors = [\"red\", \"orange\", \"#374E55\"],\n N = top_confs\n)\nmatplotlib.cm.register_cmap(\"mycolormap\", cmap)\nsns.set(font_scale = 0.7, style = 'whitegrid')\n\n# Filter the \ndf_ = df_prot.copy()\n# Get the top 16\ndf_['top_mask'] = [2 if i <= top_confs else \n 1 for i in df_['rfe_ranking']]\ndf_ = df_[['AUC-ROC', 'top_mask', 'rfe_ranking']]\\\n .melt(id_vars=('top_mask', \n 'rfe_ranking'))\n# Get the AUC-ROC of the 32 lowest conformation\nauc_worst_32 = df_['value'].nsmallest(32).max()\ndf_['worst_32'] = df_['value'] <= auc_worst_32\n\nfig, ax = plt.subplots(figsize=(1.7, 3.52))\n# Blue dots (all conformations)\nnp.random.seed(2)\nsns.swarmplot(y = 'value', \n x = 'variable', \n data = df_, \n size = 3.6,\n ax = ax,\n alpha = 0.7,\n hue = 'worst_32',\n palette = ['#F0B3B2', '#5CA586'])\n# Axis and labels\nax.set_yticks(list(np.arange(0.3, 1.1, .1)) + [auc_worst_32])\nax.get_yticklabels()[-1].set_color(\"#B24745\")\nax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%0.2f'))\nax.yaxis.tick_left()\nax.get_legend().remove()\nplt.axhline(y=0.5, color='darkgrey', linewidth = 1.2, linestyle = '--')\nplt.axhline(y=auc_worst_32, color='#79AF97', \n linestyle=':', linewidth = 1.2)\nax.fill_between([-1,1], [0], [auc_worst_32], color='#79AF97', alpha = 0.3 )\nax.tick_params(length = 3, color = 'black', axis = 'y')\nax.grid(True, linewidth = 0.7)\n# ax.tick_params(axis=\"y\",direction=\"in\", pad=-27)\nax.set_xlabel('SCPs from the entire dataset', fontsize = 8)\nax.set_ylabel('')\nfor axis in ['top','bottom','left','right']:\n ax.spines[axis].set_linewidth(0.55)\n ax.spines[axis].set_edgecolor('black')\nplt.ylim(0.265, 1.033)\nplt.savefig(f'{prot_name}_swarm_auc.pdf')\n# Save the picture\nplt.show()", "_____no_output_____" ] ], [ [ "## MDS using Secondary structure - Pisani (2016) residues.\n- The following projection was computed from the pairwise RMSD matrix of the C$\\alpha$ of the residues defined by Pisani (2016).", "_____no_output_____" ] ], [ [ "df_secRMSD = df_prot[['rfe_ranking', 'secres_x', 'secres_y', 'volume']]\ndf_secRMSD = df_secRMSD.rename({'secres_x': 'x', 'secres_y': 'y'}, axis = 1)", "_____no_output_____" ], [ "%%R -i df_secRMSD -w 3.5 -h 3.5 --units in -r 200\n\np <- plot_cMDS(df_secRMSD)\n\n# Save the picture\nspace <- 'secRMSD'\nmethodology <- 'MDS_plots/'\nsave_path = '~/Documents/Doctorado/Paper_doctorado/Response_to_reviewers/Figuras_mayor_review/raw_imgs/'\nfilename <- paste0(save_path, methodology,\n paste(prot_name, space, 'MDS.pdf', sep='_'))\nggsave(filename, plot=p, width=4.0, height= 4.0) \nprint(p)", "_____no_output_____" ] ], [ [ "## MDS using pocket residues", "_____no_output_____" ] ], [ [ "df_pkRMSD = df_prot[['rfe_ranking', 'pkt_x', 'pkt_y', 'volume']]\ndf_pkRMSD = 
df_pkRMSD.rename({'pkt_x': 'x', 'pkt_y': 'y'}, axis = 1)", "_____no_output_____" ], [ "%%R -i df_pkRMSD -w 4.1 -h 4.1 --units in -r 200\n\np <- plot_cMDS(df_pkRMSD)\n\n# Save the picture\nspace <- 'pkRMSD'\nmethodology <- 'MDS_plots/'\nsave_path = '~/Documents/Doctorado/Paper_doctorado/Response_to_reviewers/Figuras_mayor_review/raw_imgs/'\nfilename <- paste0(save_path, methodology,\n paste(prot_name, space, 'MDS.pdf', sep='_'))\nggsave(filename, plot=p, width=4.0, height= 4.0) \nprint(p)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
4a69723ae727f515ccab14419d5d66daea9a57c4
654,074
ipynb
Jupyter Notebook
samuel_1/shap/02_rf_shap.ipynb
samuel-book/samuel-2
845f5bd47fbfa170dd66f4468c3ef8d0f1502cdb
[ "MIT" ]
null
null
null
samuel_1/shap/02_rf_shap.ipynb
samuel-book/samuel-2
845f5bd47fbfa170dd66f4468c3ef8d0f1502cdb
[ "MIT" ]
null
null
null
samuel_1/shap/02_rf_shap.ipynb
samuel-book/samuel-2
845f5bd47fbfa170dd66f4468c3ef8d0f1502cdb
[ "MIT" ]
null
null
null
693.609756
107,240
0.947252
[ [ [ "# Explaining random forest model predictions with Shapley values\n\nShapley values provide an estimate of how much any particular feature influences the model decision. When Shapley values are averaged they provide a measure of the overall influence of a feature.\n\nShapley values may be used across model types, and so provide a model-agnostic measure of a feature’s influence. This means that the influence of features may be compared across model types, and it allows black box models like neural networks to be explained, at least in part.\n\nFor more on Shapley values in general see Chris Molner’s excellent book chapter:\n\nhttps://christophm.github.io/interpretable-ml-book/shapley.html\n\n\nMore information on the shap library, inclusiong lots of useful examples may be found at: https://shap.readthedocs.io/en/latest/index.html\n\nHere we provide an example of using shap with random forests.", "_____no_output_____" ], [ "## Load packages", "_____no_output_____" ] ], [ [ "# Turn warnings off to keep notebook tidy\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport pickle\nimport shap\n\nfrom sklearn.ensemble import RandomForestClassifier", "_____no_output_____" ] ], [ [ "## Load data\n\nWe will use the first train/test split.", "_____no_output_____" ] ], [ [ "data_loc = '../../data/sam_1/kfold_5fold/'\n\ntrain = pd.read_csv(data_loc + 'train_0.csv')\ntest = pd.read_csv(data_loc + 'test_0.csv')\ntest = test.sample(2500)", "_____no_output_____" ] ], [ [ "## Fit random forest model\n\nFit model and get feature importances.", "_____no_output_____" ] ], [ [ "# Get X and y\nX_train = train.drop('S2Thrombolysis', axis=1)\nX_test = test.drop('S2Thrombolysis', axis=1)\ny_train = train['S2Thrombolysis']\ny_test = test['S2Thrombolysis']\n\n# One hot encode hospitals\nX_train_hosp = pd.get_dummies(X_train['StrokeTeam'], prefix = 'team')\nX_train = pd.concat([X_train, X_train_hosp], axis=1)\nX_train.drop('StrokeTeam', axis=1, inplace=True)\nX_test_hosp = pd.get_dummies(X_test['StrokeTeam'], prefix = 'team')\nX_test = pd.concat([X_test, X_test_hosp], axis=1)\nX_test.drop('StrokeTeam', axis=1, inplace=True)\n\n# Define and Fit model\nmodel = RandomForestClassifier(\n n_estimators=100, n_jobs=-1, class_weight='balanced', random_state=42)\nmodel.fit(X_train, y_train)\n\n# Get feature weights\nfeatures = list(X_train)\nfeature_importances = model.feature_importances_\nimportances = pd.DataFrame(index=features)\nimportances['importance'] = feature_importances\nimportances['rank'] = importances['importance'].rank(ascending=False).values\n\n# Get predicted class and ptrobability\ny_pred = model.predict(X_test)\ny_prob = model.predict_proba(X_test)[:, 1]\n\n# Measure accuracy\naccuracy = np.mean(y_pred == y_test)\nprint(f'Accuracy: {accuracy:0.3f}')", "Accuracy: 0.849\n" ] ], [ [ "## Shap values\n\n### Get Shap values\n\nWe will get the Shap values for the test set. 
Can chose to calculate shap and save, or load shap explainer.", "_____no_output_____" ] ], [ [ "calculate_shap_values = True\n\nif calculate_shap_values:\n\n # Set up explainer using typical feature values from training set\n # Note: Use a sample of 100-1000 for this if explainer too slow\n explainer = shap.Explainer(model, X_train.sample(10))\n\n # Get Shapley values along with base and features\n shap_values_extended = explainer(X_test)\n # Shap values exist for each classification in a Tree; 1=give thrombolysis\n shap_values = shap_values_extended.values[:,:,1]\n \n # Save using pickle\n filename = './output/shap_values_extended_rf1.p'\n with open(filename, 'wb') as filehandler:\n pickle.dump(shap_values_extended, filehandler)\n \nelse:\n # Load preloaded explainer\n filename = './output/shap_values_extended_rf1.p'\n with open(filename, 'rb') as filehandler: \n shap_values_extended = pickle.load(filehandler)\n shap_values = shap_values_extended.values[:,:,1]", "100%|===================| 4997/5000 [01:39<00:00] " ], [ "# Get mean Shap values for each feature\nshap_values_mean = pd.DataFrame(index=features)\nshap_values_mean['mean_shap'] = np.mean(shap_values, axis=0)\nshap_values_mean['abs_mean_shap'] = np.abs(shap_values_mean)\nshap_values_mean['mean_abs_shap'] = np.mean(np.abs(shap_values), axis=0)\nshap_values_mean['rank'] = shap_values_mean['mean_abs_shap'].rank(\n ascending=False).values", "_____no_output_____" ] ], [ [ "### Compare top 10 weights and Shap values", "_____no_output_____" ] ], [ [ "top_10_importances = \\\n importances.sort_values('importance', ascending=False).head(10)\ntop_10_importances", "_____no_output_____" ] ], [ [ "When looking for the most influential Shap values we use the mean of the absolute Shap values for each feature.", "_____no_output_____" ] ], [ [ "top_10_shap = shap_values_mean.sort_values(\n 'mean_abs_shap', ascending=False).head(10)\ntop_10_shap", "_____no_output_____" ] ], [ [ "Get intersection between top 10 weights and top 10 Shap values.", "_____no_output_____" ] ], [ [ "intersection = list(top_10_importances.index.intersection(top_10_shap.index))\nprint(f'Number of intersection values = {len(intersection)}')\nprint('\\nIntersecting values:')\nintersection", "Number of intersection values = 8\n\nIntersecting values:\n" ] ], [ [ "### Plot average Shap values against average weights", "_____no_output_____" ] ], [ [ "fig = plt.figure(figsize=(6,6))\nax = fig.add_subplot(111)\nax.scatter(importances['importance'], shap_values_mean['mean_abs_shap'])\nax.grid()\nax.set_xlabel('Feature importance')\nax.set_ylabel('Feature shap')\nplt.show()", "_____no_output_____" ] ], [ [ "### Plot most influential features\n\nThis is a plot of the mean absolute Shap values.", "_____no_output_____" ] ], [ [ "shap.summary_plot(shap_values, X_test, plot_type='bar')", "_____no_output_____" ] ], [ [ "### Beeswarm plot\n\nA Beeswarm plot shows all points. 
The feature value for each point is shown by the colour, and its position indicates the Shap value for that instance.", "_____no_output_____" ] ], [ [ "fig = plt.figure(figsize=(6,6))\n\nshap.summary_plot(shap_values=shap_values, \n features=X_test,\n feature_names=features, \n cmap=plt.get_cmap('nipy_spectral'),show=False)\n\nplt.show()", "_____no_output_____" ] ], [ [ "### Plot Waterfall and decision plot plots for instances with low or high probability of receiving thrombolysis\n\nWaterfall plot and decision plots are alternative ways of plotting the influence of features for individual cases.", "_____no_output_____" ] ], [ [ "# Get the location of an example each where probability of giving thrombolysis\n# is <0.1 or >0.9\n\nlocation_low_probability = np.where(y_prob < 0.1)[0][0]\nlocation_high_probability = np.where(y_prob > 0.9)[0][0]", "_____no_output_____" ] ], [ [ "An example with low probability of receiving thrombolysis.", "_____no_output_____" ] ], [ [ "shap.plots.waterfall(shap_values_extended[location_low_probability][:,1], \n max_display=15)", "_____no_output_____" ], [ "instance = location_low_probability\n\nchosen_instance = X_test.iloc[instance]\nshap_values_instance = shap_values[instance]\nexpected_value = shap_values_extended.base_values[0][1]#explainer.expected_value\nprint(f\"The base value is {expected_value:0.2f}\")\n\nshap.decision_plot(expected_value, shap_values_instance, chosen_instance)", "The base value is 0.38\n" ] ], [ [ "An example with high probability of receiving thrombolysis.", "_____no_output_____" ] ], [ [ "shap.plots.waterfall(shap_values_extended[location_high_probability][:,1], \n max_display=15)", "_____no_output_____" ], [ "instance = location_high_probability\n\nchosen_instance = X_test.iloc[instance]\nshap_values_instance = shap_values[instance]\nexpected_value = shap_values_extended.base_values[0][1]#explainer.expected_value\nprint(f\"The base value is {expected_value:0.2f}\")\n\nshap.decision_plot(expected_value, shap_values_instance, chosen_instance)", "The base value is 0.38\n" ] ], [ [ "### Show the relationship between feature value and Shap value for top 5 influential features.", "_____no_output_____" ] ], [ [ "feat_to_show = top_10_shap.index[0:5]\n\nfor feat in feat_to_show:\n shap.plots.scatter(shap_values_extended[:, feat][:,1], x_jitter=0)", "_____no_output_____" ] ], [ [ "Examine `S2BrainImagingTime_min` in range 0-400 minutes.", "_____no_output_____" ] ], [ [ "fig = plt.figure(figsize=(6,6))\nax = fig.add_subplot(111)\nshap.plots.scatter(shap_values_extended[:, 'S2BrainImagingTime_min'][:,1], \n x_jitter=0, ax=ax, show=False)\nax.set_xlim(0,400)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a6981b6b274d0d377a6af4955bdd0e958f733b9
3,018
ipynb
Jupyter Notebook
notebooks/exploratory/307_afox_papermill.ipynb
alanfox/spg_fresh_blob_202104
e0221e31d690b08585c1d75b34398610f91fc2ba
[ "MIT" ]
null
null
null
notebooks/exploratory/307_afox_papermill.ipynb
alanfox/spg_fresh_blob_202104
e0221e31d690b08585c1d75b34398610f91fc2ba
[ "MIT" ]
null
null
null
notebooks/exploratory/307_afox_papermill.ipynb
alanfox/spg_fresh_blob_202104
e0221e31d690b08585c1d75b34398610f91fc2ba
[ "MIT" ]
null
null
null
26.707965
162
0.58383
[ [ [ "## Prepare papermill for schulung3.geomar.de\n\n Make sure you have activated the correct kernel\n Install kernel manually", "_____no_output_____" ] ], [ [ "!python -m ipykernel install --user --name parcels-container_2021.09.29-09ab0ce", "Installed kernelspec parcels-container_2021.09.29-09ab0ce in /home/jupyter-workshop007/.local/share/jupyter/kernels/parcels-container_2021.09.29-09ab0ce\n" ], [ "!jupyter kernelspec list", "Available kernels:\n parcels-container_2021.03.17-6c459b7 /home/jupyter-workshop007/.local/share/jupyter/kernels/parcels-container_2021.03.17-6c459b7\n parcels-container_2021.09.29-09ab0ce /home/jupyter-workshop007/.local/share/jupyter/kernels/parcels-container_2021.09.29-09ab0ce\n py3_lagrange_v2.2.2 /home/jupyter-workshop007/.local/share/jupyter/kernels/py3_lagrange_v2.2.2\n python3 /opt/tljh/user/envs/parcels-container_2021.09.29-09ab0ce/share/jupyter/kernels/python3\n" ] ], [ [ "### Run papermill on schulung3.geomar.de", "_____no_output_____" ] ], [ [ "%%bash\n\nfor year in {1990..2019};\ndo\npapermill 307_afox_trackendsandpaths_sumsandmeans_nonorth_final.ipynb \\\n ../executed/307_afox_fullstats/307_afox_trackendsandpaths_sumsandmeans_nonorth_final_${year}.ipynb \\\n -p year $year \\\n -p nsubsets 32 \\\n -k parcels-container_2021.09.29-09ab0ce\ndone\n\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
4a6982bde0e39e63c92a8ddc72a6574e4041d805
6,678
ipynb
Jupyter Notebook
ONNX/sklearn-onnx.ipynb
liaison/python
6930343918c8a21d871feee972529822d05ae9dc
[ "Apache-2.0" ]
3
2020-04-19T01:07:41.000Z
2021-02-05T02:29:50.000Z
ONNX/sklearn-onnx.ipynb
liaison/python
6930343918c8a21d871feee972529822d05ae9dc
[ "Apache-2.0" ]
null
null
null
ONNX/sklearn-onnx.ipynb
liaison/python
6930343918c8a21d871feee972529822d05ae9dc
[ "Apache-2.0" ]
null
null
null
38.601156
1,145
0.615154
[ [ [ "### Abstract\n\nThis notebook demonstrates how to convert the models from sklearn to ONNX and do the inference from ONNX runtime.\n", "_____no_output_____" ], [ "#### References:\n\n- [sklearn-onnx](http://onnx.ai/sklearn-onnx/)\n\n- [github sklearn-onnx](https://github.com/onnx/sklearn-onnx)", "_____no_output_____" ], [ "### Train Model", "_____no_output_____" ] ], [ [ "from sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestClassifier\niris = load_iris()\nX, y = iris.data, iris.target\nX_train, X_test, y_train, y_test = train_test_split(X, y)\nclr = RandomForestClassifier()\nclr.fit(X_train, y_train)", "_____no_output_____" ] ], [ [ "### Convert Model to ONNX", "_____no_output_____" ] ], [ [ "# Convert into ONNX format with onnxmltools\nfrom skl2onnx import convert_sklearn\nfrom skl2onnx.common.data_types import FloatTensorType\ninitial_type = [('float_input', FloatTensorType([1, 4]))]\nonx = convert_sklearn(clr, initial_types=initial_type)\nwith open(\"rf_iris.onnx\", \"wb\") as f:\n f.write(onx.SerializeToString())", "The maximum opset needed by this model is only 9.\nThe maximum opset needed by this model is only 1.\n" ], [ "!ls -hl", "total 12K\r\n-rw-r--r-- 1 lguo ama-unix 7.0K Jul 24 14:15 rf_iris.onnx\r\n-rw-r--r-- 1 lguo ama-unix 2.9K Jul 24 14:15 sklearn-onnx.ipynb\r\n" ] ], [ [ "### Inference with ONNX", "_____no_output_____" ] ], [ [ "# Compute the prediction with ONNX Runtime\nimport onnxruntime as rt\nimport numpy\nsess = rt.InferenceSession(\"rf_iris.onnx\")\ninput_name = sess.get_inputs()[0].name\nlabel_name = sess.get_outputs()[0].name\npred_onx = sess.run([label_name], {input_name: X_test.astype(numpy.float32)})[0]", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
4a698e102fd40482e95a7097dbe64689a364c2c5
79,764
ipynb
Jupyter Notebook
examples/Notebooks/flopy3_mnw2package_example.ipynb
pjhaest/flopy
369893b6e58cf37bd09c95c6e7cb129c74359214
[ "BSD-3-Clause" ]
null
null
null
examples/Notebooks/flopy3_mnw2package_example.ipynb
pjhaest/flopy
369893b6e58cf37bd09c95c6e7cb129c74359214
[ "BSD-3-Clause" ]
null
null
null
examples/Notebooks/flopy3_mnw2package_example.ipynb
pjhaest/flopy
369893b6e58cf37bd09c95c6e7cb129c74359214
[ "BSD-3-Clause" ]
null
null
null
32.477199
605
0.371295
[ [ [ "# FloPy\n\n## MNW2 package example", "_____no_output_____" ] ], [ [ "from __future__ import print_function\nimport sys\nimport os\nimport numpy as np\ntry:\n import pandas as pd\nexcept:\n pass\n\n# run installed version of flopy or add local path\ntry:\n import flopy\nexcept:\n fpth = os.path.abspath(os.path.join('..', '..'))\n sys.path.append(fpth)\n import flopy\n\nprint(sys.version)\nprint('numpy version: {}'.format(np.__version__))\ntry:\n print('pandas version: {}'.format(pd.__version__))\nexcept:\n pass\nprint('flopy version: {}'.format(flopy.__version__))", "3.6.4 | packaged by conda-forge | (default, Dec 23 2017, 16:54:01) \n[GCC 4.2.1 Compatible Apple LLVM 6.1.0 (clang-602.0.53)]\nnumpy version: 1.14.0\npandas version: 0.22.0\nflopy version: 3.2.9\n" ] ], [ [ "### Make an MNW2 package from scratch", "_____no_output_____" ] ], [ [ "m = flopy.modflow.Modflow('mnw2example', model_ws='data')\ndis = flopy.modflow.ModflowDis(nrow=5, ncol=5, nlay=3, nper=3, top=10, botm=0, model=m)", "_____no_output_____" ] ], [ [ "### MNW2 information by node\n(this could be prepared externally from well reconds and read in from a csv or excel file)\n* this table has two multi-node wells, the first (well1) consisting of two nodes that are manually specified\n(where the variable **rw** is specified by node)\n* node that some variables that are constant for the whole well are also included (losstype, zpump, etc.)", "_____no_output_____" ] ], [ [ "node_data = pd.DataFrame([[1, 1, 9.5, 7.1, 'well1', 'skin', -1, 0, 0, 0, 1., 2., 5., 6.2],\n [1, 1, 7.1, 5.1, 'well1', 'skin', -1, 0, 0, 0, 0.5, 2., 5., 6.2],\n [3, 3, 9.1, 3.7, 'well2', 'skin', -1, 0, 0, 0, 1., 2., 5., 4.1]], \n columns=['i', 'j', 'ztop', 'zbotm', 'wellid', 'losstype', 'pumploc', 'qlimit', 'ppflag', 'pumpcap', \n 'rw', 'rskin', 'kskin', 'zpump'])\nnode_data", "_____no_output_____" ] ], [ [ "#### convert the DataFrame to a rec array for compatibility with flopy", "_____no_output_____" ] ], [ [ "node_data = node_data.to_records()\nnode_data", "_____no_output_____" ] ], [ [ "### Stress period information\n(could also be developed externally)", "_____no_output_____" ] ], [ [ "stress_period_data = pd.DataFrame([[0, 'well1', 0],\n [1, 'well1', 100.0],\n [0, 'well2', 0],\n [1, 'well2', 1000.]], columns=['per', 'wellid', 'qdes'])\nstress_period_data", "_____no_output_____" ], [ "pers = stress_period_data.groupby('per')\nstress_period_data = {i: pers.get_group(i).to_records() for i in [0, 1]}\nstress_period_data", "_____no_output_____" ] ], [ [ "### Make ``ModflowMnw2`` package object\n* note that extraneous columns in node_data and stress_period_data are ignored\n* if itmp is positive, it must equal the number of active wells being specified in ``stress_period_data``, otherwise the package class will raise an error.", "_____no_output_____" ] ], [ [ "mnw2 = flopy.modflow.ModflowMnw2(model=m, mnwmax=2,\n node_data=node_data, \n stress_period_data=stress_period_data, \n itmp=[2, 2, -1], # reuse second per pumping for last stress period\n )", "_____no_output_____" ], [ "# \"nodtot\" is computed automatically\nmnw2.nodtot", "_____no_output_____" ], [ "pd.DataFrame(mnw2.node_data)", "_____no_output_____" ], [ "pd.DataFrame(mnw2.stress_period_data[0])", "_____no_output_____" ], [ "pd.DataFrame(mnw2.stress_period_data[1])", "_____no_output_____" ], [ "tmp = flopy.modflow.ModflowMnw2(model=m,\n itmp=[1, 1, -1], # reuse second per pumping for last stress period\n )", "WARNING: unit 34 of package MNW2 already in use\n****Warning -- two packages of the same 
type: <class 'flopy.modflow.mfmnw2.ModflowMnw2'> <class 'flopy.modflow.mfmnw2.ModflowMnw2'>\nreplacing existing Package...\n" ] ], [ [ "### empty ``node_data`` and ``stress_period_data`` tables can also be generated by the package class, and then filled", "_____no_output_____" ] ], [ [ "node_data = tmp.get_empty_node_data(3)\nnode_data", "_____no_output_____" ] ], [ [ "### Mnw objects\nat the base of the flopy mnw2 module is the **Mnw** object class, which describes a single multi-node well.\nA list or dict of **Mnw** objects can be used to build a package (using the example above):\n```\nflopy.modflow.ModflowMnw2(model=m, mnwmax=2,\n mnw=<dict or list of Mnw objects>,\n itmp=[1, 1, -1], # reuse second per pumping for last stress period\n )\n```\nor if node_data and stress_period_data are supplied, the **Mnw** objects are created on initialization of the ModflowMnw2 class instance, and assigned to the ```.mnw``` attribute, as items in a dictionary keyed by ```wellid```.", "_____no_output_____" ] ], [ [ "mnw2.mnw", "_____no_output_____" ], [ "mnw2.mnw['well1'].__dict__", "_____no_output_____" ] ], [ [ "Note that Mnw object attributes for variables that vary by node are lists (e.g. ``rw`` above)\n\n#### Each Mnw object has its own ``node_data`` and ``stress_period_data``", "_____no_output_____" ] ], [ [ "pd.DataFrame(mnw2.mnw['well1'].node_data)", "_____no_output_____" ] ], [ [ "#### Instead of a dict keyed by stress period, Mnw.stress_period_data is a recarray with pumping data listed by stress period for that well\n* note that data for period 2, where ``itmp`` < 1, is shown (was copied from s.p. 1 during construction of the **Mnw** object)", "_____no_output_____" ] ], [ [ "pd.DataFrame(mnw2.mnw['well2'].stress_period_data)", "_____no_output_____" ] ], [ [ "### Build the same package using only the ``Mnw`` objects", "_____no_output_____" ] ], [ [ "mnw2fromobj = flopy.modflow.ModflowMnw2(model=m, mnwmax=2,\n mnw=mnw2.mnw,\n itmp=[2, 2, -1], # reuse second per pumping for last stress period\n )", "WARNING: unit 34 of package MNW2 already in use\n****Warning -- two packages of the same type: <class 'flopy.modflow.mfmnw2.ModflowMnw2'> <class 'flopy.modflow.mfmnw2.ModflowMnw2'>\nreplacing existing Package...\n" ], [ "pd.DataFrame(mnw2fromobj.node_data)", "_____no_output_____" ], [ "pd.DataFrame(mnw2fromobj.stress_period_data[0])", "_____no_output_____" ], [ "pd.DataFrame(mnw2fromobj.stress_period_data[1])", "_____no_output_____" ] ], [ [ "### By default, the ``node_data`` and ``stress_period_data`` tables attached to the ``ModflowMnw2`` package class are definitive\n* on writing of the package output (``mnw2.write_file()``), the **Mnw** objects are regenerated from the tables. This setting is controlled by the default argument ``use_tables=True``. To write the package file using the **Mnw** objects (ignoring the tables), use ``mnw2.write_file(use_tables=False)``. 
", "_____no_output_____" ] ], [ [ "per1 = flopy.modflow.ModflowMnw2.get_empty_stress_period_data(itmp=2)\nper1", "_____no_output_____" ] ], [ [ "### Write an MNW2 package file and inspect the results", "_____no_output_____" ] ], [ [ "mnw2.write_file(os.path.join('data/test.mnw2'))", "_____no_output_____" ], [ "junk = [print(l.strip('\\n')) for l in open('data/test.mnw2').readlines()]", "# MNW2 package for MODFLOW-2005, generated by Flopy.\n2 0 0\nwell1 -2\n skin -1 0 0 0\n -1.0000000E+00 2.0000000E+00 5.0000000E+00\n 7.0999999E+00 5.0999999E+00 2 2 5.0000000E-01\n 9.5000000E+00 7.0999999E+00 2 2 1.0000000E+00\n 6.1999998E+00\nwell2 -1\n skin -1 0 0 0\n 1.0000000E+00 2.0000000E+00 5.0000000E+00\n 9.1000004E+00 3.7000000E+00 4 4\n 4.0999999E+00\n2 Stress Period 1\nwell1 0.0000000E+00\nwell2 0.0000000E+00\n2 Stress Period 2\nwell1 1.0000000E+02\nwell2 1.0000000E+03\n-1 Stress Period 3\n" ] ], [ [ "### Load some example MNW2 packages", "_____no_output_____" ] ], [ [ "path = os.path.join('..', '..', 'examples', 'data', 'mnw2_examples')\ncpth = os.path.join('..', '..', 'autotest', 'data')\nm = flopy.modflow.Modflow('MNW2-Fig28', model_ws=cpth)\ndis = flopy.modflow.ModflowDis.load(os.path.join(path, 'MNW2-Fig28.dis'), m)", "_____no_output_____" ], [ "m.get_package_list()", "_____no_output_____" ], [ "mnw2pth = os.path.join(path, 'MNW2-Fig28.mnw2')\nmnw2 = flopy.modflow.ModflowMnw2.load(mnw2pth, m)", "_____no_output_____" ], [ "pd.DataFrame(mnw2.node_data)", "_____no_output_____" ], [ "pd.DataFrame(mnw2.stress_period_data[0])", "_____no_output_____" ], [ "mnw2.mnw", "_____no_output_____" ], [ "pd.DataFrame(mnw2.mnw['Well-A'].stress_period_data)", "_____no_output_____" ], [ "path = os.path.join('..', '..', 'examples', 'data', 'mnw2_examples')\ncpth = os.path.join('data')\nm = flopy.modflow.Modflow('br', model_ws=cpth)\nmnw2 = flopy.modflow.ModflowMnw2.load(path + '/BadRiver_cal.mnw2', m)", "_____no_output_____" ], [ "df = pd.DataFrame(mnw2.node_data)\ndf.loc[:, df.sum(axis=0) != 0]", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a69c0578997dce13bb0fe589f3c62e39469edba
145,786
ipynb
Jupyter Notebook
notebooks/dl-chollet/scripts/Dogs and Cats Visualising Heatmaps.ipynb
yantraguru/deeplearning
f15fbf831cf179d3531d11784ab0cf4dc390f4a9
[ "Apache-2.0" ]
1
2019-04-27T11:32:40.000Z
2019-04-27T11:32:40.000Z
notebooks/dl-chollet/scripts/Dogs and Cats Visualising Heatmaps.ipynb
yantraguru/deeplearning
f15fbf831cf179d3531d11784ab0cf4dc390f4a9
[ "Apache-2.0" ]
null
null
null
notebooks/dl-chollet/scripts/Dogs and Cats Visualising Heatmaps.ipynb
yantraguru/deeplearning
f15fbf831cf179d3531d11784ab0cf4dc390f4a9
[ "Apache-2.0" ]
null
null
null
537.95572
133,480
0.946929
[ [ [ "from IPython.core.display import display, HTML\ndisplay(HTML(\"<style>.container { width:85% !important; }</style>\"))\n\n%reload_ext autoreload", "_____no_output_____" ], [ "import os\nimport numpy as np\n\nimport cv2\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.python.client import device_lib\n\nfrom tensorflow.keras import layers,models,utils\nfrom tensorflow.keras import optimizers\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.keras.preprocessing import image\nfrom tensorflow.keras import backend as K\n\nfrom tensorflow.keras.applications import VGG16\nfrom tensorflow.keras.applications.vgg16 import preprocess_input\nfrom tensorflow.keras.applications.vgg16 import decode_predictions\n\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\n\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "model = VGG16(weights='imagenet')", "WARNING:tensorflow:From /home/algolaptop8/anaconda3/envs/dl-env/lib/python3.7/site-packages/tensorflow/python/ops/resource_variable_ops.py:435: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nColocations handled automatically by placer.\nDownloading data from https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels.h5\n553467904/553467096 [==============================] - 520s 1us/step\n" ], [ "img_path = './../data/misc/elephant.jpeg'\nimg = image.load_img(img_path, target_size=(224, 224))\nx = image.img_to_array(img)\nx = np.expand_dims(x, axis=0)\nx = preprocess_input(x)", "_____no_output_____" ], [ "preds = model.predict(x)\nprint('Predicted:', decode_predictions(preds, top=3)[0])", "Downloading data from https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json\n40960/35363 [==================================] - 0s 10us/step\nPredicted: [('n02504458', 'African_elephant', 0.8474201), ('n01871265', 'tusker', 0.117097124), ('n02408429', 'water_buffalo', 0.023289043)]\n" ], [ "np.argmax(preds[0])", "_____no_output_____" ], [ "african_elephant_output = model.output[:, 386]\nlast_conv_layer = model.get_layer('block5_conv3')\ngrads = K.gradients(african_elephant_output, last_conv_layer.output)[0]\npooled_grads = K.mean(grads, axis=(0, 1, 2))\niterate = K.function([model.input], [pooled_grads, last_conv_layer.output[0]])\npooled_grads_value, conv_layer_output_value = iterate([x])\nfor i in range(512):\n conv_layer_output_value[:, :, i] *= pooled_grads_value[i]\n \nheatmap = np.mean(conv_layer_output_value, axis=-1)", "_____no_output_____" ], [ "heatmap = np.maximum(heatmap, 0)\nheatmap /= np.max(heatmap)\nplt.matshow(heatmap)", "_____no_output_____" ], [ "img = cv2.imread(img_path)\nheatmap = cv2.resize(heatmap, (img.shape[1], img.shape[0]))\nheatmap = np.uint8(255 * heatmap)\nheatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)\nsuperimposed_img = heatmap * 0.4 + img\ncv2.imwrite('./../data/misc/elephant_cam.jpg', superimposed_img)", "_____no_output_____" ], [ "plt.imshow(superimposed_img.astype('int'))", "Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a69c44a69b94cfaee18ea5ffaa4578075d11013
20,807
ipynb
Jupyter Notebook
_notebooks/2022-03-23-Decision-Trees.ipynb
geon-youn/DunGeon
70792a1042630fbf114fe43263b851d0b57d5b18
[ "Apache-2.0" ]
null
null
null
_notebooks/2022-03-23-Decision-Trees.ipynb
geon-youn/DunGeon
70792a1042630fbf114fe43263b851d0b57d5b18
[ "Apache-2.0" ]
null
null
null
_notebooks/2022-03-23-Decision-Trees.ipynb
geon-youn/DunGeon
70792a1042630fbf114fe43263b851d0b57d5b18
[ "Apache-2.0" ]
null
null
null
56.849727
725
0.696689
[ [ [ "# \"You get a decision tree! And YOU get a decision tree!\"\n> \"Oprah was so close to discovering random forests.\"\n\n- comments: true\n- categories: [tabular]", "_____no_output_____" ], [ "Our first method for training structured tabular data is to use ensembles of decision trees. \n\n--- \n**Decision trees**: a decision tree asks a series of yes/no questions about the data. After each question, the data at that part splits between yes/no. After one or more questions, predictions can be formed by finding the group the data is part of at the bottom of the tree and returning the average value of the targets in that group. \n\n---\n\nTo train a decision tree, we follow a greedy approach with six steps:\n\n1. Loop through each column of the data set.\n2. For each column, loop through each possible **level** of that column.\n\n--- \n**Level**: for most continuous and some categorical variables, when we say levels, we're referring to variables that can be ordered. For example, sizes like \"Small\" < \"Medium\" < \"Large\". For other categorical variables, we refer to the actual values.\n\n---\n\n3. Try splitting the data into two groups, based on whether they're greater than or less than that value (or equal to or not equal to for other categorical variables). \n4. Find the average prediction for each of those two groups and use your metric to see how close that is to the actual value of each of the items in that group. \n5. After looping through all the possible columns and levels for each column, pick the split point that gave the best prediction. \n6. Now, we have two groups for our data set. Treat each of them as new data sets and repeat from step 1 until each group reaches your minimum size threshold. \n\nWith decision trees, you have to be careful with how many leaf nodes you end up with. If you have too many (close to the number of data entries), then your model will overfit. ", "_____no_output_____" ], [ "## Overfitting? No problem.", "_____no_output_____" ], [ "One year before his retirement, Leo Breiman published a paper on \"bagging\". Instead of training on the entire training set (or mini-batches), you \n1. randomly choose a subset of the rows of your data, \n2. train a model using this subset, \n3. save the model, and \n4. train more models on different subsets of the data. \n\nEventually, you end up with a number of models. To make a prediction, you predict using all of the models and take the average. \n\nEach of the models have errors since they're not trained on the full training set, but since different models have different errors (and these errors aren't correlated with each other; i.e., they're independent) the errors end up cancelling out when we take the average. \n\nSeven years later, Breiman also coined \"random forests\" where you apply bagging to decision trees not only by randomly choosing a subset of the *rows* of your data, but you also randomly choosing a subset of the *columns when choosing a split* in each decision tree. \n\n---\n**Random forests**: a specific type of an *ensemble of decision trees*, where bagging is used to combine the results of several decision trees that were trained on random subsets of the rows of the data where each split made on a random subset of the columns of the data. \n\n--- \n\nSince the errors tend to cancel out, it also means the trees are less susceptible to hyperparameter changes. We can also have as many trees as we want; in fact, the error rate usually decreases as we add more trees. 
", "_____no_output_____" ], [ "## Interpreting the model", "_____no_output_____" ], [ "Once we trained our model, if the error rate for the validation set is higher than the training set, we want to make sure it's from generalization (or extrapolation) problems and not overfitting.\n\n**Out-of-bag error** allows us to check if we're overfitting without the need of a validation set. Since each tree in a random forest is trained on a subset of the data, we can form a validation set for each tree as the rows not included in training for that tree. \n\nWhat makes out-of-bag error different from validation set error is that the data in the former is within the range of the training set, while the validation set is usually outside of the range; this range is most important for time series data since the validation set should contain data that's in the future compared to the training set. \n\nSo, if our out-of-bag error is lower than the validation set error, then the model is not overfitting and is instead having other problems.\n\nIn general, we want to interpret in our model:\n- how confident are we in our predictions for a particular row of data?\n- for making our predictions on a specific row of data, what were the most important columns, and how did they influence the prediction?\n- which columns are the most important; and which columns can we ignore (remove them from training)?\n- which columns are *effectively redundant* in terms of prediction?\n- how do predictions vary as we vary the columns (as in, what kind of relationship do the columns have with the predictions)? \n", "_____no_output_____" ], [ "## Confidence for a prediction on a particular row of data", "_____no_output_____" ], [ "When we want to predict for a particular row of data, we pass the data to each tree in our random forest and take the average of the results. To find the *relative* confidence of the prediction, we can take the standard deviation of the predictions instead of the average. So, if the standard deviation is high, we should be more wary of the prediction since the trees disagree more than if the standard deviation was low. ", "_____no_output_____" ], [ "## Feature importance", "_____no_output_____" ], [ "It's important to understand *how* our models are making predictions, not just how accuracte the predictions are. \n\nTo find the importance of each column (feature) in our data, we can loop through each tree and recursively explore each branch. At each branch, look at what column was used for that split and how much the model improved at that split. The improvement, which is weighted by the number of rows in that group is added to the importance score for that column. The importance score is summed across all branches of all trees. Then, you can normalize the scores (so that they sum to 1) and sort them in ascending order to see the least important columns, and by descending order to see the most important columns.\n\nThe \"how\" is mostly used in production (and not in model training) to see how the data is leading to predictions. To find how each column influenced the prediction, we take a single row of the data and pass it through each of the decision trees in our random forest. At each split point, record how the prediction changes (increases or decreases) compared to the parent node of the tree and add it to the column's score. 
Then, combine the scores for each of the columns and you can see how each column increased or decreased the prediction relative to the parent node of the random forest (which is the average of the average of the target in each row in the batch of rows in the batch of trees in the random forest). ", "_____no_output_____" ], [ "## Ignoring features", "_____no_output_____" ], [ "Once you've found the importance of each column, you can set a threshold such that you ignore features whose importance scores were lower than that threshold (this is why we normalized the scores). \n\nTry retraining your model with those columns ignored and you can decide to keep the change (if the accuracy hasn't changed much) or change your threshold (if the accuracy decreased significantly). In any case, it's nicer to train your model with fewer unimportant columns since you'll be able to train future models on the same data set faster. ", "_____no_output_____" ], [ "## Redundant features", "_____no_output_____" ], [ "To find redundant columns, you want to find how similar each column is to another. To do so, you calculate the *rank correlation*, where all the values in each column are replaced with their *rank* relative to other values in the same column (think of it like a descending `argsort`, where you give each row in a specific column the index it would have for the column to be sorted in descending order). Then, the *correlation* is calculated (kind of like the correlation coefficient $r$, but with ranks). Columns with similar rank correlations may be synonyms for each other and one (or more) of them could be removed. \n\nWhen removing redundant columns, retrain the model where you remove only one redundant column at a time. Then, try removing them in groups and eventually altogether. The point of this tedious task is to make sure we're not significantly reducing the accuracy of the model. And, some columns, although they seem redundant, may not be redundant and would be important to keep in the model. \n\nAlthough not necessary, you should remove unimportant and redundant columns when possible since it'll simplify your model.", "_____no_output_____" ], [ "## Relationship between columns and predictions", "_____no_output_____" ], [ "To find the relationship between a column and the prediction, you could guess that we should have a row where we keep all columns constant except for the column in question.\n\nBut, we can't just take the average of the predictions for a specific level of a column since other variables can change. Instead, we replace every single value in the column with a specific level in the validation set, and record the prediction with the new validation set as the input. Then, we do the same for every other level of that column. \n\nWith these predictions, we can form a line graph with the levels as the x-axis and the predictions as the y-axis. We call this graph a **partial dependence plot** (a rough code sketch of the procedure appears below). \n\nSometimes, you train your model and \n- your accuracy is too good to be true, \n- some features don't make sense to be predictors, or \n- *the partial dependence plots look weird*. \n\nIf so, your data might have **data leakage** where the training set contains information that wouldn't be available in the data you give at inference (i.e., when using the model in practice and/or your validation set). \n\nData leakage comes from subtleties that give away the correct answer. 
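As a rough sketch of that partial-dependence procedure (again, not code from the post; the model, data frame, and column names are placeholders, and a fitted scikit-learn-style regressor plus a pandas validation frame are assumed):

```python
import numpy as np

def partial_dependence(model, X_valid, column, levels):
    # for each level: overwrite the whole column, predict, and average
    averages = []
    for level in levels:
        X_mod = X_valid.copy()
        X_mod[column] = level              # every row gets the same level
        averages.append(model.predict(X_mod).mean())
    return np.array(averages)

# plot `levels` on the x-axis against the returned averages on the y-axis
```

Data leakage, on the other hand, usually shows up in the story behind a feature rather than in any single plot.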
For example, if you trained a model to predict the weather and the precipitation was in an available column (and/or it was only filled out on rainy days), you bet your model would predict it was \"raining\" on \"rainy days\" if there was any precipitation and \"sunny\" on \"sunny days\" otherwise. So, when you interpret the model later, you might see really high accuracy, with precipitation being a high predictor. \n\nIn preventing data leakage, train your model first and then look for data leakage (and then clean or reprocess your data); this process is the same with how you would train your model first before performing data cleaning. ", "_____no_output_____" ], [ "## We can't always use random forests", "_____no_output_____" ], [ "With time series data, you usually want to have a model that can generalize to new data and extrapolate accurately. The downside of random forests is that it can only predict within the range of its training data. So, if the value in the validation set is outside of the range of the training set, the accuracy of the random forest will always be low since it can't predict values that high. \n\nWhy might this be the case? A random forest returns a prediction based on the average of the predictions of its decision trees, where each tree predicts the average of the targets in the rows in a leaf node. So, a random forest can never predict a value that's outside of the range of the training set. \n\nIn a general sense, a random forest can't generalize to **out-of-domain data**, so we need to make sure our validation, test, and future data sets contain the same kind of data as our training set. \n\nTo test if there's out-of-(the training set's)-domain data, we can build a random forest that predicts which row is in the validation or training set. To do so, you can concatenate the validation and training set and label the rows by validation or training. Then, through feature importance, if there's a particular column that is more prominent in the validation set, there will be a nonuniform distribution of importance scores. \n\nSometimes, you can remove the columns with high feature importance and improve the accuracy of the model since those columns might be related to another column (hence removing redundant columns). \n\nRemoving those columns can also make your model more resilient over time since those columns may be affected by **domain shift** where the data put into the model is significantly different from the training data. ", "_____no_output_____" ], [ "## Boosting instead of bagging", "_____no_output_____" ], [ "Instead of random forests, which forms an ensemble of decision trees through *bagging*, we can also make **gradient boosted machines** which uses *boosting* instead of bagging. \n\nBagging takes the average of the predictions from each decision tree. Boosting, on the other hand, *adds* the predictions of each decision tree. So, you also train your decision trees differently:\n- train a decision tree that *underfits* the targets of your training set,\n- calculate residuals by subtracting the predictions from the targets,\n- repeat from the beginning, but train your future models with the residuals as the targets, and \n- continue training more trees until you reach a certain maximum or your validation metric gets worse. 
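A minimal sketch of that boosting loop, with the same caveats as the earlier snippets (illustrative names, NumPy arrays assumed; shallow scikit-learn trees are used so each one underfits):

```python
from sklearn.tree import DecisionTreeRegressor

def fit_boosted_trees(X, y, n_trees=50, max_depth=3):
    trees, residual = [], y.astype(float).copy()
    for _ in range(n_trees):
        tree = DecisionTreeRegressor(max_depth=max_depth)  # kept shallow on purpose
        tree.fit(X, residual)
        residual = residual - tree.predict(X)              # what is still left to explain
        trees.append(tree)
    return trees

def predict_boosted(trees, X):
    # note: the trees' predictions are *added*, not averaged as in bagging
    return sum(tree.predict(X) for tree in trees)
```

Real gradient-boosting libraries also scale each tree's contribution by a learning rate and stop early on a validation metric, but adding up successive fits to the residuals is the core of it.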
\n\nWith boosting, we try to minimize the error by having the residuals become as small as possible by underfitting them.\n\nUnlike random forests, the trees aren't independent of each other, so the more trees we train, the more the overall model will overfit the training set. ", "_____no_output_____" ], [ "## Free accuracy boost", "_____no_output_____" ], [ "In training a model for tabular data, you can get a boost in accuracy by training a random forest model, doing some analysis like feature importance and partial dependence plots to remove redundant columns, and then training a neural network that uses *embeddings for the categorical variables/columns*. \n\nThen, we *retrain* our random forest model, but instead of creating *levels* for the categorical variables, we use the *embeddings trained by the neural network*. So, instead of using a neural network at inference, you can use an improved random forest model. \n\nThe same can be done for gradient boosted machines, and any model that uses categorical variables. Just use the embeddings trained by the neural network. ", "_____no_output_____" ], [ "## Conclusion", "_____no_output_____" ], [ "We covered a machine learning technique called ensembles of decision trees. Here, we mentioned two methods of ensembling: bagging and boosting. \n\nWith bagging, you form a *random forest* that's quick and easy to train. Random forests are also resistant to hyperparameter changes and, since the trees are independent, it's very difficult to overfit as you increase the number of trees.\n\nWith boosting, you form a *gradient boosted machine* (or *gradient boosted decision tree*) that is just as fast to train as a random forest in theory, but requires more hyperparameter tuning and becomes more susceptible to overfitting the more trees you train, since the trees aren't independent of each other. However, gradient boosted machines tend to have higher accuracy than random forests.\n\nOverall, because of the limitations of decision trees, both random forests and gradient boosted machines can't extrapolate to out-of-domain data. Therefore, you sometimes have to make a *neural network*. \n\nNeural networks take the longest to train and require more preprocessing like batch normalization (which also needs to be done at inference). With neural networks, you have to be careful with your hyperparameters since they can lead to overfitting. However, neural networks are great at extrapolating and can have the highest accuracy of the three models. \n\nWith neural networks, you can also use ensembles of decision trees to do some of the preprocessing to make them faster to train. And, once you train a neural network, you can use the embeddings trained by the neural network as the inputs for the categorical variables in another ensemble of decision trees on the same data set. Doing so tends to produce much higher accuracy. \n\nIf the task doesn't require extrapolation (all future predictions are expected to be in the same range as the training set), then you can use the improved ensemble of decision trees since it will be faster at inference compared to neural networks. \n\nMoreover, if the response time at inference isn't a major problem, you can even form an ensemble of neural networks and an ensemble of decision trees where you take the average of the predictions of each of the models. 
Taking the theory behind random forests, since the two (or more) models were trained by two (or more) very different algorithms, the errors each make are independent of each other and will cancel each other out, leading to higher accuracy with less chances of overfitting. Still, it won't make a bad model a good model. ", "_____no_output_____" ] ] ]
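The out-of-domain check described in the cells above (train a classifier to tell training rows from validation rows, then look at its feature importances) can be sketched in a few lines. This is only an illustrative sketch, not code from the notebook: the `train_df`/`valid_df` names are placeholders and the feature columns are assumed to be numeric.

```python
# Illustrative sketch: detect out-of-domain data by training a classifier to
# distinguish training rows from validation rows.
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier

def out_of_domain_importances(train_df: pd.DataFrame, valid_df: pd.DataFrame) -> pd.Series:
    # Label each row by the set it came from, then stack the two sets together.
    combined = pd.concat([train_df, valid_df], ignore_index=True)
    is_valid = np.r_[np.zeros(len(train_df)), np.ones(len(valid_df))]

    # If this classifier can tell the sets apart, some columns differ between them.
    clf = RandomForestClassifier(n_estimators=100, n_jobs=-1, random_state=0)
    clf.fit(combined, is_valid)

    # Columns with high importance here are candidates for domain shift and removal.
    return pd.Series(clf.feature_importances_, index=combined.columns).sort_values(ascending=False)
```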
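The boosting recipe in the bullet list above (fit a weak tree, compute residuals, fit the next tree to the residuals, sum the predictions) can also be written out directly. This is a hedged sketch with made-up names (`X`, `y`), and the learning-rate shrinkage is an extra detail not mentioned in the notebook.

```python
# Illustrative sketch of the boosting loop described above: each shallow tree is
# fit to the residuals left over by the sum of the previous trees.
import numpy as np
from sklearn.tree import DecisionTreeRegressor

def fit_boosted_trees(X, y, n_trees=50, learning_rate=0.1, max_depth=3):
    trees = []
    prediction = np.zeros(len(y), dtype=float)
    for _ in range(n_trees):
        residuals = y - prediction                          # what the ensemble still gets wrong
        tree = DecisionTreeRegressor(max_depth=max_depth)   # shallow, so it deliberately underfits
        tree.fit(X, residuals)
        prediction += learning_rate * tree.predict(X)       # predictions are *added*, not averaged
        trees.append(tree)
    return trees

def predict_boosted_trees(trees, X, learning_rate=0.1):
    return learning_rate * sum(tree.predict(X) for tree in trees)
```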
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
4a69c640cf194280aff70bb6e434589c5d315d18
639,776
ipynb
Jupyter Notebook
Stolen_Bronzes.ipynb
shawngraham/tdm-notebooks
45ef951170da869e1e11786fa1bb368b55b1405e
[ "CC-BY-4.0" ]
1
2020-10-21T08:38:36.000Z
2020-10-21T08:38:36.000Z
Stolen_Bronzes.ipynb
shawngraham/tdm-notebooks
45ef951170da869e1e11786fa1bb368b55b1405e
[ "CC-BY-4.0" ]
null
null
null
Stolen_Bronzes.ipynb
shawngraham/tdm-notebooks
45ef951170da869e1e11786fa1bb368b55b1405e
[ "CC-BY-4.0" ]
1
2021-03-17T20:57:39.000Z
2021-03-17T20:57:39.000Z
71.236611
83,234
0.460089
[ [ [ "<a href=\"https://colab.research.google.com/github/kuriousk516/HIST4916a-Stolen_Bronzes/blob/main/Stolen_Bronzes.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Stolen Bronzes: Western Museums and Repatriation\n\n## Introduction\n\n>\"*Walk into any European museum today and you will see the curated spoils of Empire. They sit behind plate glass: dignified, tastefully lit. Accompanying pieces of card offer a name, date and place of origin. They do not mention that the objects are all stolen*.\"\n>\n> 'Radicals in Conversation': The Brutish Museums\n\nPublic history and digital humanities offers a locus point of contending with difficult pasts. Museums, often considered bastions of knowledge, learning, and public good have fallen under an increasingly critical gaze -- and rightfully so. Public museums have been tools of colonialism, racism, and superiority centred around the supremacy of the west and its history. \n\nDigital repositories of museum archives and websites can be used to subvert the exclusionary practices employed by museums and provide tools for marginalized peoples --. The purpose of this notebook is to act as a digital tool for real life change, and it is focused on Dan Hick's [Tweet](https://twitter.com/profdanhicks/status/1375421209265983488) and book, *The Brutish Museum*.", "_____no_output_____" ] ], [ [ "%%html\n<iframe src=\"https://drive.google.com/file/d/1txSH3UkjJgLTeQW47MGLfrht7AHCEkGC/preview\" width=\"640\" height=\"480\"></iframe>", "_____no_output_____" ] ], [ [ "What I read in Dan Hicks' Tweet was a call to action. Not necessarily for the average citizen to take the bronzes back, but to start an important discussion about the nature of artifact aqcuisition and confronting how museums procure these items in the first place.\n\nThe appendix' list is a small fraction of the stolen artifacts found in hundreds of museums all over the world but it is a powerful point of focus. I want to create something, however small, that can give others the tools to have a visual representation of stolen artifacts distribution and interrogate why (mostly) western museums are the institutions holding these artifacts, what effect this has, and what's being done with them. Can anyone own art? Who has the power to decide? How do we give that power back to those who were stolen from?\n\nTo learn more about the Benin bronzes and their history, a good place to start is with the ['Radicals in Conversation'](https://www.plutobooks.com/blog/podcast-brutish-museums-benin-bronzes-decolonisation/) podcast.\n\nAnd now, what I have here is a helpful tool for all of us to answer, **\"*How close are you right this second to a looted Benin Bronze*?\"**\n\n# Data\n\nI have compiled a dataframe of all the museums listed in Hicks' appendix'; you can see the original above in his Tweet. The data is in a .CSV file stored in my [GitHub repository](https://github.com/kuriousk516/HIST4916a-Stolen_Bronzes), and you can also find screenshots of the errors I encountered and advice I recieved through the HIST4916a Discord server, some of which I will reference here when discussing data limitations. \n\n## Mapping with Folium\n\nFolium seemed the best choice for this project since it doesn't rely on Google Maps for the map itself or the data entry process. 
[This is the tutorial](https://craftingdh.netlify.app/tutorials/folium/) that I used for the majority of the data coding, and this is the [Point Map alternative](https://handsondataviz.org/mymaps.html) I considered but decided against. ", "_____no_output_____" ] ], [ [ "import lxml", "_____no_output_____" ], [ "import pandas as pd\npd.set_option(\"max_rows\", 400)\npd.set_option(\"max_colwidth\", 400)", "_____no_output_____" ], [ "import pandas, os", "_____no_output_____" ], [ "os.listdir()\n['.config', 'benin_bronze_locations2.csv', 'sample_data']", "_____no_output_____" ] ], [ [ "Here is where I ran into some trouble. I was having great difficulty in loading my .CSV file into the notebook, so I uploaded the file from my computer. Here is the alternative code to upload it using the RAW link from GitHub: \n\nurl = 'copied_raw_GH_link'\n\ndf1 = pd.read_csv(url)\n\nIf you have another (simpler) way of getting the job done, I fully encourage you altering the code to make it happen. ", "_____no_output_____" ] ], [ [ "from google.colab import files\nuploaded = files.upload()", "_____no_output_____" ] ], [ [ "In the .CSV file, I only had the name of the museums, cities, and countries. Manually inputting the necessary data for plotting the locations would be time-consuming and tedious, but I have an example using geopy and Nomatim to pull individual location info for the cases when \"NaN\" pops up when expanding the entire dataframe.", "_____no_output_____" ] ], [ [ "df1=pandas.read_csv('benin_bronze_locations2.csv', encoding = \"ISO-8859-1\", engine ='python')\ndf1", "_____no_output_____" ], [ "!pip install geopy", "Collecting geopy\n Using cached https://files.pythonhosted.org/packages/0c/67/915668d0e286caa21a1da82a85ffe3d20528ec7212777b43ccd027d94023/geopy-2.1.0-py3-none-any.whl\nCollecting geographiclib<2,>=1.49 (from geopy)\n Using cached https://files.pythonhosted.org/packages/8b/62/26ec95a98ba64299163199e95ad1b0e34ad3f4e176e221c40245f211e425/geographiclib-1.50-py3-none-any.whl\nInstalling collected packages: geographiclib, geopy\nSuccessfully installed geographiclib-1.50 geopy-2.1.0\n" ], [ "from geopy.geocoders import Nominatim", "_____no_output_____" ], [ "geolocator = Nominatim(user_agent=\"BENIN-BRONZES\", timeout=2)", "_____no_output_____" ], [ "location = geolocator.geocode(\"Ulster Museum United Kingdom\")", "_____no_output_____" ], [ "location", "_____no_output_____" ] ], [ [ "Great! Now we have the means of finding the relevant map information for individual entires. But to process the large amount of data, I followed [this YouTube tutorial](https://www.youtube.com/watch?v=0IjdfgmWzMk) for some extra help.", "_____no_output_____" ] ], [ [ "def find_location(row):\n \n place = row['place']\n \n location = geolocator.geocode(place)\n \n if location != None:\n return location.address, location.latitude, location.longitude, location.raw['importance']\n else:\n return \"Not Found\", \"Not Found\", \"Not Found\", \"Not Found\"", "_____no_output_____" ] ], [ [ "To expand on my data, I needed to add a new column to my dataframe -- the addresses of the museums. 
\n\n", "_____no_output_____" ] ], [ [ "df1[\"Address\"]=df1[\"Place\"]+\", \"+df1[\"City\"]+\", \"+df1[\"Country\"]", "_____no_output_____" ], [ "df1", "_____no_output_____" ], [ "#Then I added this string to the geocode to create a coordinates column.\n\ndf1[\"Coordinates\"]=df1[\"Address\"].apply(geolocator.geocode)", "_____no_output_____" ], [ "df1", "_____no_output_____" ] ], [ [ "After compiling the addresses and coordinates, the dataframe needed the latitude and longitudes for Folium to plot the locations on the map. ", "_____no_output_____" ] ], [ [ "df1[\"Latitude\"]=df1[\"Coordinates\"].apply(lambda x: x.latitude if x !=None else None)", "_____no_output_____" ], [ "df1[\"Longitude\"]=df1[\"Coordinates\"].apply(lambda x: x.longitude if x !=None else None)", "_____no_output_____" ], [ "df1", "_____no_output_____" ], [ "!pip install folium", "Collecting folium\n Using cached https://files.pythonhosted.org/packages/c3/83/e8cb37afc2f016a1cf4caab8d22caf7fe4156c4c15230d8abc9c83547e0c/folium-0.12.1-py2.py3-none-any.whl\nRequirement already satisfied: numpy in /opt/anaconda3/lib/python3.7/site-packages (from folium) (1.17.2)\nCollecting branca>=0.3.0 (from folium)\n Using cached https://files.pythonhosted.org/packages/61/1f/570b0615c452265d57e4114e633231d6cd9b9d275256778a675681e4f711/branca-0.4.2-py3-none-any.whl\nRequirement already satisfied: jinja2>=2.9 in /opt/anaconda3/lib/python3.7/site-packages (from folium) (2.10.3)\nRequirement already satisfied: requests in /opt/anaconda3/lib/python3.7/site-packages (from folium) (2.22.0)\nRequirement already satisfied: MarkupSafe>=0.23 in /opt/anaconda3/lib/python3.7/site-packages (from jinja2>=2.9->folium) (1.1.1)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /opt/anaconda3/lib/python3.7/site-packages (from requests->folium) (1.24.2)\nRequirement already satisfied: idna<2.9,>=2.5 in /opt/anaconda3/lib/python3.7/site-packages (from requests->folium) (2.8)\nRequirement already satisfied: chardet<3.1.0,>=3.0.2 in /opt/anaconda3/lib/python3.7/site-packages (from requests->folium) (3.0.4)\nRequirement already satisfied: certifi>=2017.4.17 in /opt/anaconda3/lib/python3.7/site-packages (from requests->folium) (2019.9.11)\nInstalling collected packages: branca, folium\nSuccessfully installed branca-0.4.2 folium-0.12.1\n" ], [ "import folium", "_____no_output_____" ], [ "beninbronze_map = folium.Map(location=[6.3350, 5.6037], zoom_start=7)\nbeninbronze_map", "_____no_output_____" ] ], [ [ "I want Benin City to be the centre of this map, a rough point of origin. The Kingdom of Benin existed in modern day Nigeria, and it's where the looted bronzes belong. Only *nine* locations in Nigeria have collections of the bronzes, as opposed to the 152 others all over Europe, America, Canada, Russia, and Japan. Nigeria needs to be the centre of the conversation of the looted bronzes and repatriation, and so it is the centre of the map being created. 
", "_____no_output_____" ] ], [ [ "def create_map_markers(row, beninbronze_map):\n folium.Marker(location=[row['lat'], row['lon']], popup=row['place']).add_to(beninbronze_map)", "_____no_output_____" ], [ "folium.Marker(location=[6.3350, 5.6037], popup=\"Send the bronzes home\").add_to(beninbronze_map)", "_____no_output_____" ], [ "beninbronze_map", "_____no_output_____" ], [ "def create_map_markers(row, beninbronze_map):\n folium.Marker(location=[row['Latitude'], row['Longitude']], popup=row['Place']).add_to(beninbronze_map)", "_____no_output_____" ] ], [ [ "Many of the data entries came up as \"NaN\" when the code was trying to find their latitude and longitude. It's an invalid entry and needs to be dropped in order for the map markers to function. This is very important to note: out of the 156 data entries, only 86 were plotted on the map. The missing coordinates need to be added to the dataframe, but that's a bit beyond the scope of this project. I invite anyone with the time to complete the map markers using the code examples above. ", "_____no_output_____" ] ], [ [ "df1.dropna(subset = [\"Latitude\"], inplace=True)", "_____no_output_____" ], [ "df1.dropna(subset = [\"Longitude\"], inplace=True)", "_____no_output_____" ], [ "nan_value = float(\"NaN\")\ndf1.replace(\"\",nan_value, inplace=True)\ndf1.dropna(subset = [\"Latitude\"], inplace=True)\ndf1.dropna(subset = [\"Longitude\"], inplace=True)", "_____no_output_____" ], [ "df1", "_____no_output_____" ], [ "df1.apply(lambda row:folium.CircleMarker(location=[row[\"Latitude\"], \n row[\"Longitude\"]]).add_to(beninbronze_map),\n axis=1)\nbeninbronze_map", "_____no_output_____" ], [ "beninbronze_map.save(\"stolen-bronzes-map.html\")", "_____no_output_____" ] ], [ [ "# Conclusion\n\nNow we have a map showing (some of) the locations of the looted Benin bronzes. It needs to be expanded to include the other locations, but I hope it helped you to think about what Dan Hicks' asked: how close are you, right this minute, to a looted Benin bronze? \n\n# Recommended Reading and Points of Reference\n\nAbt, Jeffrey. “The Origins of the Public Museum.” In A Companion to Museum Studies, 115–134. Malden, MA, USA: Blackwell Publishing Ltd, 2006.\n\nBennett, Tony. 1990. “The Political Rationality of the Museum,” Continuum: The Australian Journal of Media and Culture 2, no. 1 (1990).\n\nBivens, Joy, and Ben Garcia, Porchia Moore, nikhil trivedi, Aletheia Wittman. 2019. ‘Collections: How We Hold the Stuff We Hold in Trust’ in MASSAction, Museums As Site for Social Action, toolkit, https://static1.squarespace.com/static/58fa685dff7c50f78be5f2b2/t/59dcdd27e5dd5b5a1b51d9d8/1507646780650/TOOLKIT_10_2017.pdf\n\nDW.com. \"'A matter of fairness': New debate about Benin Bronzes in Germany.\" Published March 26, 2021. https://www.dw.com/en/a-matter-of-fairness-new-debate-about-benin-bronzes-in-germany/a-57013604\n\nHudson, David J. 2016. “On Dark Continents and Digital Divides: Information Inequality and the Reproduction of Racial Otherness in Library and Information Studies” https://atrium.lib.uoguelph.ca/xmlui/handle/10214/9862.\n\nKreps, Christina. 2008. ‘Non-western Models of Museums and Curation in Cross-cultural Perspective’in Sharon Macdonald, ed. ‘Companion to Museum Studies’.\n\nMacDonald, Sharon. 2008. “Collecting Practices” in Sharon Macdonald, ed. ‘Companion to Museum Studies’.\n\nSentance, Nathan mudyi. 2018. 
“Why Do We Collect,” Archival Decolonist blog, August 18, 2018, https://archivaldecolonist.com/2018/08/18/why-do-we-collect/\n\nhttps://www.danhicks.uk/brutishmuseums\n\nhttps://www.plutobooks.com/blog/podcast-brutish-museums-benin-bronzes-decolonisation/", "_____no_output_____" ] ] ]
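The notebook above notes that only 86 of the 156 rows geocoded successfully and leaves filling in the rest as an exercise. One possible way to finish that step, sketched here as an assumption rather than the author's own code, is to retry only the rows whose coordinates came back empty (before the dropna step), throttling requests with geopy's RateLimiter; the column names follow the dataframe built above.

```python
# Hedged sketch: re-geocode only the rows that are still missing coordinates.
from geopy.geocoders import Nominatim
from geopy.extra.rate_limiter import RateLimiter

geolocator = Nominatim(user_agent="BENIN-BRONZES", timeout=10)
geocode = RateLimiter(geolocator.geocode, min_delay_seconds=1)  # be polite to the free Nominatim service

missing = df1["Coordinates"].isna()
df1.loc[missing, "Coordinates"] = df1.loc[missing, "Address"].apply(geocode)

# Recompute latitude/longitude from whatever geocoding succeeded.
df1["Latitude"] = df1["Coordinates"].apply(lambda loc: loc.latitude if loc is not None else None)
df1["Longitude"] = df1["Coordinates"].apply(lambda loc: loc.longitude if loc is not None else None)
```

Simplifying the address string (for example, city and country only) before retrying often raises the hit rate, since Nominatim can fail on long museum names.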
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
4a69c7e4414c8a38afc1385d4b3cc55202233f55
45,102
ipynb
Jupyter Notebook
docs/tutorials/intro_to_openfermion.ipynb
JosephBless/OpenFermion
5b43a28ee1095eb032cc4c0efd2af1e8206110e0
[ "Apache-2.0" ]
1
2021-12-18T00:12:16.000Z
2021-12-18T00:12:16.000Z
docs/tutorials/intro_to_openfermion.ipynb
JosephBless/OpenFermion
5b43a28ee1095eb032cc4c0efd2af1e8206110e0
[ "Apache-2.0" ]
null
null
null
docs/tutorials/intro_to_openfermion.ipynb
JosephBless/OpenFermion
5b43a28ee1095eb032cc4c0efd2af1e8206110e0
[ "Apache-2.0" ]
null
null
null
49.891593
1,169
0.616425
[ [ [ "##### Copyright 2020 The OpenFermion Developers", "_____no_output_____" ] ], [ [ "", "_____no_output_____" ] ], [ [ "# Introduction to OpenFermion", "_____no_output_____" ], [ "<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://quantumai.google/openfermion/tutorials/intro_to_openfermion\"><img src=\"https://quantumai.google/site-assets/images/buttons/quantumai_logo_1x.png\" />View on QuantumAI</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/quantumlib/OpenFermion/blob/master/docs/tutorials/intro_to_openfermion.ipynb\"><img src=\"https://quantumai.google/site-assets/images/buttons/colab_logo_1x.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/quantumlib/OpenFermion/blob/master/docs/tutorials/intro_to_openfermion.ipynb\"><img src=\"https://quantumai.google/site-assets/images/buttons/github_logo_1x.png\" />View source on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/OpenFermion/docs/tutorials/intro_to_openfermion.ipynb\"><img src=\"https://quantumai.google/site-assets/images/buttons/download_icon_1x.png\" />Download notebook</a>\n </td>\n</table>", "_____no_output_____" ], [ "Note: The examples below must be run sequentially within a section.", "_____no_output_____" ], [ "## Setup\n\nInstall the OpenFermion package:", "_____no_output_____" ] ], [ [ "try:\n import openfermion\nexcept ImportError:\n !pip install git+https://github.com/quantumlib/OpenFermion.git@master#egg=openfermion", "_____no_output_____" ] ], [ [ "## Initializing the FermionOperator data structure\n\nFermionic systems are often treated in second quantization where arbitrary operators can be expressed using the fermionic creation and annihilation operators, $a^\\dagger_k$ and $a_k$. The fermionic ladder operators play a similar role to their qubit ladder operator counterparts, $\\sigma^+_k$ and $\\sigma^-_k$ but are distinguished by the canonical fermionic anticommutation relations, $\\{a^\\dagger_i, a^\\dagger_j\\} = \\{a_i, a_j\\} = 0$ and $\\{a_i, a_j^\\dagger\\} = \\delta_{ij}$. Any weighted sums of products of these operators are represented with the FermionOperator data structure in OpenFermion. The following are examples of valid FermionOperators:\n\n$$\n\\begin{align}\n& a_1 \\nonumber \\\\\n& 1.7 a^\\dagger_3 \\nonumber \\\\\n&-1.7 \\, a^\\dagger_3 a_1 \\nonumber \\\\\n&(1 + 2i) \\, a^\\dagger_4 a^\\dagger_3 a_9 a_1 \\nonumber \\\\\n&(1 + 2i) \\, a^\\dagger_4 a^\\dagger_3 a_9 a_1 - 1.7 \\, a^\\dagger_3 a_1 \\nonumber\n\\end{align}\n$$\n\nThe FermionOperator class is contained in $\\textrm{ops/_fermion_operator.py}$. In order to support fast addition of FermionOperator instances, the class is implemented as hash table (python dictionary). The keys of the dictionary encode the strings of ladder operators and values of the dictionary store the coefficients. The strings of ladder operators are encoded as a tuple of 2-tuples which we refer to as the \"terms tuple\". Each ladder operator is represented by a 2-tuple. The first element of the 2-tuple is an int indicating the tensor factor on which the ladder operator acts. The second element of the 2-tuple is Boole: 1 represents raising and 0 represents lowering. For instance, $a^\\dagger_8$ is represented in a 2-tuple as $(8, 1)$. Note that indices start at 0 and the identity operator is an empty list. 
Below we give some examples of operators and their terms tuple:\n\n$$\n\\begin{align}\nI & \\mapsto () \\nonumber \\\\\na_1 & \\mapsto ((1, 0),) \\nonumber \\\\\na^\\dagger_3 & \\mapsto ((3, 1),) \\nonumber \\\\\na^\\dagger_3 a_1 & \\mapsto ((3, 1), (1, 0)) \\nonumber \\\\\na^\\dagger_4 a^\\dagger_3 a_9 a_1 & \\mapsto ((4, 1), (3, 1), (9, 0), (1, 0)) \\nonumber\n\\end{align}\n$$\n\nNote that when initializing a single ladder operator one should be careful to add the comma after the inner pair. This is because in python ((1, 2)) = (1, 2) whereas ((1, 2),) = ((1, 2),). The \"terms tuple\" is usually convenient when one wishes to initialize a term as part of a coded routine. However, the terms tuple is not particularly intuitive. Accordingly, OpenFermion also supports another user-friendly, string notation below. This representation is rendered when calling \"print\" on a FermionOperator.\n\n$$\n\\begin{align}\nI & \\mapsto \\textrm{\"\"} \\nonumber \\\\\na_1 & \\mapsto \\textrm{\"1\"} \\nonumber \\\\\na^\\dagger_3 & \\mapsto \\textrm{\"3^\"} \\nonumber \\\\\na^\\dagger_3 a_1 & \\mapsto \\textrm{\"3^}\\;\\textrm{1\"} \\nonumber \\\\\na^\\dagger_4 a^\\dagger_3 a_9 a_1 & \\mapsto \\textrm{\"4^}\\;\\textrm{3^}\\;\\textrm{9}\\;\\textrm{1\"} \\nonumber\n\\end{align}\n$$\n\nLet's initialize our first term! We do it two different ways below.", "_____no_output_____" ] ], [ [ "from openfermion.ops import FermionOperator\n\nmy_term = FermionOperator(((3, 1), (1, 0)))\nprint(my_term)\n\nmy_term = FermionOperator('3^ 1')\nprint(my_term)", "_____no_output_____" ] ], [ [ "The preferred way to specify the coefficient in openfermion is to provide an optional coefficient argument. If not provided, the coefficient defaults to 1. In the code below, the first method is preferred. The multiplication in the second method actually creates a copy of the term, which introduces some additional cost. All inplace operands (such as +=) modify classes whereas binary operands such as + create copies. Important caveats are that the empty tuple FermionOperator(()) and the empty string FermionOperator('') initializes identity. The empty initializer FermionOperator() initializes the zero operator.", "_____no_output_____" ] ], [ [ "good_way_to_initialize = FermionOperator('3^ 1', -1.7)\nprint(good_way_to_initialize)\n\nbad_way_to_initialize = -1.7 * FermionOperator('3^ 1')\nprint(bad_way_to_initialize)\n\nidentity = FermionOperator('')\nprint(identity)\n\nzero_operator = FermionOperator()\nprint(zero_operator)", "_____no_output_____" ] ], [ [ "Note that FermionOperator has only one attribute: .terms. This attribute is the dictionary which stores the term tuples.", "_____no_output_____" ] ], [ [ "my_operator = FermionOperator('4^ 1^ 3 9', 1. + 2.j)\nprint(my_operator)\nprint(my_operator.terms)", "_____no_output_____" ] ], [ [ "## Manipulating the FermionOperator data structure\nSo far we have explained how to initialize a single FermionOperator such as $-1.7 \\, a^\\dagger_3 a_1$. However, in general we will want to represent sums of these operators such as $(1 + 2i) \\, a^\\dagger_4 a^\\dagger_3 a_9 a_1 - 1.7 \\, a^\\dagger_3 a_1$. To do this, just add together two FermionOperators! We demonstrate below.", "_____no_output_____" ] ], [ [ "from openfermion.ops import FermionOperator\n\nterm_1 = FermionOperator('4^ 3^ 9 1', 1. + 2.j)\nterm_2 = FermionOperator('3^ 1', -1.7)\nmy_operator = term_1 + term_2\nprint(my_operator)\n\nmy_operator = FermionOperator('4^ 3^ 9 1', 1. 
+ 2.j)\nterm_2 = FermionOperator('3^ 1', -1.7)\nmy_operator += term_2\nprint('')\nprint(my_operator)", "_____no_output_____" ] ], [ [ "The print function prints each term in the operator on a different line. Note that the line my_operator = term_1 + term_2 creates a new object, which involves a copy of term_1 and term_2. The second block of code uses the inplace method +=, which is more efficient. This is especially important when trying to construct a very large FermionOperator. FermionOperators also support a wide range of builtins including, str(), repr(), ==, !=, *=, *, /, /=, +, +=, -, -=, - and **. Note that since FermionOperators involve floats, == and != check for (in)equality up to numerical precision. We demonstrate some of these methods below.", "_____no_output_____" ] ], [ [ "term_1 = FermionOperator('4^ 3^ 9 1', 1. + 2.j)\nterm_2 = FermionOperator('3^ 1', -1.7)\n\nmy_operator = term_1 - 33. * term_2\nprint(my_operator)\n\nmy_operator *= 3.17 * (term_2 + term_1) ** 2\nprint('')\nprint(my_operator)\n\nprint('')\nprint(term_2 ** 3)\n\nprint('')\nprint(term_1 == 2.*term_1 - term_1)\nprint(term_1 == my_operator)", "_____no_output_____" ] ], [ [ "Additionally, there are a variety of methods that act on the FermionOperator data structure. We demonstrate a small subset of those methods here.", "_____no_output_____" ] ], [ [ "from openfermion.utils import commutator, count_qubits, hermitian_conjugated\nfrom openfermion.transforms import normal_ordered\n\n# Get the Hermitian conjugate of a FermionOperator, count its qubit, check if it is normal-ordered.\nterm_1 = FermionOperator('4^ 3 3^', 1. + 2.j)\nprint(hermitian_conjugated(term_1))\nprint(term_1.is_normal_ordered())\nprint(count_qubits(term_1))\n\n# Normal order the term.\nterm_2 = normal_ordered(term_1)\nprint('')\nprint(term_2)\nprint(term_2.is_normal_ordered())\n\n# Compute a commutator of the terms.\nprint('')\nprint(commutator(term_1, term_2))", "_____no_output_____" ] ], [ [ "## The QubitOperator data structure\nThe QubitOperator data structure is another essential part of openfermion. As the name suggests, QubitOperator is used to store qubit operators in almost exactly the same way that FermionOperator is used to store fermion operators. For instance $X_0 Z_3 Y_4$ is a QubitOperator. The internal representation of this as a terms tuple would be $((0, \\textrm{\"X\"}), (3, \\textrm{\"Z\"}), (4, \\textrm{\"Y\"}))$. Note that one important difference between QubitOperator and FermionOperator is that the terms in QubitOperator are always sorted in order of tensor factor. In some cases, this enables faster manipulation. We initialize some QubitOperators below.", "_____no_output_____" ] ], [ [ "from openfermion.ops import QubitOperator\n\nmy_first_qubit_operator = QubitOperator('X1 Y2 Z3')\nprint(my_first_qubit_operator)\nprint(my_first_qubit_operator.terms)\n\noperator_2 = QubitOperator('X3 Z4', 3.17)\noperator_2 -= 77. 
* my_first_qubit_operator\nprint('')\nprint(operator_2)", "_____no_output_____" ] ], [ [ "## Jordan-Wigner and Bravyi-Kitaev\nopenfermion provides functions for mapping FermionOperators to QubitOperators.", "_____no_output_____" ] ], [ [ "from openfermion.ops import FermionOperator\nfrom openfermion.transforms import jordan_wigner, bravyi_kitaev\nfrom openfermion.utils import hermitian_conjugated\nfrom openfermion.linalg import eigenspectrum\n\n# Initialize an operator.\nfermion_operator = FermionOperator('2^ 0', 3.17)\nfermion_operator += hermitian_conjugated(fermion_operator)\nprint(fermion_operator)\n\n# Transform to qubits under the Jordan-Wigner transformation and print its spectrum.\njw_operator = jordan_wigner(fermion_operator)\nprint('')\nprint(jw_operator)\njw_spectrum = eigenspectrum(jw_operator)\nprint(jw_spectrum)\n\n# Transform to qubits under the Bravyi-Kitaev transformation and print its spectrum.\nbk_operator = bravyi_kitaev(fermion_operator)\nprint('')\nprint(bk_operator)\nbk_spectrum = eigenspectrum(bk_operator)\nprint(bk_spectrum)", "_____no_output_____" ] ], [ [ "We see that despite the different representation, these operators are iso-spectral. We can also apply the Jordan-Wigner transform in reverse to map arbitrary QubitOperators to FermionOperators. Note that we also demonstrate the .compress() method (a method on both FermionOperators and QubitOperators) which removes zero entries.", "_____no_output_____" ] ], [ [ "from openfermion.transforms import reverse_jordan_wigner\n\n# Initialize QubitOperator.\nmy_operator = QubitOperator('X0 Y1 Z2', 88.)\nmy_operator += QubitOperator('Z1 Z4', 3.17)\nprint(my_operator)\n\n# Map QubitOperator to a FermionOperator.\nmapped_operator = reverse_jordan_wigner(my_operator)\nprint('')\nprint(mapped_operator)\n\n# Map the operator back to qubits and make sure it is the same.\nback_to_normal = jordan_wigner(mapped_operator)\nback_to_normal.compress()\nprint('')\nprint(back_to_normal)", "_____no_output_____" ] ], [ [ "## Sparse matrices and the Hubbard model\nOften, one would like to obtain a sparse matrix representation of an operator which can be analyzed numerically. There is code in both openfermion.transforms and openfermion.utils which facilitates this. The function get_sparse_operator converts either a FermionOperator, a QubitOperator or other more advanced classes such as InteractionOperator to a scipy.sparse.csc matrix. There are numerous functions in openfermion.utils which one can call on the sparse operators such as \"get_gap\", \"get_hartree_fock_state\", \"get_ground_state\", etc. We show this off by computing the ground state energy of the Hubbard model. 
To do that, we use code from the openfermion.hamiltonians module which constructs lattice models of fermions such as Hubbard models.", "_____no_output_____" ] ], [ [ "from openfermion.hamiltonians import fermi_hubbard\nfrom openfermion.linalg import get_sparse_operator, get_ground_state\nfrom openfermion.transforms import jordan_wigner\n\n\n# Set model.\nx_dimension = 2\ny_dimension = 2\ntunneling = 2.\ncoulomb = 1.\nmagnetic_field = 0.5\nchemical_potential = 0.25\nperiodic = 1\nspinless = 1\n\n# Get fermion operator.\nhubbard_model = fermi_hubbard(\n x_dimension, y_dimension, tunneling, coulomb, chemical_potential,\n magnetic_field, periodic, spinless)\nprint(hubbard_model)\n\n# Get qubit operator under Jordan-Wigner.\njw_hamiltonian = jordan_wigner(hubbard_model)\njw_hamiltonian.compress()\nprint('')\nprint(jw_hamiltonian)\n\n# Get scipy.sparse.csc representation.\nsparse_operator = get_sparse_operator(hubbard_model)\nprint('')\nprint(sparse_operator)\nprint('\\nEnergy of the model is {} in units of T and J.'.format(\n get_ground_state(sparse_operator)[0]))", "_____no_output_____" ] ], [ [ "## Hamiltonians in the plane wave basis\nA user can write plugins to openfermion which allow for the use of, e.g., third-party electronic structure package to compute molecular orbitals, Hamiltonians, energies, reduced density matrices, coupled cluster amplitudes, etc using Gaussian basis sets. We may provide scripts which interface between such packages and openfermion in future but do not discuss them in this tutorial.\n\nWhen using simpler basis sets such as plane waves, these packages are not needed. openfermion comes with code which computes Hamiltonians in the plane wave basis. Note that when using plane waves, one is working with the periodized Coulomb operator, best suited for condensed phase calculations such as studying the electronic structure of a solid. To obtain these Hamiltonians one must choose to study the system without a spin degree of freedom (spinless), one must the specify dimension in which the calculation is performed (n_dimensions, usually 3), one must specify how many plane waves are in each dimension (grid_length) and one must specify the length scale of the plane wave harmonics in each dimension (length_scale) and also the locations and charges of the nuclei. One can generate these models with plane_wave_hamiltonian() found in openfermion.hamiltonians. For simplicity, below we compute the Hamiltonian in the case of zero external charge (corresponding to the uniform electron gas, aka jellium). 
We also demonstrate that one can transform the plane wave Hamiltonian using a Fourier transform without effecting the spectrum of the operator.", "_____no_output_____" ] ], [ [ "from openfermion.hamiltonians import jellium_model\nfrom openfermion.utils import Grid\nfrom openfermion.linalg import eigenspectrum\nfrom openfermion.transforms import jordan_wigner, fourier_transform\n\n# Let's look at a very small model of jellium in 1D.\ngrid = Grid(dimensions=1, length=3, scale=1.0)\nspinless = True\n\n# Get the momentum Hamiltonian.\nmomentum_hamiltonian = jellium_model(grid, spinless)\nmomentum_qubit_operator = jordan_wigner(momentum_hamiltonian)\nmomentum_qubit_operator.compress()\nprint(momentum_qubit_operator)\n\n# Fourier transform the Hamiltonian to the position basis.\nposition_hamiltonian = fourier_transform(momentum_hamiltonian, grid, spinless)\nposition_qubit_operator = jordan_wigner(position_hamiltonian)\nposition_qubit_operator.compress()\nprint('')\nprint (position_qubit_operator)\n\n# Check the spectra to make sure these representations are iso-spectral.\nspectral_difference = eigenspectrum(momentum_qubit_operator) - eigenspectrum(position_qubit_operator)\nprint('')\nprint(spectral_difference)", "_____no_output_____" ] ], [ [ "## Basics of MolecularData class\n\nData from electronic structure calculations can be saved in an OpenFermion data structure called MolecularData, which makes it easy to access within our library. Often, one would like to analyze a chemical series or look at many different Hamiltonians and sometimes the electronic structure calculations are either expensive to compute or difficult to converge (e.g. one needs to mess around with different types of SCF routines to make things converge). Accordingly, we anticipate that users will want some way to automatically database the results of their electronic structure calculations so that important data (such as the SCF integrals) can be looked up on-the-fly if the user has computed them in the past. OpenFermion supports a data provenance strategy which saves key results of the electronic structure calculation (including pointers to files containing large amounts of data, such as the molecular integrals) in an HDF5 container.\n\nThe MolecularData class stores information about molecules. One initializes a MolecularData object by specifying parameters of a molecule such as its geometry, basis, multiplicity, charge and an optional string describing it. One can also initialize MolecularData simply by providing a string giving a filename where a previous MolecularData object was saved in an HDF5 container. One can save a MolecularData instance by calling the class's .save() method. This automatically saves the instance in a data folder specified during OpenFermion installation. The name of the file is generated automatically from the instance attributes and optionally provided description. Alternatively, a filename can also be provided as an optional input if one wishes to manually name the file.\n\nWhen electronic structure calculations are run, the data files for the molecule can be automatically updated. If one wishes to later use that data they either initialize MolecularData with the instance filename or initialize the instance and then later call the .load() method.\n\nBasis functions are provided to initialization using a string such as \"6-31g\". 
Geometries can be specified using a simple txt input file (see geometry_from_file function in molecular_data.py) or can be passed using a simple python list format demonstrated below. Atoms are specified using a string for their atomic symbol. Distances should be provided in angstrom. Below we initialize a simple instance of MolecularData without performing any electronic structure calculations.", "_____no_output_____" ] ], [ [ "from openfermion.chem import MolecularData\n\n# Set parameters to make a simple molecule.\ndiatomic_bond_length = .7414\ngeometry = [('H', (0., 0., 0.)), ('H', (0., 0., diatomic_bond_length))]\nbasis = 'sto-3g'\nmultiplicity = 1\ncharge = 0\ndescription = str(diatomic_bond_length)\n\n# Make molecule and print out a few interesting facts about it.\nmolecule = MolecularData(geometry, basis, multiplicity,\n charge, description)\nprint('Molecule has automatically generated name {}'.format(\n molecule.name))\nprint('Information about this molecule would be saved at:\\n{}\\n'.format(\n molecule.filename))\nprint('This molecule has {} atoms and {} electrons.'.format(\n molecule.n_atoms, molecule.n_electrons))\nfor atom, atomic_number in zip(molecule.atoms, molecule.protons):\n print('Contains {} atom, which has {} protons.'.format(\n atom, atomic_number))", "_____no_output_____" ] ], [ [ "If we had previously computed this molecule using an electronic structure package, we can call molecule.load() to populate all sorts of interesting fields in the data structure. Though we make no assumptions about what electronic structure packages users might install, we assume that the calculations are saved in OpenFermion's MolecularData objects. Currently plugins are available for [Psi4](http://psicode.org/) [(OpenFermion-Psi4)](http://github.com/quantumlib/OpenFermion-Psi4) and [PySCF](https://github.com/sunqm/pyscf) [(OpenFermion-PySCF)](http://github.com/quantumlib/OpenFermion-PySCF), and there may be more in the future. For the purposes of this example, we will load data that ships with OpenFermion to make a plot of the energy surface of hydrogen. 
Note that helper functions to initialize some interesting chemical benchmarks are found in openfermion.utils.", "_____no_output_____" ] ], [ [ "# Set molecule parameters.\nbasis = 'sto-3g'\nmultiplicity = 1\nbond_length_interval = 0.1\nn_points = 25\n\n# Generate molecule at different bond lengths.\nhf_energies = []\nfci_energies = []\nbond_lengths = []\nfor point in range(3, n_points + 1):\n bond_length = bond_length_interval * point\n bond_lengths += [bond_length]\n description = str(round(bond_length,2))\n print(description)\n geometry = [('H', (0., 0., 0.)), ('H', (0., 0., bond_length))]\n molecule = MolecularData(\n geometry, basis, multiplicity, description=description)\n \n # Load data.\n molecule.load()\n\n # Print out some results of calculation.\n print('\\nAt bond length of {} angstrom, molecular hydrogen has:'.format(\n bond_length))\n print('Hartree-Fock energy of {} Hartree.'.format(molecule.hf_energy))\n print('MP2 energy of {} Hartree.'.format(molecule.mp2_energy))\n print('FCI energy of {} Hartree.'.format(molecule.fci_energy))\n print('Nuclear repulsion energy between protons is {} Hartree.'.format(\n molecule.nuclear_repulsion))\n for orbital in range(molecule.n_orbitals):\n print('Spatial orbital {} has energy of {} Hartree.'.format(\n orbital, molecule.orbital_energies[orbital]))\n hf_energies += [molecule.hf_energy]\n fci_energies += [molecule.fci_energy]\n\n# Plot.\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nplt.figure(0)\nplt.plot(bond_lengths, fci_energies, 'x-')\nplt.plot(bond_lengths, hf_energies, 'o-')\nplt.ylabel('Energy in Hartree')\nplt.xlabel('Bond length in angstrom')\nplt.show()", "_____no_output_____" ] ], [ [ "The geometry data needed to generate MolecularData can also be retreived from the PubChem online database by inputting the molecule's name.", "_____no_output_____" ] ], [ [ "from openfermion.chem import geometry_from_pubchem\n\nmethane_geometry = geometry_from_pubchem('methane')\nprint(methane_geometry)", "_____no_output_____" ] ], [ [ "## InteractionOperator and InteractionRDM for efficient numerical representations\n\nFermion Hamiltonians can be expressed as $H = h_0 + \\sum_{pq} h_{pq}\\, a^\\dagger_p a_q + \\frac{1}{2} \\sum_{pqrs} h_{pqrs} \\, a^\\dagger_p a^\\dagger_q a_r a_s$ where $h_0$ is a constant shift due to the nuclear repulsion and $h_{pq}$ and $h_{pqrs}$ are the famous molecular integrals. Since fermions interact pairwise, their energy is thus a unique function of the one-particle and two-particle reduced density matrices which are expressed in second quantization as $\\rho_{pq} = \\left \\langle p \\mid a^\\dagger_p a_q \\mid q \\right \\rangle$ and $\\rho_{pqrs} = \\left \\langle pq \\mid a^\\dagger_p a^\\dagger_q a_r a_s \\mid rs \\right \\rangle$, respectively.\n\nBecause the RDMs and molecular Hamiltonians are both compactly represented and manipulated as 2- and 4- index tensors, we can represent them in a particularly efficient form using similar data structures. The InteractionOperator data structure can be initialized for a Hamiltonian by passing the constant $h_0$ (or 0), as well as numpy arrays representing $h_{pq}$ (or $\\rho_{pq}$) and $h_{pqrs}$ (or $\\rho_{pqrs}$). Importantly, InteractionOperators can also be obtained by calling MolecularData.get_molecular_hamiltonian() or by calling the function get_interaction_operator() (found in openfermion.transforms) on a FermionOperator. The InteractionRDM data structure is similar but represents RDMs. 
For instance, one can get a molecular RDM by calling MolecularData.get_molecular_rdm(). When generating Hamiltonians from the MolecularData class, one can choose to restrict the system to an active space.\n\nThese classes inherit from the same base class, PolynomialTensor. This data structure overloads the slice operator [] so that one can get or set the key attributes of the InteractionOperator: $\\textrm{.constant}$, $\\textrm{.one_body_coefficients}$ and $\\textrm{.two_body_coefficients}$ . For instance, InteractionOperator[(p, 1), (q, 1), (r, 0), (s, 0)] would return $h_{pqrs}$ and InteractionRDM would return $\\rho_{pqrs}$. Importantly, the class supports fast basis transformations using the method PolynomialTensor.rotate_basis(rotation_matrix).\nBut perhaps most importantly, one can map the InteractionOperator to any of the other data structures we've described here.\n\nBelow, we load MolecularData from a saved calculation of LiH. We then obtain an InteractionOperator representation of this system in an active space. We then map that operator to qubits. We then demonstrate that one can rotate the orbital basis of the InteractionOperator using random angles to obtain a totally different operator that is still iso-spectral.", "_____no_output_____" ] ], [ [ "from openfermion.chem import MolecularData\nfrom openfermion.transforms import get_fermion_operator, jordan_wigner\nfrom openfermion.linalg import get_ground_state, get_sparse_operator\nimport numpy\nimport scipy\nimport scipy.linalg\n\n# Load saved file for LiH.\ndiatomic_bond_length = 1.45\ngeometry = [('Li', (0., 0., 0.)), ('H', (0., 0., diatomic_bond_length))]\nbasis = 'sto-3g'\nmultiplicity = 1\n\n# Set Hamiltonian parameters.\nactive_space_start = 1\nactive_space_stop = 3\n\n# Generate and populate instance of MolecularData.\nmolecule = MolecularData(geometry, basis, multiplicity, description=\"1.45\")\nmolecule.load()\n\n# Get the Hamiltonian in an active space.\nmolecular_hamiltonian = molecule.get_molecular_hamiltonian(\n occupied_indices=range(active_space_start),\n active_indices=range(active_space_start, active_space_stop))\n\n# Map operator to fermions and qubits.\nfermion_hamiltonian = get_fermion_operator(molecular_hamiltonian)\nqubit_hamiltonian = jordan_wigner(fermion_hamiltonian)\nqubit_hamiltonian.compress()\nprint('The Jordan-Wigner Hamiltonian in canonical basis follows:\\n{}'.format(qubit_hamiltonian))\n\n# Get sparse operator and ground state energy.\nsparse_hamiltonian = get_sparse_operator(qubit_hamiltonian)\nenergy, state = get_ground_state(sparse_hamiltonian)\nprint('Ground state energy before rotation is {} Hartree.\\n'.format(energy))\n\n# Randomly rotate.\nn_orbitals = molecular_hamiltonian.n_qubits // 2\nn_variables = int(n_orbitals * (n_orbitals - 1) / 2)\nnumpy.random.seed(1)\nrandom_angles = numpy.pi * (1. - 2. 
* numpy.random.rand(n_variables))\nkappa = numpy.zeros((n_orbitals, n_orbitals))\nindex = 0\nfor p in range(n_orbitals):\n for q in range(p + 1, n_orbitals):\n kappa[p, q] = random_angles[index]\n kappa[q, p] = -numpy.conjugate(random_angles[index])\n index += 1\n\n # Build the unitary rotation matrix.\n difference_matrix = kappa + kappa.transpose()\n rotation_matrix = scipy.linalg.expm(kappa)\n\n # Apply the unitary.\n molecular_hamiltonian.rotate_basis(rotation_matrix)\n\n# Get qubit Hamiltonian in rotated basis.\nqubit_hamiltonian = jordan_wigner(molecular_hamiltonian)\nqubit_hamiltonian.compress()\nprint('The Jordan-Wigner Hamiltonian in rotated basis follows:\\n{}'.format(qubit_hamiltonian))\n\n# Get sparse Hamiltonian and energy in rotated basis.\nsparse_hamiltonian = get_sparse_operator(qubit_hamiltonian)\nenergy, state = get_ground_state(sparse_hamiltonian)\nprint('Ground state energy after rotation is {} Hartree.'.format(energy))", "_____no_output_____" ] ], [ [ "## Quadratic Hamiltonians and Slater determinants\n\nThe general electronic structure Hamiltonian\n$H = h_0 + \\sum_{pq} h_{pq}\\, a^\\dagger_p a_q + \\frac{1}{2} \\sum_{pqrs} h_{pqrs} \\, a^\\dagger_p a^\\dagger_q a_r a_s$ contains terms that act on up to 4 sites, or\nis quartic in the fermionic creation and annihilation operators. However, in many situations\nwe may fruitfully approximate these Hamiltonians by replacing these quartic terms with\nterms that act on at most 2 fermionic sites, or quadratic terms, as in mean-field approximation theory. \nThese Hamiltonians have a number of\nspecial properties one can exploit for efficient simulation and manipulation of the Hamiltonian, thus\nwarranting a special data structure. We refer to Hamiltonians which\nonly contain terms that are quadratic in the fermionic creation and annihilation operators\nas quadratic Hamiltonians, and include the general case of non-particle conserving terms as in\na general Bogoliubov transformation. Eigenstates of quadratic Hamiltonians can be prepared\nefficiently on both a quantum and classical computer, making them amenable to initial guesses for\nmany more challenging problems.\n\nA general quadratic Hamiltonian takes the form\n$$H = \\sum_{p, q} (M_{pq} - \\mu \\delta_{pq}) a^\\dagger_p a_q + \\frac{1}{2} \\sum_{p, q} (\\Delta_{pq} a^\\dagger_p a^\\dagger_q + \\Delta_{pq}^* a_q a_p) + \\text{constant},$$\nwhere $M$ is a Hermitian matrix, $\\Delta$ is an antisymmetric matrix,\n$\\delta_{pq}$ is the Kronecker delta symbol, and $\\mu$ is a chemical\npotential term which we keep separate from $M$ so that we can use it\nto adjust the expectation of the total number of particles.\nIn OpenFermion, quadratic Hamiltonians are conveniently represented and manipulated\nusing the QuadraticHamiltonian class, which stores $M$, $\\Delta$, $\\mu$ and the constant. It is specialized to exploit the properties unique to quadratic Hamiltonians. Like InteractionOperator and InteractionRDM, it inherits from the PolynomialTensor class.\n\nThe BCS mean-field model of superconductivity is a quadratic Hamiltonian. 
The following code constructs an instance of this model as a FermionOperator, converts it to a QuadraticHamiltonian, and then computes its ground energy:", "_____no_output_____" ] ], [ [ "from openfermion.hamiltonians import mean_field_dwave\nfrom openfermion.transforms import get_quadratic_hamiltonian\n\n# Set model.\nx_dimension = 2\ny_dimension = 2\ntunneling = 2.\nsc_gap = 1.\nperiodic = True\n\n# Get FermionOperator.\nmean_field_model = mean_field_dwave(\n x_dimension, y_dimension, tunneling, sc_gap, periodic=periodic)\n\n# Convert to QuadraticHamiltonian\nquadratic_hamiltonian = get_quadratic_hamiltonian(mean_field_model)\n\n# Compute the ground energy\nground_energy = quadratic_hamiltonian.ground_energy()\nprint(ground_energy)", "_____no_output_____" ] ], [ [ "Any quadratic Hamiltonian may be rewritten in the form\n$$H = \\sum_p \\varepsilon_p b^\\dagger_p b_p + \\text{constant},$$\nwhere the $b_p$ are new annihilation operators that satisfy the fermionic anticommutation relations, and which are linear combinations of the old creation and annihilation operators. This form of $H$ makes it easy to deduce its eigenvalues; they are sums of subsets of the $\\varepsilon_p$, which we call the orbital energies of $H$. The following code computes the orbital energies and the constant:", "_____no_output_____" ] ], [ [ "orbital_energies, constant = quadratic_hamiltonian.orbital_energies()\nprint(orbital_energies)\nprint()\nprint(constant)", "_____no_output_____" ] ], [ [ "Eigenstates of quadratic hamiltonians are also known as fermionic Gaussian states, and they can be prepared efficiently on a quantum computer. One can use OpenFermion to obtain circuits for preparing these states. The following code obtains the description of a circuit which prepares the ground state (operations that can be performed in parallel are grouped together), along with a description of the starting state to which the circuit should be applied:", "_____no_output_____" ] ], [ [ "from openfermion.circuits import gaussian_state_preparation_circuit\n\ncircuit_description, start_orbitals = gaussian_state_preparation_circuit(quadratic_hamiltonian)\nfor parallel_ops in circuit_description:\n print(parallel_ops)\nprint('')\nprint(start_orbitals)", "_____no_output_____" ] ], [ [ "In the circuit description, each elementary operation is either a tuple of the form $(i, j, \\theta, \\varphi)$, indicating the operation $\\exp[i \\varphi a_j^\\dagger a_j]\\exp[\\theta (a_i^\\dagger a_j - a_j^\\dagger a_i)]$, which is a Givens rotation of modes $i$ and $j$, or the string 'pht', indicating the particle-hole transformation on the last fermionic mode, which is the operator $\\mathcal{B}$ such that $\\mathcal{B} a_N \\mathcal{B}^\\dagger = a_N^\\dagger$ and leaves the rest of the ladder operators unchanged. Operations that can be performed in parallel are grouped together.\n\nIn the special case that a quadratic Hamiltonian conserves particle number ($\\Delta = 0$), its eigenstates take the form\n$$\\lvert \\Psi_S \\rangle = b^\\dagger_{1}\\cdots b^\\dagger_{N_f}\\lvert \\text{vac} \\rangle,\\qquad\nb^\\dagger_{p} = \\sum_{k=1}^N Q_{pq}a^\\dagger_q,$$\nwhere $Q$ is an $N_f \\times N$ matrix with orthonormal rows. These states are also known as Slater determinants. OpenFermion also provides functionality to obtain circuits for preparing Slater determinants starting with the matrix $Q$ as the input.", "_____no_output_____" ] ] ]
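The final cell above mentions that OpenFermion can also build circuits that prepare Slater determinants from the matrix $Q$, but stops before showing it. A minimal sketch of what that might look like follows; the random construction of $Q$ is my own assumption, and the helper name `slater_determinant_preparation_circuit` should be checked against the installed OpenFermion version.

```python
# Hedged sketch: prepare a Slater determinant from a Q matrix with orthonormal rows.
import numpy
from openfermion.circuits import slater_determinant_preparation_circuit

n_modes, n_fermions = 4, 2

# Build an N_f x N matrix Q with orthonormal rows (here taken from a QR factorization).
random_matrix = numpy.random.randn(n_modes, n_modes)
orthonormal, _ = numpy.linalg.qr(random_matrix)
Q = orthonormal[:, :n_fermions].T          # shape (n_fermions, n_modes), rows orthonormal

# Each element of the description is a group of Givens rotations (i, j, theta, phi)
# that can be applied in parallel, analogous to gaussian_state_preparation_circuit above.
circuit_description = slater_determinant_preparation_circuit(Q)
for parallel_ops in circuit_description:
    print(parallel_ops)
```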
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
4a69da3df6a34d040406843a6f7ba758c8bc0129
16,930
ipynb
Jupyter Notebook
Risk Factor Models/historical_variance.ipynb
2series/Trading-AI-Style
c7c067fea06bdb3cf2a7b8bef1e3af7cf5944424
[ "MIT" ]
null
null
null
Risk Factor Models/historical_variance.ipynb
2series/Trading-AI-Style
c7c067fea06bdb3cf2a7b8bef1e3af7cf5944424
[ "MIT" ]
null
null
null
Risk Factor Models/historical_variance.ipynb
2series/Trading-AI-Style
c7c067fea06bdb3cf2a7b8bef1e3af7cf5944424
[ "MIT" ]
null
null
null
32.43295
1,361
0.601004
[ [ [ "# Historical Variance\n\nLet's see how we'd be calculating a covariance matrix of assets without the help of a factor model", "_____no_output_____" ] ], [ [ "import sys\n!{sys.executable} -m pip install -r requirements.txt", "_____no_output_____" ], [ "import numpy as np\nimport pandas as pd\nimport time\nimport os\nimport quiz_helper\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "%matplotlib inline\nplt.style.use('ggplot')\nplt.rcParams['figure.figsize'] = (14, 8)", "_____no_output_____" ] ], [ [ "### data bundle", "_____no_output_____" ] ], [ [ "import os\nimport quiz_helper\nfrom zipline.data import bundles", "_____no_output_____" ], [ "os.environ['ZIPLINE_ROOT'] = os.path.join(os.getcwd(), '..', '..','data','module_4_quizzes_eod')\ningest_func = bundles.csvdir.csvdir_equities(['daily'], quiz_helper.EOD_BUNDLE_NAME)\nbundles.register(quiz_helper.EOD_BUNDLE_NAME, ingest_func)\nprint('Data Registered')", "Data Registered\n" ] ], [ [ "### Build pipeline engine", "_____no_output_____" ] ], [ [ "from zipline.pipeline import Pipeline\nfrom zipline.pipeline.factors import AverageDollarVolume\nfrom zipline.utils.calendars import get_calendar\n\nuniverse = AverageDollarVolume(window_length=120).top(500) \ntrading_calendar = get_calendar('NYSE') \nbundle_data = bundles.load(quiz_helper.EOD_BUNDLE_NAME)\nengine = quiz_helper.build_pipeline_engine(bundle_data, trading_calendar)", "_____no_output_____" ] ], [ [ "### View Data¶\nWith the pipeline engine built, let's get the stocks at the end of the period in the universe we're using. We'll use these tickers to generate the returns data for the our risk model.", "_____no_output_____" ] ], [ [ "universe_end_date = pd.Timestamp('2016-01-05', tz='UTC')\n\nuniverse_tickers = engine\\\n .run_pipeline(\n Pipeline(screen=universe),\n universe_end_date,\n universe_end_date)\\\n .index.get_level_values(1)\\\n .values.tolist()\n \nuniverse_tickers", "_____no_output_____" ], [ "len(universe_tickers)", "_____no_output_____" ], [ "from zipline.data.data_portal import DataPortal\n\ndata_portal = DataPortal(\n bundle_data.asset_finder,\n trading_calendar=trading_calendar,\n first_trading_day=bundle_data.equity_daily_bar_reader.first_trading_day,\n equity_minute_reader=None,\n equity_daily_reader=bundle_data.equity_daily_bar_reader,\n adjustment_reader=bundle_data.adjustment_reader)", "_____no_output_____" ] ], [ [ "## Get pricing data helper function", "_____no_output_____" ] ], [ [ "from quiz_helper import get_pricing", "_____no_output_____" ] ], [ [ "## get pricing data into a dataframe", "_____no_output_____" ] ], [ [ "returns_df = \\\n get_pricing(\n data_portal,\n trading_calendar,\n universe_tickers,\n universe_end_date - pd.DateOffset(years=5),\n universe_end_date)\\\n .pct_change()[1:].fillna(0) #convert prices into returns\n\nreturns_df", "_____no_output_____" ] ], [ [ "## Quiz 1\n\nCheck out the [numpy.cov documentation](https://docs.scipy.org/doc/numpy-1.15.0/reference/generated/numpy.cov.html). 
Then think about what's wrong with the following use of numpy.cov", "_____no_output_____" ] ], [ [ "# What's wrong with this?\nannualization_factor = 252\ncovariance_assets_not_correct = annualization_factor*np.cov(returns_df)", "_____no_output_____" ], [ "## TODO: Check the shape of the covariance matrix\n", "_____no_output_____" ] ], [ [ "## Answer 1 here:\n\n", "_____no_output_____" ], [ "## Quiz 2\nHow can you adjust the input so that we get the desired covariance matrix of assets?", "_____no_output_____" ] ], [ [ "# TODO: calculate the covariance matrix of assets\nannualization_factor = # ...\ncovariance_assets = # ...", "_____no_output_____" ], [ "covariance_assets.shape", "_____no_output_____" ] ], [ [ "## Answer 2:", "_____no_output_____" ], [ "## Visualize the covariance matrix", "_____no_output_____" ] ], [ [ "import seaborn as sns", "_____no_output_____" ], [ "# view a heatmap of the covariance matrix\nsns.heatmap(covariance_assets,cmap='Paired');\n## If the colors aren't distinctive, please try a couple of these color schemes:\n## cmap = 'tab10'\n# cmap = 'Accent'", "_____no_output_____" ] ], [ [ "## Quiz 3\nLooking at the colormap are covariances more likely to be positive or negative? Are covariances likely to be above 0.10 or below 0.10?", "_____no_output_____" ], [ "## Answer 3 here:\n", "_____no_output_____" ], [ "## Fun Quiz!\nDo you know what the [seaborn visualization package](https://seaborn.pydata.org/index.html) was named after?", "_____no_output_____" ], [ "## Fun Answer! here \nor just check the solution notebook!", "_____no_output_____" ], [ "## Solutions\nThe [solution notebook is here](historical_variance_solution.ipynb)", "_____no_output_____" ] ] ]
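For the covariance quizzes above, one possible answer sketch (my own, not the course's solution notebook): numpy.cov treats each row as a variable by default, while returns_df stores dates in rows and assets in columns, so passing the frame directly yields a dates-by-dates matrix. Transposing the input (or passing rowvar=False) gives the asset-by-asset covariance.

```python
# Possible answer sketch for Quiz 1/2: np.cov expects variables in rows by default,
# so pass the transposed returns (assets in rows) and annualize by trading days.
import numpy as np

annualization_factor = 252
covariance_assets = annualization_factor * np.cov(returns_df.T)  # or np.cov(returns_df, rowvar=False)

# Sanity check: the result should be (number of assets) x (number of assets).
assert covariance_assets.shape == (len(returns_df.columns), len(returns_df.columns))
```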
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
4a69f5b48b5f6a3f2aee2b798761e24f4370a1e1
53,824
ipynb
Jupyter Notebook
Feature Importance.ipynb
wasit7/grade_analysis
6b352ccbc8bc66901b16ba8c2049a7f3caf26c0a
[ "MIT" ]
null
null
null
Feature Importance.ipynb
wasit7/grade_analysis
6b352ccbc8bc66901b16ba8c2049a7f3caf26c0a
[ "MIT" ]
null
null
null
Feature Importance.ipynb
wasit7/grade_analysis
6b352ccbc8bc66901b16ba8c2049a7f3caf26c0a
[ "MIT" ]
null
null
null
55.776166
1,524
0.554381
[ [ [ "pwd", "_____no_output_____" ], [ "import pandas as pd\nimport numpy as np\ndf_csv= pd.read_pickle(\"../df_noplus/df5.pkl\")\nall_subjects=df_csv['COURSEID'].value_counts()\n##removing any subject enrolled less than 20 times\n#all_subjects=all_subjects[all_subjects>=20]\nprint df_csv.shape\ndf_csv=df_csv[df_csv[\"COURSEID\"].isin(all_subjects.index)]\nprint df_csv.shape", "_____no_output_____" ], [ "for subject,count in all_subjects.iteritems():\n print \"%s \"%subject,\n ##load data\n dfx=df_csv[df_csv['COURSEID']==subject]\n dfx=dfx.iloc[np.random.permutation(len(dfx))]\n ##convert to np.array\n #pattern\n X=dfx.as_matrix( dfx.columns[4:] )\n #label\n y=dfx.as_matrix( ['GRADE'] ).T[0]\n ##evaluation", "MA211 CS102 CS101 TU154 TH161 CS111 CS213 EL171 SC135 PY228 EL172 SC185 TU110 CS223 TU120 ST216 CS284 EL295 TU130 MA212 MA332 CS314 CS222 CS214 CS261 CS251 CS281 CS341 EL395 CS301 CS311 CS374 CS302 CS401 CS342 CS105 HO201 CS395 CS402 CS365 EL070 AT326 TU100 CS289 CS385 AT316 CS326 TU122 CS288 CS487 CS211 CS489 SC123 SC173 CS296 CS488 SW111 CS367 SW365 CS486 SW212 CS409 SW221 CS215 CS386 CS366 CS295 CS377 LA209 CS456 CS467 CS300 SW478 SW213 MW314 BA291 SW475 CS396 CS427 ES356 CS387 CS286 CS297 CS429 CS446 SW335 CS356 HR201 CS459 SO201 NS132 TA395 CJ321 CS397 CS398 CS348 CJ317 MW313 CJ316 MA216 CS407 CS115 CS457 CS388 CS426 CS449 CS408 CJ315 CS285 CS399 PY218 CS328 AT366 CS359 JC201 TU153 ES456 CS469 CS479 EC210 CS499 PY211 PE245 CS447 ES256 AS171 TU116 PM235 CS496 PM236 EG241 AT207 SW366 MU100 PY237 JC200 IS201 TU115 JP171 PY226 PY267 MA217 MU130 JC260 FN211 MU135 TD436 PM215 MU278 MU202 RT326 MA221 PE240 DM201 CS275 CF367 CF366 HS266 HS269 SW224 PY217 GE311 SW223 MW318 GE225 HS356 AS178 SW489 SN212 JP172 SW222 MU275 AT346 MU277 ST218 CS231 RE333 TU156 AN201 JC281 SW467 TU111 AS177 DM215 PC286 FD211 EL202 AT336 NS112 DM207 CN342 SW486 SW214 EL231 HS360 \n" ], [ "indices", "_____no_output_____" ], [ "import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.datasets import make_classification\nfrom sklearn.ensemble import ExtraTreesClassifier\n\nsubject='CS284'\nprint subject\n\ndef get_importances20(subject,pattern_names):\n ##load data\n dfx=df_csv[df_csv['COURSEID']==subject]\n dfx=dfx.iloc[np.random.permutation(len(dfx))]\n ##convert to np.array\n X=dfx.as_matrix( dfx.columns[4:] )\n y=dfx.as_matrix( ['GRADE'] ).T[0]\n\n # Build a forest and compute the feature importances\n forest = ExtraTreesClassifier(n_estimators=250,\n random_state=0)\n\n forest.fit(X, y)\n importances = forest.feature_importances_\n #std = np.std([tree.feature_importances_ for tree in forest.estimators_],\n # axis=0)\n indices = np.argsort(importances)[::-1]\n obj=pattern_names[indices[(importances[indices].cumsum()<0.2) & (importances[indices] > 0.05) ]]\n import_list = [i for i in obj][:3]\n return (import_list, indices, importances)\n\n# Print the feature ranking\nprint(\"feature important ranking:\")\n\npattern_names=df_csv.columns[4:]\nimport_list, indices, importances = get_importances20(subject,pattern_names)\nprint import_list\n\nfor f in range(X.shape[1]):\n print(\"subject: %s, importance:%.3f, acc_sum:%.3f\" % (\n pattern_names[indices[f]], importances[indices[f]], importances[indices].cumsum()[f]))", "CS284\nfeature important ranking:\n['SC135', 'MA211', 'CS111']\nsubject: SC135, importance:0.060, acc_sum:0.060\nsubject: MA211, importance:0.060, acc_sum:0.120\nsubject: CS111, importance:0.059, acc_sum:0.179\nsubject: TU154, importance:0.055, acc_sum:0.234\nsubject: EL171, 
importance:0.054, acc_sum:0.288\nsubject: EL172, importance:0.052, acc_sum:0.340\nsubject: TU120, importance:0.050, acc_sum:0.390\nsubject: CS102, importance:0.050, acc_sum:0.440\nsubject: PY228, importance:0.047, acc_sum:0.487\nsubject: CS101, importance:0.046, acc_sum:0.533\nsubject: TU110, importance:0.045, acc_sum:0.578\nsubject: TH161, importance:0.043, acc_sum:0.621\nsubject: SC185, importance:0.040, acc_sum:0.661\nsubject: EL070, importance:0.023, acc_sum:0.684\nsubject: TU100, importance:0.022, acc_sum:0.707\nsubject: MA212, importance:0.019, acc_sum:0.726\nsubject: TU130, importance:0.019, acc_sum:0.745\nsubject: SW111, importance:0.018, acc_sum:0.763\nsubject: ST216, importance:0.016, acc_sum:0.780\nsubject: CS105, importance:0.015, acc_sum:0.794\nsubject: LA209, importance:0.014, acc_sum:0.809\nsubject: TU122, importance:0.013, acc_sum:0.822\nsubject: SO201, importance:0.011, acc_sum:0.833\nsubject: SW335, importance:0.011, acc_sum:0.844\nsubject: SC123, importance:0.010, acc_sum:0.854\nsubject: SW212, importance:0.009, acc_sum:0.863\nsubject: SC173, importance:0.009, acc_sum:0.872\nsubject: MW314, importance:0.009, acc_sum:0.880\nsubject: MW313, importance:0.008, acc_sum:0.889\nsubject: CS115, importance:0.008, acc_sum:0.897\nsubject: SW213, importance:0.008, acc_sum:0.904\nsubject: AT316, importance:0.007, acc_sum:0.912\nsubject: ES356, importance:0.007, acc_sum:0.919\nsubject: SW221, importance:0.006, acc_sum:0.925\nsubject: CJ316, importance:0.006, acc_sum:0.931\nsubject: TA395, importance:0.005, acc_sum:0.936\nsubject: TU153, importance:0.005, acc_sum:0.940\nsubject: SW475, importance:0.004, acc_sum:0.944\nsubject: CJ317, importance:0.004, acc_sum:0.948\nsubject: MA216, importance:0.003, acc_sum:0.951\nsubject: CS261, importance:0.003, acc_sum:0.954\nsubject: PM236, importance:0.003, acc_sum:0.957\nsubject: CJ315, importance:0.003, acc_sum:0.960\nsubject: PE245, importance:0.003, acc_sum:0.962\nsubject: CS214, importance:0.002, acc_sum:0.965\nsubject: HO201, importance:0.002, acc_sum:0.967\nsubject: EL295, importance:0.002, acc_sum:0.969\nsubject: NS132, importance:0.002, acc_sum:0.970\nsubject: MA332, importance:0.002, acc_sum:0.972\nsubject: TU116, importance:0.002, acc_sum:0.974\nsubject: JC260, importance:0.001, acc_sum:0.975\nsubject: AT326, importance:0.001, acc_sum:0.977\nsubject: CS213, importance:0.001, acc_sum:0.978\nsubject: MA217, importance:0.001, acc_sum:0.979\nsubject: CS222, importance:0.001, acc_sum:0.981\nsubject: PM235, importance:0.001, acc_sum:0.982\nsubject: CS223, importance:0.001, acc_sum:0.983\nsubject: SW365, importance:0.001, acc_sum:0.984\nsubject: AS171, importance:0.001, acc_sum:0.985\nsubject: NS112, importance:0.001, acc_sum:0.986\nsubject: CS296, importance:0.001, acc_sum:0.987\nsubject: ES456, importance:0.001, acc_sum:0.988\nsubject: HS266, importance:0.001, acc_sum:0.989\nsubject: HS360, importance:0.001, acc_sum:0.990\nsubject: CS284, importance:0.001, acc_sum:0.990\nsubject: PM215, importance:0.001, acc_sum:0.991\nsubject: CS251, importance:0.001, acc_sum:0.992\nsubject: CS311, importance:0.001, acc_sum:0.992\nsubject: CS281, importance:0.001, acc_sum:0.993\nsubject: CS295, importance:0.001, acc_sum:0.994\nsubject: CS377, importance:0.000, acc_sum:0.994\nsubject: CS395, importance:0.000, acc_sum:0.995\nsubject: GE225, importance:0.000, acc_sum:0.995\nsubject: CS314, importance:0.000, acc_sum:0.995\nsubject: PY218, importance:0.000, acc_sum:0.996\nsubject: CS342, importance:0.000, acc_sum:0.996\nsubject: CS301, importance:0.000, 
acc_sum:0.996\nsubject: EL395, importance:0.000, acc_sum:0.997\nsubject: CS374, importance:0.000, acc_sum:0.997\nsubject: CS365, importance:0.000, acc_sum:0.997\nsubject: CS367, importance:0.000, acc_sum:0.998\nsubject: CS396, importance:0.000, acc_sum:0.998\nsubject: CS341, importance:0.000, acc_sum:0.998\nsubject: CS326, importance:0.000, acc_sum:0.998\nsubject: CS402, importance:0.000, acc_sum:0.998\nsubject: TU115, importance:0.000, acc_sum:0.999\nsubject: CS397, importance:0.000, acc_sum:0.999\nsubject: CS297, importance:0.000, acc_sum:0.999\nsubject: CS302, importance:0.000, acc_sum:0.999\nsubject: SW478, importance:0.000, acc_sum:0.999\nsubject: CS429, importance:0.000, acc_sum:0.999\nsubject: CS288, importance:0.000, acc_sum:0.999\nsubject: CS401, importance:0.000, acc_sum:0.999\nsubject: CS398, importance:0.000, acc_sum:1.000\nsubject: CS486, importance:0.000, acc_sum:1.000\nsubject: MU277, importance:0.000, acc_sum:1.000\nsubject: CS487, importance:0.000, acc_sum:1.000\nsubject: AT366, importance:0.000, acc_sum:1.000\nsubject: MU135, importance:0.000, acc_sum:1.000\nsubject: CS289, importance:0.000, acc_sum:1.000\nsubject: CS286, importance:0.000, acc_sum:1.000\nsubject: MU202, importance:0.000, acc_sum:1.000\nsubject: CS385, importance:0.000, acc_sum:1.000\nsubject: JC201, importance:0.000, acc_sum:1.000\nsubject: AT336, importance:0.000, acc_sum:1.000\nsubject: AT346, importance:0.000, acc_sum:1.000\nsubject: CS388, importance:0.000, acc_sum:1.000\nsubject: CS386, importance:0.000, acc_sum:1.000\nsubject: CS387, importance:0.000, acc_sum:1.000\nsubject: AT207, importance:0.000, acc_sum:1.000\nsubject: AS178, importance:0.000, acc_sum:1.000\nsubject: AS177, importance:0.000, acc_sum:1.000\nsubject: CS366, importance:0.000, acc_sum:1.000\nsubject: BA291, importance:0.000, acc_sum:1.000\nsubject: CS359, importance:0.000, acc_sum:1.000\nsubject: CS356, importance:0.000, acc_sum:1.000\nsubject: CS348, importance:0.000, acc_sum:1.000\nsubject: CF366, importance:0.000, acc_sum:1.000\nsubject: CS328, importance:0.000, acc_sum:1.000\nsubject: CF367, importance:0.000, acc_sum:1.000\nsubject: CJ321, importance:0.000, acc_sum:1.000\nsubject: CN342, importance:0.000, acc_sum:1.000\nsubject: CS211, importance:0.000, acc_sum:1.000\nsubject: CS300, importance:0.000, acc_sum:1.000\nsubject: CS231, importance:0.000, acc_sum:1.000\nsubject: CS275, importance:0.000, acc_sum:1.000\nsubject: CS285, importance:0.000, acc_sum:1.000\nsubject: CS215, importance:0.000, acc_sum:1.000\nsubject: TU156, importance:0.000, acc_sum:1.000\nsubject: CS399, importance:0.000, acc_sum:1.000\nsubject: PY237, importance:0.000, acc_sum:1.000\nsubject: JC281, importance:0.000, acc_sum:1.000\nsubject: JP171, importance:0.000, acc_sum:1.000\nsubject: JP172, importance:0.000, acc_sum:1.000\nsubject: MA221, importance:0.000, acc_sum:1.000\nsubject: MU100, importance:0.000, acc_sum:1.000\nsubject: MU130, importance:0.000, acc_sum:1.000\nsubject: MU275, importance:0.000, acc_sum:1.000\nsubject: MU278, importance:0.000, acc_sum:1.000\nsubject: MW318, importance:0.000, acc_sum:1.000\nsubject: PC286, importance:0.000, acc_sum:1.000\nsubject: PE240, importance:0.000, acc_sum:1.000\nsubject: PY211, importance:0.000, acc_sum:1.000\nsubject: PY217, importance:0.000, acc_sum:1.000\nsubject: PY226, importance:0.000, acc_sum:1.000\nsubject: PY267, importance:0.000, acc_sum:1.000\nsubject: CS407, importance:0.000, acc_sum:1.000\nsubject: RE333, importance:0.000, acc_sum:1.000\nsubject: RT326, importance:0.000, acc_sum:1.000\nsubject: 
SN212, importance:0.000, acc_sum:1.000\nsubject: ST218, importance:0.000, acc_sum:1.000\nsubject: SW214, importance:0.000, acc_sum:1.000\nsubject: SW222, importance:0.000, acc_sum:1.000\nsubject: SW223, importance:0.000, acc_sum:1.000\nsubject: SW224, importance:0.000, acc_sum:1.000\nsubject: SW366, importance:0.000, acc_sum:1.000\nsubject: SW467, importance:0.000, acc_sum:1.000\nsubject: SW486, importance:0.000, acc_sum:1.000\nsubject: SW489, importance:0.000, acc_sum:1.000\nsubject: TD436, importance:0.000, acc_sum:1.000\nsubject: TU111, importance:0.000, acc_sum:1.000\nsubject: JC200, importance:0.000, acc_sum:1.000\nsubject: IS201, importance:0.000, acc_sum:1.000\nsubject: HS356, importance:0.000, acc_sum:1.000\nsubject: HS269, importance:0.000, acc_sum:1.000\nsubject: CS408, importance:0.000, acc_sum:1.000\nsubject: CS409, importance:0.000, acc_sum:1.000\nsubject: CS426, importance:0.000, acc_sum:1.000\nsubject: CS427, importance:0.000, acc_sum:1.000\nsubject: CS446, importance:0.000, acc_sum:1.000\nsubject: CS447, importance:0.000, acc_sum:1.000\nsubject: CS449, importance:0.000, acc_sum:1.000\nsubject: CS456, importance:0.000, acc_sum:1.000\nsubject: CS457, importance:0.000, acc_sum:1.000\nsubject: CS459, importance:0.000, acc_sum:1.000\nsubject: CS467, importance:0.000, acc_sum:1.000\nsubject: CS469, importance:0.000, acc_sum:1.000\nsubject: CS479, importance:0.000, acc_sum:1.000\nsubject: CS488, importance:0.000, acc_sum:1.000\nsubject: CS489, importance:0.000, acc_sum:1.000\nsubject: CS496, importance:0.000, acc_sum:1.000\nsubject: CS499, importance:0.000, acc_sum:1.000\nsubject: DM201, importance:0.000, acc_sum:1.000\nsubject: DM207, importance:0.000, acc_sum:1.000\nsubject: DM215, importance:0.000, acc_sum:1.000\nsubject: EC210, importance:0.000, acc_sum:1.000\nsubject: EG241, importance:0.000, acc_sum:1.000\nsubject: EL202, importance:0.000, acc_sum:1.000\nsubject: EL231, importance:0.000, acc_sum:1.000\nsubject: ES256, importance:0.000, acc_sum:1.000\nsubject: FD211, importance:0.000, acc_sum:1.000\nsubject: FN211, importance:0.000, acc_sum:1.000\nsubject: GE311, importance:0.000, acc_sum:1.000\nsubject: HR201, importance:0.000, acc_sum:1.000\nsubject: AN201, importance:0.000, acc_sum:1.000\n" ], [ "##generate importance\ndef add_dot(x):\n return 'cstu.'+x[:2]+'.'+x\npattern_names=df_csv.columns[4:]\ndata3=[]\nfor s in all_subjects.index:\n clist = map( add_dot , get_importances20(s,pattern_names)[0] )\n obj={'name':add_dot(s), 'imports': clist }\n data3.append(obj)\ndata3", "_____no_output_____" ], [ "import json\nwith open('bundle_files/flare-imports.json','w') as fp:\n json.dump(data3, fp,indent=2, separators=(', ', ': '))", "_____no_output_____" ], [ "##load imported list, json data3\nimport json\nwith open('bundle_files/flare-imports.json','r') as fp:\n data3=json.load(fp)\n##filtered non-connected subject out from data3 \nimported_subjects={}\nfor i in data3:\n for j in i['imports']:\n if j in imported_subjects:\n imported_subjects[j]+=1\n else:\n imported_subjects[j]=0\ndata4=[]\nfor i in data3:\n if i['name'] in imported_subjects or i['imports']:\n data4.append(i)\n else: \n print i\nimport json\nwith open('bundle_files/flare-imports.json','w') as fp:\n json.dump(data4, fp,indent=2, separators=(', ', ': '))\n\ndata4", "_____no_output_____" ], [ "import operator\nsorted_imsub = sorted(imported_subjects.items(), key=operator.itemgetter(1), reverse=True)\nsorted_imsub", "_____no_output_____" ], [ "y=x['imports']", "_____no_output_____" ], [ "for k in x.iterkeys(): print 
k", "imports\nname\nsize\n" ], [ "len(data3)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a6a192a510d9d45c70a0dffec457dc2001f1dd3
17,836
ipynb
Jupyter Notebook
notebooks/01_tabular_data_exploration.ipynb
khanfarhan10/scikit-learn-mooc
37f1e34eae0304a92f557a8194e068b4333ad418
[ "CC-BY-4.0" ]
null
null
null
notebooks/01_tabular_data_exploration.ipynb
khanfarhan10/scikit-learn-mooc
37f1e34eae0304a92f557a8194e068b4333ad418
[ "CC-BY-4.0" ]
null
null
null
notebooks/01_tabular_data_exploration.ipynb
khanfarhan10/scikit-learn-mooc
37f1e34eae0304a92f557a8194e068b4333ad418
[ "CC-BY-4.0" ]
null
null
null
37.470588
148
0.625645
[ [ [ "# First look at our dataset\n\nIn this notebook, we will look at the necessary steps required before any\n machine learning takes place. It involves:\n\n* loading the data;\n* looking at the variables in the dataset, in particular, differentiate\n between numerical and categorical variables, which need different\n preprocessing in most machine learning workflows;\n* visualizing the distribution of the variables to gain some insights into\n the dataset.", "_____no_output_____" ], [ "## Loading the adult census dataset\n\nWe will use data from the 1994 US census that we downloaded from\n[OpenML](http://openml.org/).\n\nYou can look at the OpenML webpage to learn more about this dataset:\n<http://www.openml.org/d/1590>\n\nThe dataset is available as a CSV (Comma-Separated Values) file and we will\nuse pandas to read it.\n\n<div class=\"admonition note alert alert-info\">\n<p class=\"first admonition-title\" style=\"font-weight: bold;\">Note</p>\n<p class=\"last\"><a class=\"reference external\" href=\"https://pandas.pydata.org/\">Pandas</a> is a Python library used for\nmanipulating 1 and 2 dimensional structured data. If you have never used\npandas, we recommend you look at this\n<a class=\"reference external\" href=\"https://pandas.pydata.org/docs/user_guide/10min.html\">tutorial</a>.</p>\n</div>", "_____no_output_____" ] ], [ [ "import pandas as pd\n\nadult_census = pd.read_csv(\"../datasets/adult-census.csv\")", "_____no_output_____" ] ], [ [ "The goal with this data is to predict whether a person earns over 50K a year\nfrom heterogeneous data such as age, employment, education, family\ninformation, etc.", "_____no_output_____" ], [ "## The variables (columns) in the dataset\n\nThe data are stored in a pandas dataframe. A dataframe is a type of\nstructured data composed of 2 dimensions. This type of data is also referred\nas tabular data.\n\nEach row represents a sample. In the field of machine learning or descriptive\nstatistics, commonly used equivalent terms are \"record\", \"instance\", or\n\"observation\".\n\nEach column represents a type of information that has been collected and is\ncalled a feature. In the field of machine learning and descriptive\nstatistics, commonly used equivalent terms are \"variable\", \"attribute\", or\n\"covariate\".", "_____no_output_____" ], [ "A quick way to inspect the dataframe is to show the first few lines with the\n`head` method:", "_____no_output_____" ] ], [ [ "adult_census.head()", "_____no_output_____" ] ], [ [ "The column named **class** is our target variable (i.e., the variable which\nwe want to predict). The two possible classes are `<=50K` (low-revenue) and\n`>50K` (high-revenue). The resulting prediction problem is therefore a\nbinary classification problem, while we will use the other columns as input\nvariables for our model.", "_____no_output_____" ] ], [ [ "target_column = 'class'\nadult_census[target_column].value_counts()", "_____no_output_____" ] ], [ [ "<div class=\"admonition note alert alert-info\">\n<p class=\"first admonition-title\" style=\"font-weight: bold;\">Note</p>\n<p>Classes are slightly imbalanced, meaning there are more samples of one or\nmore classes compared to others. 
Class imbalance happens often in practice\nand may need special techniques when building a predictive model.</p>\n<p class=\"last\">For example in a medical setting, if we are trying to predict whether\nsubjects will develop a rare disease, there will be a lot more healthy\nsubjects than ill subjects in the dataset.</p>\n</div>", "_____no_output_____" ], [ "The dataset contains both numerical and categorical data. Numerical values\ntake continuous values, for example `age`. Categorical values can have a\nfinite number of values, for example `native-country`.", "_____no_output_____" ] ], [ [ "numerical_columns = [\n 'age', 'education-num', 'capital-gain', 'capital-loss',\n 'hours-per-week']\ncategorical_columns = [\n 'workclass', 'education', 'marital-status', 'occupation',\n 'relationship', 'race', 'sex', 'native-country']\nall_columns = numerical_columns + categorical_columns + [\n target_column]\n\nadult_census = adult_census[all_columns]", "_____no_output_____" ] ], [ [ "We can check the number of samples and the number of columns available in\nthe dataset:", "_____no_output_____" ] ], [ [ "print(f\"The dataset contains {adult_census.shape[0]} samples and \"\n f\"{adult_census.shape[1]} columns\")", "_____no_output_____" ] ], [ [ "We can compute the number of features by counting the number of columns and\nsubtract 1, since one of the columns is the target.", "_____no_output_____" ] ], [ [ "print(f\"The dataset contains {adult_census.shape[1] - 1} features.\")", "_____no_output_____" ] ], [ [ "## Visual inspection of the data\nBefore building a predictive model, it is a good idea to look at the data:\n\n* maybe the task you are trying to achieve can be solved without machine\n learning;\n* you need to check that the information you need for your task is actually\n present in the dataset;\n* inspecting the data is a good way to find peculiarities. These can\n arise during data collection (for example, malfunctioning sensor or missing\n values), or from the way the data is processed afterwards (for example\n capped values).", "_____no_output_____" ], [ "Let's look at the distribution of individual features, to get some insights\nabout the data. We can start by plotting histograms, note that this only\nworks for features containing numerical values:", "_____no_output_____" ] ], [ [ "_ = adult_census.hist(figsize=(20, 14))", "_____no_output_____" ] ], [ [ "<div class=\"admonition tip alert alert-warning\">\n<p class=\"first admonition-title\" style=\"font-weight: bold;\">Tip</p>\n<p class=\"last\">In the previous cell, we used the following pattern: <tt class=\"docutils literal\">_ = func()</tt>. We do this\nto avoid showing the output of <tt class=\"docutils literal\">func()</tt> which in this case is not that\nuseful. We actually assign the output of <tt class=\"docutils literal\">func()</tt> into the variable <tt class=\"docutils literal\">_</tt>\n(called underscore). By convention, in Python the underscore variable is used\nas a \"garbage\" variable to store results that we are not interested in.</p>\n</div>\n\nWe can already make a few comments about some of the variables:\n\n* `age`: there are not that many points for `age > 70`. The dataset\n description does indicate that retired people have been filtered out\n (`hours-per-week > 0`);\n* `education-num`: peak at 10 and 13, hard to tell what it corresponds to\n without looking much further. 
We'll do that later in this notebook;\n* `hours-per-week` peaks at 40, this was very likely the standard number of\n working hours at the time of the data collection;\n* most values of `capital-gain` and `capital-loss` are close to zero.", "_____no_output_____" ], [ "For categorical variables, we can look at the distribution of values:", "_____no_output_____" ] ], [ [ "adult_census['sex'].value_counts()", "_____no_output_____" ], [ "adult_census['education'].value_counts()", "_____no_output_____" ] ], [ [ "As noted above, `education-num` distribution has two clear peaks around 10\nand 13. It would be reasonable to expect that `education-num` is the number\nof years of education.\n\nLet's look at the relationship between `education` and `education-num`.", "_____no_output_____" ] ], [ [ "pd.crosstab(index=adult_census['education'],\n columns=adult_census['education-num'])", "_____no_output_____" ] ], [ [ "This shows that `education` and `education-num` give you the same\ninformation. For example, `education-num=2` is equivalent to\n`education='1st-4th'`. In practice that means we can remove `education-num`\nwithout losing information. Note that having redundant (or highly correlated)\ncolumns can be a problem for machine learning algorithms.", "_____no_output_____" ], [ "<div class=\"admonition note alert alert-info\">\n<p class=\"first admonition-title\" style=\"font-weight: bold;\">Note</p>\n<p class=\"last\">In the upcoming notebooks, we will only keep the <tt class=\"docutils literal\">education</tt> variable,\nexcluding the <tt class=\"docutils literal\"><span class=\"pre\">education-num</span></tt> variable.</p>\n</div>", "_____no_output_____" ], [ "Another way to inspect the data is to do a `pairplot` and show how each\nvariable differs according to our target, i.e. `class`. Plots along the\ndiagonal show the distribution of individual variables for each `class`. The\nplots on the off-diagonal can reveal interesting interactions between\nvariables.", "_____no_output_____" ] ], [ [ "import seaborn as sns\n\nn_samples_to_plot = 5000\ncolumns = ['age', 'education-num', 'hours-per-week']\n_ = sns.pairplot(data=adult_census[:n_samples_to_plot], vars=columns,\n hue=target_column, plot_kws={'alpha': 0.2},\n height=3, diag_kind='hist', diag_kws={'bins': 30})", "_____no_output_____" ] ], [ [ "\nBy looking at the data you could infer some hand-written rules to predict the\nclass:\n\n* if you are young (less than 25 year-old roughly), you are in the\n `<=50K` class;\n* if you are old (more than 70 year-old roughly), you are in the\n `<=50K` class;\n* if you work part-time (less than 40 hours roughly) you are in the\n `<=50K` class.\n\nThese hand-written rules could work reasonably well without the need for any\nmachine learning. Note however that it is not very easy to create rules for\nthe region `40 < hours-per-week < 60` and `30 < age < 70`. We can hope that\nmachine learning can help in this region. 
Also note that visualization can\nhelp creating hand-written rules but is limited to 2 dimensions (maybe 3\ndimensions), whereas machine learning models can build models in\nhigh-dimensional spaces.\n\n<div class=\"admonition note alert alert-info\">\n<p class=\"first admonition-title\" style=\"font-weight: bold;\">Note</p>\n<p class=\"last\">In a machine-learning setting, a model automatically creates the \"rules\" from\nthe data in order to make predictions on new unseen data.</p>\n</div>\n\nAnother thing worth mentioning in this plot: if you are young (less than 25\nyear-old roughly) or old (more than 70 year-old roughly) you tend to work\nless. This is a non-linear relationship between age and hours per week.\nLinear machine learning models can only capture linear interactions, so this\nmay be a factor when deciding which model to chose.", "_____no_output_____" ], [ "\n## An example of machine learning model decision rules\n\nThe plot below shows the rules of a simple model, called decision tree. This\nmodel has been trained using the `age` and `hours-per-week` features, so that\nwe can have a nice graphical representation of its decision rules in two\ndimensions. We will explain how this model works in a later notebook, for now\nlet us just consider the model predictions when trained on this dataset:\n\n![](../figures/simple_decision_tree_adult_census.png)\n\nThe data points (circles) show the distribution of `hours-per-week` and `age`\nin the dataset. Blue points mean `low-income` and orange points mean\n`high-income`. This part of the plot is the same as the bottom-left plot in\nthe pairplot above.\n\nWhat is new in this plot is that we have added the model decision rules as\nbackground colors. The background color in each area represents the\nprobability of the class `high-income` as estimated by the model. Values\ntowards 0 (dark blue) indicates that the model predicts `low-income` with a\nhigh probability. Values towards 1 (dark orange) indicates that the model\npredicts `high-income` with a high probability. Values towards 0.5 (white)\nindicates that the model is not very sure about its prediction.\n\nLooking at the plot, here is what we can gather:\n\n* In the region `age < 28.5` (left region) the prediction is `low-income`.\n The dark blue color indicates that the model is quite sure about its\n prediction.\n* In the region `age > 28.5 AND hours-per-week < 40.5`\n (bottom-right region), the prediction is `low-income`. Note that the blue\n is a bit lighter that for the left region which means that the algorithm is\n not as certain in this region.\n* In the region `age > 28.5 AND hours-per-week > 40.5` (top-right region),\n the prediction is `low-income`. However the probability of the class\n `low-income` is very close to 0.5 which means the model is not sure at all\n about its prediction.\n\nIt is interesting to see that a simple model creates rules similar to the\nones that we could have created by hand. Note that machine learning is really\ninteresting when creating rules by hand is not straightforward, for example\nbecause we are in high dimension (many features) or because there are no\nsimple and obvious rules that separate the two classes as in the top-right\nregion", "_____no_output_____" ], [ "\nIn this notebook we have:\n\n* loaded the data from a CSV file using `pandas`;\n* looked at the different kind of variables to differentiate between\n categorical and numerical variables;\n* inspected the data with `pandas` and `seaborn`. 
Data inspection can allow\n you to decide whether using machine learning is appropriate for your data\n and to highlight potential peculiarities in your data.\n\nIdeas which will be discussed more in details later:\n\n* if your target variable is imbalanced (e.g., you have more samples from one\n target category than another), you may need special techniques for training\n and evaluating your machine learning model;\n* having redundant (or highly correlated) columns can be a problem for\n some machine learning algorithms;\n* contrary to decision tree, linear models can only capture linear\n interaction, so be aware of non-linear relationships in your data.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ] ]
4a6a23e74584f1248316e72fcd0152203a5f9bca
72,572
ipynb
Jupyter Notebook
1-inequality-and-fairness/code/analysis/5-additional-results-fairness-coop.ipynb
uguryi/dissertation
546282394225aa7acde624d28fbd0119f014f5df
[ "MIT" ]
null
null
null
1-inequality-and-fairness/code/analysis/5-additional-results-fairness-coop.ipynb
uguryi/dissertation
546282394225aa7acde624d28fbd0119f014f5df
[ "MIT" ]
null
null
null
1-inequality-and-fairness/code/analysis/5-additional-results-fairness-coop.ipynb
uguryi/dissertation
546282394225aa7acde624d28fbd0119f014f5df
[ "MIT" ]
null
null
null
36.141434
128
0.490782
[ [ [ "### Import necessary libraries, set options", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport pandas as pd\nimport patsy\nimport seaborn as sns\nimport statsmodels.api as sm\nimport warnings\n\nfrom statsmodels.formula.api import glm\n\npd.set_option('display.max_columns', 125)\nwarnings.filterwarnings(\"ignore\")", "_____no_output_____" ] ], [ [ "### Read in pickled datasets", "_____no_output_____" ] ], [ [ "path_to_diss = os.path.join(\"path/to/dissertation/chapter\")\n\npath_to_data = os.path.join(path_to_diss, \"path/to/data/processed-data\")", "_____no_output_____" ] ], [ [ "##### Between-individual", "_____no_output_____" ] ], [ [ "data_final = pd.read_pickle(path_to_data + \"/pkl/data_final.pkl\")\nprint(len(data_final))\ndata_final.head()", "_____no_output_____" ], [ "data_final_coop = pd.read_pickle(path_to_data + \"/pkl/data_final_coop.pkl\")\nprint(len(data_final_coop))\ndata_final_coop.head()", "_____no_output_____" ], [ "data_pid_version = pd.read_pickle(path_to_data + \"/pkl/data_pid_version.pkl\")\nprint(len(data_pid_version))\ndata_pid_version.head()", "_____no_output_____" ], [ "data_pid = pd.read_pickle(path_to_data + \"/pkl/data_pid.pkl\")\nprint(len(data_pid))\ndata_pid.head()", "_____no_output_____" ] ], [ [ "##### Between-session", "_____no_output_____" ] ], [ [ "session_data = pd.read_pickle(path_to_data + \"/pkl/session_data.pkl\")\nprint(len(session_data))\nsession_data.head()", "_____no_output_____" ], [ "session_data_coop = pd.read_pickle(path_to_data + \"/pkl/session_data_coop.pkl\")\nprint(len(session_data_coop))\nsession_data_coop.head()", "_____no_output_____" ], [ "session_data_pid_version = pd.read_pickle(path_to_data + \"/pkl/session_data_pid_version.pkl\")\nprint(len(session_data_pid_version))\nsession_data_pid_version.head()", "_____no_output_____" ] ], [ [ "##### Within-individual", "_____no_output_____" ] ], [ [ "data_within = pd.read_pickle(path_to_data + \"/pkl/data_within.pkl\")\nprint(len(data_within))\ndata_within.head()", "_____no_output_____" ], [ "data_within_coop = pd.read_pickle(path_to_data + \"/pkl/data_within_coop.pkl\")\nprint(len(data_within_coop))\ndata_within_coop.head()", "_____no_output_____" ], [ "data_within_pid = pd.read_pickle(path_to_data + \"/pkl/data_within_pid.pkl\")\nprint(len(data_within_pid))\ndata_within_pid.head()", "_____no_output_____" ] ], [ [ "##### Within-session", "_____no_output_____" ] ], [ [ "session_data_within = pd.read_pickle(path_to_data + \"/pkl/session_data_within.pkl\")\nprint(len(session_data_within))\nsession_data_within.head()", "_____no_output_____" ], [ "session_data_within_coop = pd.read_pickle(path_to_data + \"/pkl/session_data_within_coop.pkl\")\nprint(len(session_data_within_coop))\nsession_data_within_coop.head()", "_____no_output_____" ], [ "session_data_within_pid = pd.read_pickle(path_to_data + \"/pkl/session_data_within_pid.pkl\")\nprint(len(session_data_within_pid))\nsession_data_within_pid.head()", "_____no_output_____" ] ], [ [ "### Generate within-subjects interaction term", "_____no_output_____" ] ], [ [ "data_within_pid['game1_int'] = data_within_pid['earned_v1'] * data_within_pid['equal_v1']\ndata_within_pid['game2_int'] = data_within_pid['earned_v2'] * data_within_pid['equal_v2']\ndata_within_pid['delta_int'] = data_within_pid['game2_int'] - data_within_pid['game1_int']", "_____no_output_____" ], [ "data_within_coop['game1_int'] = data_within_coop['earned_v1'] * data_within_coop['equal_v1']\ndata_within_coop['game2_int'] = 
data_within_coop['earned_v2'] * data_within_coop['equal_v2']\ndata_within_coop['delta_int'] = data_within_coop['game2_int'] - data_within_coop['game1_int']", "_____no_output_____" ], [ "session_data_within_pid['game1_int'] = session_data_within_pid['earned1'] * session_data_within_pid['equal1']\nsession_data_within_pid['game2_int'] = session_data_within_pid['earned2'] * session_data_within_pid['equal2']\nsession_data_within_pid['delta_int'] = session_data_within_pid['game2_int'] - session_data_within_pid['game1_int']", "_____no_output_____" ], [ "session_data_within_coop['game1_int'] = session_data_within_coop['earned1'] * session_data_within_coop['equal1']\nsession_data_within_coop['game2_int'] = session_data_within_coop['earned2'] * session_data_within_coop['equal2']\nsession_data_within_coop['delta_int'] = session_data_within_coop['game2_int'] - session_data_within_coop['game1_int']", "_____no_output_____" ] ], [ [ "### Fairness ~ conditions", "_____no_output_____" ], [ "##### Overview", "_____no_output_____" ] ], [ [ "print(np.mean(data_pid_version.f_score))\nprint()\n\nprint(data_pid_version.groupby('version')['f_score'].mean())\nprint()\n\nprint(data_pid_version.groupby('condition')['f_score'].mean())\nprint()\n\nprint(data_pid_version.groupby(['version', 'condition'])['f_score'].mean())", "_____no_output_____" ] ], [ [ "##### Figures", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots(1, 3, figsize = (15, 5))\n\nfor i in range(3):\n\n if i == 0:\n temp = data_pid_version[data_pid_version['version'] == 1].groupby('condition')['f_score'].mean().reset_index()\n ax[i].set_title(\"First game only\")\n elif i == 1:\n temp = data_pid_version[data_pid_version['version'] == 2].groupby('condition')['f_score'].mean().reset_index()\n ax[i].set_title(\"Second game only\")\n else:\n temp = data_pid_version.groupby('condition')['f_score'].mean().reset_index()\n ax[i].set_title(\"Pooled\")\n\n groupedvalues = temp.sort_values(by = ['f_score']).reset_index(drop = True)\n #pal = sns.color_palette(\"Reds_d\", len(groupedvalues))\n #rank = groupedvalues[\"f_score\"].argsort().argsort()\n #sns.barplot(x='condition', y='f_score', data=groupedvalues, palette=np.array(pal[::-1])[rank], ax=ax[i])\n clrs = ['tomato', 'royalblue', 'limegreen', 'orange']\n sns.barplot(x = 'condition', y = 'f_score', data = groupedvalues, palette = clrs, ax = ax[i])\n for index, row in groupedvalues.iterrows():\n ax[i].text(row.name, row.f_score, round(row.f_score, 2), color = 'black', ha = \"center\")\n ax[i].set(xlabel = '', ylabel = 'Average fairness score')\n ax[i].set_xticklabels(['RU', 'EE', 'RE', 'EU'])\n ax[i].set(ylim=(0, 6))\n \nfig.savefig(os.path.join(path_to_diss, \"paper/figures/appendices/figureA5.1.png\"), \n bbox_inches = 'tight', \n pad_inches = 0.25)", "_____no_output_____" ] ], [ [ "##### Pairwise fairness and preference comparisons", "_____no_output_____" ] ], [ [ "dic_more_fair = {\n 'ee_vs_re': [],\n 'ee_vs_eu': [],\n 'ee_vs_ru': [],\n 're_vs_eu': [],\n 're_vs_ru': [],\n 'eu_vs_ru': []\n}\n\ndic_prefer = {\n 'ee_vs_re': [],\n 'ee_vs_eu': [],\n 'ee_vs_ru': [],\n 're_vs_eu': [],\n 're_vs_ru': [],\n 'eu_vs_ru': []\n}", "_____no_output_____" ], [ "for index, row in data_pid.iterrows():\n if (row[\"earned1\"] == 1 and row[\"equal1\"] == 1 and row[\"earned2\"] == 0 and row[\"equal2\"] == 1) or \\\n (row[\"earned1\"] == 0 and row[\"equal1\"] == 1 and row[\"earned2\"] == 1 and row[\"equal2\"] == 1):\n dic_more_fair['ee_vs_re'].append(row['more_fair'])\n dic_prefer['ee_vs_re'].append(row['prefer'])\n elif 
(row[\"earned1\"] == 1 and row[\"equal1\"] == 1 and row[\"earned2\"] == 1 and row[\"equal2\"] == 0) or \\\n (row[\"earned1\"] == 1 and row[\"equal1\"] == 0 and row[\"earned2\"] == 1 and row[\"equal2\"] == 1):\n dic_more_fair['ee_vs_eu'].append(row['more_fair'])\n dic_prefer['ee_vs_eu'].append(row['prefer'])\n elif (row[\"earned1\"] == 1 and row[\"equal1\"] == 1 and row[\"earned2\"] == 0 and row[\"equal2\"] == 0) or \\\n (row[\"earned1\"] == 0 and row[\"equal1\"] == 0 and row[\"earned2\"] == 1 and row[\"equal2\"] == 1):\n dic_more_fair['ee_vs_ru'].append(row['more_fair'])\n dic_prefer['ee_vs_ru'].append(row['prefer']) \n elif (row[\"earned1\"] == 0 and row[\"equal1\"] == 1 and row[\"earned2\"] == 1 and row[\"equal2\"] == 0) or \\\n (row[\"earned1\"] == 1 and row[\"equal1\"] == 0 and row[\"earned2\"] == 0 and row[\"equal2\"] == 1):\n dic_more_fair['re_vs_eu'].append(row['more_fair'])\n dic_prefer['re_vs_eu'].append(row['prefer'])\n elif (row[\"earned1\"] == 0 and row[\"equal1\"] == 1 and row[\"earned2\"] == 0 and row[\"equal2\"] == 0) or \\\n (row[\"earned1\"] == 0 and row[\"equal1\"] == 0 and row[\"earned2\"] == 0 and row[\"equal2\"] == 1):\n dic_more_fair['re_vs_ru'].append(row['more_fair'])\n dic_prefer['re_vs_ru'].append(row['prefer']) \n elif (row[\"earned1\"] == 1 and row[\"equal1\"] == 0 and row[\"earned2\"] == 0 and row[\"equal2\"] == 0) or \\\n (row[\"earned1\"] == 0 and row[\"equal1\"] == 0 and row[\"earned2\"] == 1 and row[\"equal2\"] == 0):\n dic_more_fair['eu_vs_ru'].append(row['more_fair'])\n dic_prefer['eu_vs_ru'].append(row['prefer'])", "_____no_output_____" ], [ "for key in dic_more_fair.keys():\n temp_dict = {}\n for condition in np.unique(dic_more_fair[key]):\n if condition != '':\n temp_dict[condition] = [dic_more_fair[key].count(condition)]\n dic_more_fair[key] = temp_dict\n tot = 0\n for key2 in dic_more_fair[key].keys():\n tot += dic_more_fair[key][key2][0]\n for key2 in dic_more_fair[key].keys():\n raw_count = dic_more_fair[key][key2][0]\n perc = round(raw_count / tot * 100, 2)\n dic_more_fair[key][key2].append(perc)\n \ndic_more_fair", "_____no_output_____" ], [ "for key in dic_prefer.keys():\n temp_dict = {}\n for condition in np.unique(dic_prefer[key]):\n if condition != '':\n temp_dict[condition] = [dic_prefer[key].count(condition)]\n dic_prefer[key] = temp_dict\n tot = 0\n for key2 in dic_prefer[key].keys():\n tot += dic_prefer[key][key2][0]\n for key2 in dic_prefer[key].keys():\n raw_count = dic_prefer[key][key2][0]\n perc = round(raw_count / tot * 100, 2)\n dic_prefer[key][key2].append(perc)\n \ndic_prefer", "_____no_output_____" ] ], [ [ "##### Between-individual", "_____no_output_____" ] ], [ [ "# Between first only\n\ny, X = patsy.dmatrices(\n 'f_score ~ earned * equal', \n data_pid_version[data_pid_version['version'] == 1],\n return_type = 'dataframe'\n)\n\nsession_c = data_pid_version[(data_pid_version['version'] == 1) &\n (data_pid_version['version'] == data_pid_version['version']) &\n (data_pid_version['f_score'] == data_pid_version['f_score']) &\n (data_pid_version['earned'] == data_pid_version['earned']) &\n (data_pid_version['equal'] == data_pid_version['equal'])]['session_no']\n\nols = sm.OLS(y, X)\nols.fit(cov_type='cluster', cov_kwds={'groups': [session_c]}).summary()", "_____no_output_____" ], [ "# Between second only\n\ny, X = patsy.dmatrices(\n 'f_score ~ earned * equal', \n data_pid_version[data_pid_version['version'] == 2],\n return_type = 'dataframe'\n)\n\nsession_c = data_pid_version[(data_pid_version['version'] == 2) &\n 
(data_pid_version['version'] == data_pid_version['version']) &\n (data_pid_version['f_score'] == data_pid_version['f_score']) &\n (data_pid_version['earned'] == data_pid_version['earned']) &\n (data_pid_version['equal'] == data_pid_version['equal'])]['session_no']\n\nols = sm.OLS(y, X)\nols.fit(cov_type='cluster', cov_kwds={'groups': [session_c]}).summary()", "_____no_output_____" ], [ "# Between pooled\n\ny, X = patsy.dmatrices(\n 'f_score ~ earned * equal + C(version)', \n data_pid_version,\n return_type = 'dataframe'\n)\n\nsession_c = data_pid_version[(data_pid_version['version'] == data_pid_version['version']) &\n (data_pid_version['f_score'] == data_pid_version['f_score']) &\n (data_pid_version['earned'] == data_pid_version['earned']) &\n (data_pid_version['equal'] == data_pid_version['equal'])]['session_no']\n\npid_c = data_pid_version[(data_pid_version['version'] == data_pid_version['version']) &\n (data_pid_version['f_score'] == data_pid_version['f_score']) &\n (data_pid_version['earned'] == data_pid_version['earned']) &\n (data_pid_version['equal'] == data_pid_version['equal'])]['pid']\n\nols = sm.OLS(y, X)\nols.fit(cov_type='cluster', cov_kwds={'groups': [session_c, pid_c]}).summary()", "_____no_output_____" ], [ "# Between version interaction\n\ny, X = patsy.dmatrices(\n 'f_score ~ earned * equal * C(version)', \n data_pid_version,\n return_type = 'dataframe'\n)\n\nsession_c = data_pid_version[(data_pid_version['version'] == data_pid_version['version']) &\n (data_pid_version['f_score'] == data_pid_version['f_score']) &\n (data_pid_version['earned'] == data_pid_version['earned']) &\n (data_pid_version['equal'] == data_pid_version['equal'])]['session_no']\n\npid_c = data_pid_version[(data_pid_version['version'] == data_pid_version['version']) &\n (data_pid_version['f_score'] == data_pid_version['f_score']) &\n (data_pid_version['earned'] == data_pid_version['earned']) &\n (data_pid_version['equal'] == data_pid_version['equal'])]['pid']\n\nols = sm.OLS(y, X)\nols.fit(cov_type='cluster', cov_kwds={'groups': [session_c, pid_c]}).summary()", "_____no_output_____" ], [ "# Between first only + change_in_score\n\ny, X = patsy.dmatrices(\n 'f_score ~ earned * equal + change_in_score', \n data_pid_version[data_pid_version['version'] == 1],\n return_type = 'dataframe'\n)\n\nsession_c = data_pid_version[(data_pid_version['version'] == 1) &\n (data_pid_version['change_in_score'] == data_pid_version['change_in_score']) &\n (data_pid_version['version'] == data_pid_version['version']) &\n (data_pid_version['f_score'] == data_pid_version['f_score']) &\n (data_pid_version['earned'] == data_pid_version['earned']) &\n (data_pid_version['equal'] == data_pid_version['equal'])]['session_no']\n\nols = sm.OLS(y, X)\nols.fit(cov_type='cluster', cov_kwds={'groups': [session_c]}).summary()", "_____no_output_____" ], [ "# Between second only + change_in_score\n\ny, X = patsy.dmatrices(\n 'f_score ~ earned * equal + change_in_score', \n data_pid_version[data_pid_version['version'] == 2],\n return_type = 'dataframe'\n)\n\nsession_c = data_pid_version[(data_pid_version['version'] == 2) &\n (data_pid_version['change_in_score'] == data_pid_version['change_in_score']) &\n (data_pid_version['version'] == data_pid_version['version']) &\n (data_pid_version['f_score'] == data_pid_version['f_score']) &\n (data_pid_version['earned'] == data_pid_version['earned']) &\n (data_pid_version['equal'] == data_pid_version['equal'])]['session_no']\n\nols = sm.OLS(y, X)\nols.fit(cov_type='cluster', cov_kwds={'groups': 
[session_c]}).summary()", "_____no_output_____" ], [ "# Between pooled + change_in_score\n\ny, X = patsy.dmatrices(\n 'f_score ~ earned * equal + C(version) + change_in_score', \n data_pid_version,\n return_type = 'dataframe'\n)\n\nsession_c = data_pid_version[(data_pid_version['change_in_score'] == data_pid_version['change_in_score']) &\n (data_pid_version['version'] == data_pid_version['version']) &\n (data_pid_version['f_score'] == data_pid_version['f_score']) &\n (data_pid_version['earned'] == data_pid_version['earned']) &\n (data_pid_version['equal'] == data_pid_version['equal'])]['session_no']\n\npid_c = data_pid_version[(data_pid_version['change_in_score'] == data_pid_version['change_in_score']) &\n (data_pid_version['version'] == data_pid_version['version']) &\n (data_pid_version['f_score'] == data_pid_version['f_score']) &\n (data_pid_version['earned'] == data_pid_version['earned']) &\n (data_pid_version['equal'] == data_pid_version['equal'])]['pid']\n\nols = sm.OLS(y, X)\nols.fit(cov_type='cluster', cov_kwds={'groups': [session_c, pid_c]}).summary()", "_____no_output_____" ], [ "# Between version interaction + change_in_score\n\ny, X = patsy.dmatrices(\n 'f_score ~ earned * equal * C(version) + change_in_score', \n data_pid_version,\n return_type = 'dataframe'\n)\n\nsession_c = data_pid_version[(data_pid_version['change_in_score'] == data_pid_version['change_in_score']) &\n (data_pid_version['version'] == data_pid_version['version']) &\n (data_pid_version['f_score'] == data_pid_version['f_score']) &\n (data_pid_version['earned'] == data_pid_version['earned']) &\n (data_pid_version['equal'] == data_pid_version['equal'])]['session_no']\n\npid_c = data_pid_version[(data_pid_version['change_in_score'] == data_pid_version['change_in_score']) &\n (data_pid_version['version'] == data_pid_version['version']) &\n (data_pid_version['f_score'] == data_pid_version['f_score']) &\n (data_pid_version['earned'] == data_pid_version['earned']) &\n (data_pid_version['equal'] == data_pid_version['equal'])]['pid']\n\nols = sm.OLS(y, X)\nols.fit(cov_type='cluster', cov_kwds={'groups': [session_c, pid_c]}).summary()", "_____no_output_____" ] ], [ [ "##### Between-session", "_____no_output_____" ] ], [ [ "y, X = patsy.dmatrices(\n 'f_score ~ earned * equal', \n session_data_pid_version[session_data_pid_version['version'] == 1],\n return_type = 'dataframe'\n)\n\nols = sm.OLS(y, X)\nols.fit().summary()", "_____no_output_____" ], [ "y, X = patsy.dmatrices(\n 'f_score ~ earned * equal', \n session_data_pid_version[session_data_pid_version['version'] == 2],\n return_type = 'dataframe'\n)\n\nols = sm.OLS(y, X)\nols.fit().summary()", "_____no_output_____" ], [ "y, X = patsy.dmatrices(\n 'f_score ~ earned * equal + C(version)', \n session_data_pid_version,\n return_type = 'dataframe'\n)\n\nsession_c = session_data_pid_version[\n (session_data_pid_version['version'] == session_data_pid_version['version']) &\n (session_data_pid_version['f_score'] == session_data_pid_version['f_score']) &\n (session_data_pid_version['earned'] == session_data_pid_version['earned']) &\n (session_data_pid_version['equal'] == session_data_pid_version['equal'])]['session_no']\n\nols = sm.OLS(y, X)\nols.fit(cov_type='cluster', cov_kwds={'groups': [session_c]}).summary()", "_____no_output_____" ], [ "y, X = patsy.dmatrices(\n 'f_score ~ earned * equal * C(version)', \n session_data_pid_version,\n return_type = 'dataframe'\n)\n\nsession_c = session_data_pid_version[\n (session_data_pid_version['version'] == 
session_data_pid_version['version']) &\n (session_data_pid_version['f_score'] == session_data_pid_version['f_score']) &\n (session_data_pid_version['earned'] == session_data_pid_version['earned']) &\n (session_data_pid_version['equal'] == session_data_pid_version['equal'])]['session_no']\n\nols = sm.OLS(y, X)\nols.fit(cov_type='cluster', cov_kwds={'groups': [session_c]}).summary()", "_____no_output_____" ] ], [ [ "##### Within-individual", "_____no_output_____" ] ], [ [ "# y, X = patsy.dmatrices(\n# 'delta_f_score ~ delta_earned + delta_equal + delta_int', \n# # 'delta_f_score ~ delta_earned * delta_equal'\n# data_within_pid,\n# return_type = 'dataframe'\n# )\n\n# session_c = data_within_pid[(data_within_pid['delta_f_score'] == data_within_pid['delta_f_score']) &\n# (data_within_pid['delta_earned'] == data_within_pid['delta_earned']) &\n# (data_within_pid['delta_equal'] == data_within_pid['delta_equal'])]['session_no']\n\n# ols = sm.OLS(y, X)\n# ols.fit(cov_type='cluster', cov_kwds={'groups': [session_c]}).summary()", "_____no_output_____" ], [ "temp = data_within_pid.copy()\n\ntemp['delta_earned'] = temp['delta_earned'].apply(lambda row: 2 if row == -1 else row)\ntemp['delta_equal'] = temp['delta_equal'].apply(lambda row: 2 if row == -1 else row)\n\ny, X = patsy.dmatrices(\n 'delta_f_score ~ C(delta_earned) * C(delta_equal)',\n temp,\n return_type = 'dataframe'\n)\n\nsession_c = temp[(temp['delta_f_score'] == temp['delta_f_score']) &\n (temp['delta_earned'] == temp['delta_earned']) &\n (temp['delta_equal'] == temp['delta_equal'])]['session_no']\n\nols = sm.OLS(y, X)\nols.fit(cov_type='cluster', cov_kwds={'groups': [session_c]}).summary()", "_____no_output_____" ], [ "y, X = patsy.dmatrices(\n 'delta_f_score ~ delta_earned + delta_equal + delta_int + delta_change_in_score',\n # 'delta_f_score ~ delta_earned * delta_equal + delta_change_in_score'\n data_within_pid,\n return_type = 'dataframe'\n)\n\nsession_c = data_within_pid[\n (data_within_pid['delta_change_in_score'] == data_within_pid['delta_change_in_score']) &\n (data_within_pid['delta_f_score'] == data_within_pid['delta_f_score']) &\n (data_within_pid['delta_earned'] == data_within_pid['delta_earned']) &\n (data_within_pid['delta_equal'] == data_within_pid['delta_equal'])]['session_no']\n\nols = sm.OLS(y, X)\nols.fit(cov_type='cluster', cov_kwds={'groups': [session_c]}).summary()", "_____no_output_____" ] ], [ [ "##### Within-session", "_____no_output_____" ] ], [ [ "y, X = patsy.dmatrices(\n 'delta_f_score ~ delta_earned + delta_equal + delta_int',\n # 'delta_f_score ~ delta_earned * delta_equal'\n session_data_within_pid,\n return_type = 'dataframe'\n)\n\nols = sm.OLS(y, X)\nols.fit().summary()", "_____no_output_____" ], [ "temp = session_data_within_pid.copy()\n\ntemp['delta_earned'] = temp['delta_earned'].apply(lambda row: 2 if row == -1 else row)\ntemp['delta_equal'] = temp['delta_equal'].apply(lambda row: 2 if row == -1 else row)\n\ny, X = patsy.dmatrices(\n 'delta_f_score ~ C(delta_earned) * C(delta_equal)',\n temp,\n return_type = 'dataframe'\n)\n\nols = sm.OLS(y, X)\nols.fit().summary()", "_____no_output_____" ] ], [ [ "### Cooperation ~ conditions", "_____no_output_____" ], [ "##### Overview", "_____no_output_____" ] ], [ [ "print(np.mean(data_final_coop.coopChoice))\nprint()\n\nprint(data_final_coop.groupby('version')['coopChoice'].mean())\nprint()\n\nprint(data_final_coop.groupby('round')['coopChoice'].mean())\nprint()", "_____no_output_____" ], [ 
"print(data_final_coop.groupby('condition')['coopChoice'].mean())\nprint()\n\nprint(data_final_coop.groupby(['version', 'condition'])['coopChoice'].mean())", "_____no_output_____" ] ], [ [ "##### Figures", "_____no_output_____" ] ], [ [ "data_final_coop.groupby(['round', 'version']).mean()['coopChoice'].unstack().plot(style = ['-', '--'],\n color = 'black')\nplt.xlabel('Round')\nplt.ylabel('Average Cooperation')\nplt.title('Average Cooperation by Round')\nplt.legend(['First game', 'Second game'])\nplt.xticks(np.arange(1, 11, step=1))\n\nplt.savefig(os.path.join(path_to_diss, \"paper/figures/appendices/figureA5.2.png\"))\n\nplt.show()", "_____no_output_____" ], [ "fig, ax = plt.subplots(1, 3, figsize = (15, 5))\nclrs = ['royalblue', 'orange', 'limegreen', 'tomato']\n\nfor i in range(3):\n if i == 0:\n temp = data_final_coop[\n data_final_coop['version'] == 1].groupby(['round',\n 'condition']).mean()['coopChoice'].unstack().plot(color = clrs, \n ax = ax[i])\n ax[i].set_title('First game only')\n ax[i].set_xticks(np.arange(1, 10, step = 1))\n elif i == 1:\n temp = data_final_coop[\n data_final_coop['version'] == 2].groupby(['round',\n 'condition']).mean()['coopChoice'].unstack().plot(color = clrs,\n ax = ax[i])\n ax[i].set_title('Second game only')\n ax[i].set_xticks(np.arange(1, 11, step = 1))\n else:\n temp = data_final_coop[\n data_final_coop['round'] != 10].groupby(['round',\n 'condition']).mean()['coopChoice'].unstack().plot(color = clrs,\n ax = ax[i])\n ax[i].set_title('Pooled')\n ax[i].set_xticks(np.arange(1, 10, step = 1))\n ax[i].set_xlabel('Round')\n ax[i].set_ylabel('Average Cooperation')\n ax[i].set_yticks(np.arange(0.35, 0.85, step = 0.05))\n ax[i].legend(['EE', 'EU', 'RE', 'RU'])\n \nfig.savefig(os.path.join(path_to_diss, \"paper/figures/appendices/figureA5.3.png\"), \n bbox_inches = 'tight', \n pad_inches = 0.25)", "_____no_output_____" ], [ "data = {'Experimental condition': ['RU to RU', 'RU to EE', 'RU to RE', 'RU to EU', \n 'EE to RU', 'EE to EE', 'EE to RE', 'EE to EU', \n 'RE to RU', 'RE to EE', 'RE to RE', 'RE to EU', \n 'EU to RU', 'EU to EE', 'EU to RE', 'EU to EU'],\n 'Change in fairness score': [ 0.4393, 0.2451, 0.4959, 1.5124, \n -0.1759, 0.0374, 0.2162, 1.4580, \n -0.3945, -0.4087, 0.5403, 0.7417, \n -1.2234, -1.0642, -1.0096, -0.2833],\n 'Change in cooperation': [ 0.0332, -0.0404, -0.0230, -0.0192, \n 0.0510, -0.0232, -0.0014, -0.0266, \n 0.0103, -0.0888, 0.0175, -0.0174, \n 0.0016, 0.0674, 0.0588, 0.0002]}\n\ndf = pd.DataFrame(data)\n\ndf.columns = ['g', 'x', 'y']\n\nx = list(df['x'])\ny = list(df['y'])\n\np1 = np.polyfit(x, y, 1)\n\nplt.rcParams[\"figure.figsize\"] = [10, 8]\n\nplt.xlim(-1.7, 1.7)\nplt.ylim(-0.095, 0.095)\n\nplt.xlabel('Change in perceived fairness score')\nplt.ylabel('Change in probability of cooperating')\n\nplt.scatter(x, y)\nxlims = plt.xlim()\nx.insert(0, xlims[0])\ny.insert(0, np.polyval(p1, xlims[0]))\nx.append(xlims[1])\ny.append(np.polyval(p1, xlims[1]))\nplt.plot(x, np.polyval(p1,x), 'r-', linewidth = 1.5)\nplt.xlim(xlims)\n\nfor line in range(0, df.shape[0]):\n plt.text(df.x[line]-0.1, \n df.y[line]+0.0025, \n df.g[line], \n horizontalalignment='left', \n size='small', \n color='black')\n\nplt.savefig(os.path.join(path_to_diss, \"paper/figures/appendices/figureA4.1.png\")) \n\nplt.show()", "_____no_output_____" ] ], [ [ "##### Cooperation history", "_____no_output_____" ] ], [ [ "def get_coop_history(row):\n return list(row)[1:]", "_____no_output_____" ], [ "df_wide_v1 = data_final_coop[data_final_coop['version'] == 1][\n ['pid', 
'round', 'coopChoice']\n].pivot(index = 'pid', columns = 'round', values = 'coopChoice').reset_index().rename_axis(None, axis = 1)\n\ndf_wide_v1.head()", "_____no_output_____" ], [ "df_wide_v2 = data_final_coop[data_final_coop['version'] == 2][\n ['pid', 'round', 'coopChoice']\n].pivot(index = 'pid', columns = 'round', values = 'coopChoice').reset_index().rename_axis(None, axis = 1)\n\ndf_wide_v2.head()", "_____no_output_____" ], [ "df_wide_v1['coop_history'] = df_wide_v1.apply(get_coop_history, axis = 1)\ndf_wide_v1.head()", "_____no_output_____" ], [ "df_wide_v2['coop_history'] = df_wide_v2.apply(get_coop_history, axis = 1)\ndf_wide_v2.head()", "_____no_output_____" ], [ "d1 = {}\nfor ch in df_wide_v1['coop_history']:\n if str(ch) in d1.keys():\n d1[str(ch)] += 1\n else:\n d1[str(ch)] = 1\nd1", "_____no_output_____" ], [ "d2 = {}\nfor ch in df_wide_v2['coop_history']:\n if str(ch) in d2.keys():\n d2[str(ch)] += 1\n else:\n d2[str(ch)] = 1\nd2", "_____no_output_____" ], [ "ni1 = 0\no1 = 0\n\nfor ch in df_wide_v1['coop_history']:\n if sorted(ch, reverse = True) == ch:\n ni1 += 1\n else:\n o1 += 1\n \nni1/(ni1+o1)", "_____no_output_____" ], [ "ni2 = 0\no2 = 0\n\nfor ch in df_wide_v2['coop_history']:\n if sorted(ch, reverse = True) == ch:\n ni2 += 1\n else:\n o2 += 1\n \nni2/(ni2+o2)", "_____no_output_____" ] ], [ [ "##### Trust as a predictor of cooperation in the second game", "_____no_output_____" ] ], [ [ "temp = data_final_coop.groupby(['session_no', \n 'version']).mean().reset_index()[['session_no', \n 'version',\n 'coopChoice', \n 'trust_score']]\n\ntemp1 = temp[temp['version'] == 1]\ntemp2 = temp[temp['version'] == 2]\n\ntemp = temp1.merge(right = temp2, how = \"inner\", on = [\"session_no\"], suffixes = (\"_v1\", \"_v2\"))\ntemp.head()", "_____no_output_____" ], [ "y, X = patsy.dmatrices(\n 'coopChoice_v2 ~ trust_score_v1 + coopChoice_v1', \n temp,\n return_type = 'dataframe'\n)\n\nols = sm.OLS(y, X)\nols.fit().summary()", "_____no_output_____" ], [ "y, X = patsy.dmatrices(\n 'trust_score_v1 ~ coopChoice_v1', \n temp,\n return_type = 'dataframe'\n)\n\nols = sm.OLS(y, X)\nols.fit().summary()", "_____no_output_____" ], [ "np.corrcoef(temp['trust_score_v1'], temp['coopChoice_v1'])", "_____no_output_____" ] ], [ [ "##### Average number of neighbors", "_____no_output_____" ] ], [ [ "def count_num_neighbors(row):\n return len(row['neighborsList'])", "_____no_output_____" ], [ "np.mean(data_final_coop.apply(count_num_neighbors, axis = 1))", "_____no_output_____" ] ], [ [ "##### Average game scores", "_____no_output_____" ] ], [ [ "print(np.mean(data_pid['score1']))\nprint(np.mean(data_pid['score2']))\nprint(np.mean(data_pid['score3']))", "_____no_output_____" ] ], [ [ "##### Why people cooperate", "_____no_output_____" ] ], [ [ "dic_why = {\n \"altruism\": 0,\n \"encourage\": 0,\n \"choseA\": 0,\n \"choseB\": 0,\n \"equal\": 0,\n \"more\": 0,\n \"less\": 0,\n \"fair\": 0,\n \"notfair\": 0,\n \"other\": 0,\n \"other_text\": []\n}", "_____no_output_____" ], [ "for index, row in data_pid.iterrows():\n if row[\"why_coop_list\"] == row[\"why_coop_list\"]:\n if \"other\" in list(row[\"why_coop_list\"]):\n dic_why[\"other_text\"].append(row[\"why_coop_other\"])\n for e in row[\"why_coop_list\"]:\n dic_why[e] += 1", "_____no_output_____" ], [ "dict((key,value) for key, value in dic_why.items() if key != \"other_text\")", "_____no_output_____" ], [ "print(len(dic_why[\"other_text\"]) == dic_why[\"other\"])\n\ndic_why[\"other_text\"]", "_____no_output_____" ] ], [ [ "##### Between-individual (using 
Logit here but same conclusions with OLS)", "_____no_output_____" ] ], [ [ "y, X = patsy.dmatrices(\n 'coopChoice ~ earned * equal + C(round)', \n data_final_coop[data_final_coop['version'] == 1],\n return_type = 'dataframe'\n)\n\nsession_c = data_final_coop[(data_final_coop['version'] == 1) &\n (data_final_coop['version'] == data_final_coop['version']) &\n (data_final_coop['round'] == data_final_coop['round']) &\n (data_final_coop['coopChoice'] == data_final_coop['coopChoice']) &\n (data_final_coop['earned'] == data_final_coop['earned']) &\n (data_final_coop['equal'] == data_final_coop['equal'])]['session_no']\n\npid_c = data_final_coop[(data_final_coop['version'] == 1) &\n (data_final_coop['version'] == data_final_coop['version']) &\n (data_final_coop['round'] == data_final_coop['round']) &\n (data_final_coop['coopChoice'] == data_final_coop['coopChoice']) &\n (data_final_coop['earned'] == data_final_coop['earned']) &\n (data_final_coop['equal'] == data_final_coop['equal'])]['pid']\n\nlogit = sm.Logit(y, X)\nlogit.fit(cov_type='cluster', cov_kwds={'groups': [session_c, pid_c]}).summary()", "_____no_output_____" ], [ "y, X = patsy.dmatrices(\n 'coopChoice ~ earned * equal + C(round)', \n data_final_coop[data_final_coop['version'] == 2],\n return_type = 'dataframe'\n)\n\nsession_c = data_final_coop[(data_final_coop['version'] == 2) &\n (data_final_coop['version'] == data_final_coop['version']) &\n (data_final_coop['round'] == data_final_coop['round']) &\n (data_final_coop['coopChoice'] == data_final_coop['coopChoice']) &\n (data_final_coop['earned'] == data_final_coop['earned']) &\n (data_final_coop['equal'] == data_final_coop['equal'])]['session_no']\n\npid_c = data_final_coop[(data_final_coop['version'] == 2) &\n (data_final_coop['version'] == data_final_coop['version']) &\n (data_final_coop['round'] == data_final_coop['round']) &\n (data_final_coop['coopChoice'] == data_final_coop['coopChoice']) &\n (data_final_coop['earned'] == data_final_coop['earned']) &\n (data_final_coop['equal'] == data_final_coop['equal'])]['pid']\n\nlogit = sm.Logit(y, X)\nlogit.fit(cov_type='cluster', cov_kwds={'groups': [session_c, pid_c]}).summary()", "_____no_output_____" ], [ "y, X = patsy.dmatrices(\n 'coopChoice ~ earned * equal + C(version) + C(round)', \n data_final_coop[data_final_coop['round'] != 10],\n return_type = 'dataframe'\n)\n\nsession_c = data_final_coop[(data_final_coop['round'] != 10) &\n (data_final_coop['version'] == data_final_coop['version']) &\n (data_final_coop['round'] == data_final_coop['round']) &\n (data_final_coop['coopChoice'] == data_final_coop['coopChoice']) &\n (data_final_coop['earned'] == data_final_coop['earned']) &\n (data_final_coop['equal'] == data_final_coop['equal'])]['session_no']\n\npid_c = data_final_coop[(data_final_coop['round'] != 10) &\n (data_final_coop['version'] == data_final_coop['version']) &\n (data_final_coop['round'] == data_final_coop['round']) &\n (data_final_coop['coopChoice'] == data_final_coop['coopChoice']) &\n (data_final_coop['earned'] == data_final_coop['earned']) &\n (data_final_coop['equal'] == data_final_coop['equal'])]['pid']\n\nlogit = sm.Logit(y, X)\nlogit.fit(cov_type='cluster', cov_kwds={'groups': [session_c, pid_c]}).summary()", "_____no_output_____" ], [ "y, X = patsy.dmatrices(\n 'coopChoice ~ earned * equal * C(version) + C(round)', \n data_final_coop[data_final_coop['round'] != 10],\n return_type = 'dataframe'\n)\n\nsession_c = data_final_coop[(data_final_coop['round'] != 10) &\n (data_final_coop['version'] == 
data_final_coop['version']) &\n (data_final_coop['round'] == data_final_coop['round']) &\n (data_final_coop['coopChoice'] == data_final_coop['coopChoice']) &\n (data_final_coop['earned'] == data_final_coop['earned']) &\n (data_final_coop['equal'] == data_final_coop['equal'])]['session_no']\n\npid_c = data_final_coop[(data_final_coop['round'] != 10) &\n (data_final_coop['version'] == data_final_coop['version']) &\n (data_final_coop['round'] == data_final_coop['round']) &\n (data_final_coop['coopChoice'] == data_final_coop['coopChoice']) &\n (data_final_coop['earned'] == data_final_coop['earned']) &\n (data_final_coop['equal'] == data_final_coop['equal'])]['pid']\n\nlogit = sm.Logit(y, X)\nlogit.fit(cov_type='cluster', cov_kwds={'groups': [session_c, pid_c]}).summary()", "_____no_output_____" ], [ "y, X = patsy.dmatrices(\n 'coopChoice ~ earned * equal + C(round) + scoreBeforeCoop', \n data_final_coop[data_final_coop['version'] == 1],\n return_type = 'dataframe'\n)\n\nsession_c = data_final_coop[(data_final_coop['version'] == 1) &\n (data_final_coop['version'] == data_final_coop['version']) &\n (data_final_coop['round'] == data_final_coop['round']) &\n (data_final_coop['coopChoice'] == data_final_coop['coopChoice']) &\n (data_final_coop['earned'] == data_final_coop['earned']) &\n (data_final_coop['equal'] == data_final_coop['equal'])]['session_no']\n\npid_c = data_final_coop[(data_final_coop['version'] == 1) &\n (data_final_coop['version'] == data_final_coop['version']) &\n (data_final_coop['round'] == data_final_coop['round']) &\n (data_final_coop['coopChoice'] == data_final_coop['coopChoice']) &\n (data_final_coop['earned'] == data_final_coop['earned']) &\n (data_final_coop['equal'] == data_final_coop['equal'])]['pid']\n\nlogit = sm.Logit(y, X)\nlogit.fit(cov_type='cluster', cov_kwds={'groups': [session_c, pid_c]}).summary()", "_____no_output_____" ], [ "y, X = patsy.dmatrices(\n 'coopChoice ~ earned * equal + C(round) + scoreBeforeCoop', \n data_final_coop[data_final_coop['version'] == 2],\n return_type = 'dataframe'\n)\n\nsession_c = data_final_coop[(data_final_coop['version'] == 2) &\n (data_final_coop['version'] == data_final_coop['version']) &\n (data_final_coop['round'] == data_final_coop['round']) &\n (data_final_coop['coopChoice'] == data_final_coop['coopChoice']) &\n (data_final_coop['earned'] == data_final_coop['earned']) &\n (data_final_coop['equal'] == data_final_coop['equal'])]['session_no']\n\npid_c = data_final_coop[(data_final_coop['version'] == 2) &\n (data_final_coop['version'] == data_final_coop['version']) &\n (data_final_coop['round'] == data_final_coop['round']) &\n (data_final_coop['coopChoice'] == data_final_coop['coopChoice']) &\n (data_final_coop['earned'] == data_final_coop['earned']) &\n (data_final_coop['equal'] == data_final_coop['equal'])]['pid']\n\nlogit = sm.Logit(y, X)\nlogit.fit(cov_type='cluster', cov_kwds={'groups': [session_c, pid_c]}).summary()", "_____no_output_____" ], [ "y, X = patsy.dmatrices(\n 'coopChoice ~ earned * equal + C(version) + C(round) + scoreBeforeCoop', \n data_final_coop,\n return_type = 'dataframe'\n)\n\nsession_c = data_final_coop[(data_final_coop['scoreBeforeCoop'] == data_final_coop['scoreBeforeCoop']) &\n (data_final_coop['version'] == data_final_coop['version']) &\n (data_final_coop['round'] == data_final_coop['round']) &\n (data_final_coop['coopChoice'] == data_final_coop['coopChoice']) &\n (data_final_coop['earned'] == data_final_coop['earned']) &\n (data_final_coop['equal'] == 
data_final_coop['equal'])]['session_no']\n\npid_c = data_final_coop[(data_final_coop['scoreBeforeCoop'] == data_final_coop['scoreBeforeCoop']) &\n (data_final_coop['version'] == data_final_coop['version']) &\n (data_final_coop['round'] == data_final_coop['round']) &\n (data_final_coop['coopChoice'] == data_final_coop['coopChoice']) &\n (data_final_coop['earned'] == data_final_coop['earned']) &\n (data_final_coop['equal'] == data_final_coop['equal'])]['pid']\n\nlogit = sm.Logit(y, X)\nlogit.fit(cov_type='cluster', cov_kwds={'groups': [session_c, pid_c]}).summary()", "_____no_output_____" ], [ "y, X = patsy.dmatrices(\n 'coopChoice ~ earned * equal * C(version) + C(round) + scoreBeforeCoop', \n data_final_coop,\n return_type = 'dataframe'\n)\n\nsession_c = data_final_coop[(data_final_coop['scoreBeforeCoop'] == data_final_coop['scoreBeforeCoop']) &\n (data_final_coop['version'] == data_final_coop['version']) &\n (data_final_coop['round'] == data_final_coop['round']) &\n (data_final_coop['coopChoice'] == data_final_coop['coopChoice']) &\n (data_final_coop['earned'] == data_final_coop['earned']) &\n (data_final_coop['equal'] == data_final_coop['equal'])]['session_no']\n\npid_c = data_final_coop[(data_final_coop['scoreBeforeCoop'] == data_final_coop['scoreBeforeCoop']) &\n (data_final_coop['version'] == data_final_coop['version']) &\n (data_final_coop['round'] == data_final_coop['round']) &\n (data_final_coop['coopChoice'] == data_final_coop['coopChoice']) &\n (data_final_coop['earned'] == data_final_coop['earned']) &\n (data_final_coop['equal'] == data_final_coop['equal'])]['pid']\n\nlogit = sm.Logit(y, X)\nlogit.fit(cov_type='cluster', cov_kwds={'groups': [session_c, pid_c]}).summary()", "_____no_output_____" ] ], [ [ "##### Between-session", "_____no_output_____" ] ], [ [ "y, X = patsy.dmatrices(\n 'coopChoice ~ earned * equal + C(round)', \n session_data_coop[session_data_coop['version'] == 1],\n return_type = 'dataframe'\n)\n\nsession_c = session_data_coop[(session_data_coop['version'] == 1) &\n (session_data_coop['version'] == session_data_coop['version']) &\n (session_data_coop['round'] == session_data_coop['round']) &\n (session_data_coop['coopChoice'] == session_data_coop['coopChoice']) &\n (session_data_coop['earned'] == session_data_coop['earned']) &\n (session_data_coop['equal'] == session_data_coop['equal'])]['session_no']\n\nols = sm.OLS(y, X)\nols.fit(cov_type='cluster', cov_kwds={'groups': [session_c]}).summary()", "_____no_output_____" ], [ "y, X = patsy.dmatrices(\n 'coopChoice ~ earned * equal + C(version) + C(round)', \n session_data_coop[session_data_coop['version'] == 2],\n return_type = 'dataframe'\n)\n\nsession_c = session_data_coop[(session_data_coop['version'] == 2) &\n (session_data_coop['version'] == session_data_coop['version']) &\n (session_data_coop['round'] == session_data_coop['round']) &\n (session_data_coop['coopChoice'] == session_data_coop['coopChoice']) &\n (session_data_coop['earned'] == session_data_coop['earned']) &\n (session_data_coop['equal'] == session_data_coop['equal'])]['session_no']\n\nols = sm.OLS(y, X)\nols.fit(cov_type='cluster', cov_kwds={'groups': [session_c]}).summary()", "_____no_output_____" ], [ "y, X = patsy.dmatrices(\n 'coopChoice ~ earned * equal + C(version) + C(round)', \n session_data_coop[session_data_coop['round'] != 10],\n return_type = 'dataframe'\n)\n\nsession_c = session_data_coop[(session_data_coop['round'] != 10) &\n (session_data_coop['version'] == session_data_coop['version']) &\n (session_data_coop['round'] == 
session_data_coop['round']) &\n (session_data_coop['coopChoice'] == session_data_coop['coopChoice']) &\n (session_data_coop['earned'] == session_data_coop['earned']) &\n (session_data_coop['equal'] == session_data_coop['equal'])]['session_no']\n\nols = sm.OLS(y, X)\nols.fit(cov_type='cluster', cov_kwds={'groups': [session_c]}).summary()", "_____no_output_____" ], [ "y, X = patsy.dmatrices(\n 'coopChoice ~ earned * equal * C(version) + C(round)', \n session_data_coop[session_data_coop['round'] != 10],\n return_type = 'dataframe'\n)\n\nsession_c = session_data_coop[(session_data_coop['round'] != 10) &\n (session_data_coop['version'] == session_data_coop['version']) &\n (session_data_coop['round'] == session_data_coop['round']) &\n (session_data_coop['coopChoice'] == session_data_coop['coopChoice']) &\n (session_data_coop['earned'] == session_data_coop['earned']) &\n (session_data_coop['equal'] == session_data_coop['equal'])]['session_no']\n\nols = sm.OLS(y, X)\nols.fit(cov_type='cluster', cov_kwds={'groups': [session_c]}).summary()", "_____no_output_____" ] ], [ [ "##### Within-individual", "_____no_output_____" ] ], [ [ "# y, X = patsy.dmatrices(\n# 'delta_coopChoice ~ delta_earned + delta_equal + delta_int + C(round)', \n# # 'delta_coopChoice ~ delta_earned * delta_equal + C(round)'\n# data_within_coop,\n# return_type = 'dataframe'\n# )\n\n# session_c = data_within_coop[(data_within_coop['round'] == data_within_coop['round']) &\n# (data_within_coop['delta_coopChoice'] == data_within_coop['delta_coopChoice']) &\n# (data_within_coop['delta_earned'] == data_within_coop['delta_earned']) &\n# (data_within_coop['delta_equal'] == data_within_coop['delta_equal'])]['session_no']\n\n# pid_c = data_within_coop[(data_within_coop['round'] == data_within_coop['round']) &\n# (data_within_coop['delta_coopChoice'] == data_within_coop['delta_coopChoice']) &\n# (data_within_coop['delta_earned'] == data_within_coop['delta_earned']) &\n# (data_within_coop['delta_equal'] == data_within_coop['delta_equal'])]['pid']\n\n# ols = sm.OLS(y, X)\n# ols.fit(cov_type='cluster', cov_kwds={'groups': [session_c, pid_c]}).summary()", "_____no_output_____" ], [ "temp = data_within_coop.copy()\n\ntemp['delta_earned'] = temp['delta_earned'].apply(lambda row: 2 if row == -1 else row)\ntemp['delta_equal'] = temp['delta_equal'].apply(lambda row: 2 if row == -1 else row)\n\ny, X = patsy.dmatrices(\n 'delta_coopChoice ~ C(delta_earned) * C(delta_equal) + C(round)', \n temp,\n return_type = 'dataframe'\n)\n\nsession_c = temp[(temp['round'] == temp['round']) &\n (temp['delta_coopChoice'] == temp['delta_coopChoice']) &\n (temp['delta_earned'] == temp['delta_earned']) &\n (temp['delta_equal'] == temp['delta_equal'])]['session_no']\n\npid_c = temp[(temp['round'] == temp['round']) &\n (temp['delta_coopChoice'] == temp['delta_coopChoice']) &\n (temp['delta_earned'] == temp['delta_earned']) &\n (temp['delta_equal'] == temp['delta_equal'])]['pid']\n\nols = sm.OLS(y, X)\nols.fit(cov_type='cluster', cov_kwds={'groups': [session_c, pid_c]}).summary()", "_____no_output_____" ], [ "y, X = patsy.dmatrices(\n 'delta_coopChoice ~ delta_earned + delta_equal + delta_int + delta_scoreBeforeCoop + num_other + C(round)', \n # 'delta_coopChoice ~ delta_earned * delta_equal + delta_scoreBeforeCoop + C(round)'\n data_within_coop,\n return_type = 'dataframe'\n)\n\nsession_c = data_within_coop[\n (data_within_coop['delta_scoreBeforeCoop'] == data_within_coop['delta_scoreBeforeCoop']) &\n (data_within_coop['round'] == data_within_coop['round']) &\n 
(data_within_coop['delta_coopChoice'] == data_within_coop['delta_coopChoice']) &\n (data_within_coop['delta_earned'] == data_within_coop['delta_earned']) &\n (data_within_coop['delta_equal'] == data_within_coop['delta_equal']) &\n (data_within_coop['num_other'] == data_within_coop['num_other'])]['session_no']\n\npid_c = data_within_coop[\n (data_within_coop['delta_scoreBeforeCoop'] == data_within_coop['delta_scoreBeforeCoop']) &\n (data_within_coop['round'] == data_within_coop['round']) &\n (data_within_coop['delta_coopChoice'] == data_within_coop['delta_coopChoice']) &\n (data_within_coop['delta_earned'] == data_within_coop['delta_earned']) &\n (data_within_coop['delta_equal'] == data_within_coop['delta_equal']) &\n (data_within_coop['num_other'] == data_within_coop['num_other'])]['pid']\n\nols = sm.OLS(y, X)\nols.fit(cov_type='cluster', cov_kwds={'groups': [session_c, pid_c]}).summary()", "_____no_output_____" ] ], [ [ "##### Within-session", "_____no_output_____" ] ], [ [ "y, X = patsy.dmatrices(\n 'delta_coopChoice ~ delta_earned + delta_equal + delta_int + C(round)',\n # 'delta_coopChoice ~ delta_earned * delta_equal + C(round)'\n session_data_within_coop,\n return_type = 'dataframe'\n)\n\nsession_c = session_data_within_coop[\n (session_data_within_coop['round'] == session_data_within_coop['round']) &\n (session_data_within_coop['delta_coopChoice'] == session_data_within_coop['delta_coopChoice']) &\n (session_data_within_coop['delta_earned'] == session_data_within_coop['delta_earned']) &\n (session_data_within_coop['delta_equal'] == session_data_within_coop['delta_equal'])]['session_no']\n\nols = sm.OLS(y, X)\nols.fit(cov_type='cluster', cov_kwds={'groups': [session_c]}).summary()", "_____no_output_____" ], [ "temp = session_data_within_coop.copy()\n\ntemp['delta_earned'] = temp['delta_earned'].apply(lambda row: 2 if row == -1 else row)\ntemp['delta_equal'] = temp['delta_equal'].apply(lambda row: 2 if row == -1 else row)\n\ny, X = patsy.dmatrices(\n 'delta_coopChoice ~ C(delta_earned) * C(delta_equal)',\n temp,\n return_type = 'dataframe'\n)\n\nsession_c = temp[(temp['round'] == temp['round']) &\n (temp['delta_coopChoice'] == temp['delta_coopChoice']) &\n (temp['delta_earned'] == temp['delta_earned']) &\n (temp['delta_equal'] == temp['delta_equal'])]['session_no']\n\nols = sm.OLS(y, X)\nols.fit(cov_type='cluster', cov_kwds={'groups': [session_c]}).summary()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
4a6a29e89ad66c8c6d94f8a3b75eff816ef9b879
39,258
ipynb
Jupyter Notebook
IndexingDataFrames.ipynb
onativo/DataScience101
68d42115e350371390e1fc851707598709efd4c1
[ "MIT" ]
null
null
null
IndexingDataFrames.ipynb
onativo/DataScience101
68d42115e350371390e1fc851707598709efd4c1
[ "MIT" ]
null
null
null
IndexingDataFrames.ipynb
onativo/DataScience101
68d42115e350371390e1fc851707598709efd4c1
[ "MIT" ]
null
null
null
34.710875
135
0.358933
[ [ [ "# Indexing Dataframes", "_____no_output_____" ] ], [ [ "#a função set_index é um processo destrutivo e não mantém o index atual\n#se quisermos manter o index atual, precisamos manualmente criar uma nova coluna e copiá-los para ela\n#os valores\nimport pandas as pd\ndf = pd.read_csv('resources/week-1/datasets/Admission_Predict.csv', index_col=0)\ndf.head()", "_____no_output_____" ], [ "#vamos fazer de conta que não queremos manter o serial number como index do nosso DF mas sim o chance of admit, porém queremos\n#manter esse valor do serial number para usar mais tarde\n#fazemos isso usando o set_index para setar o index na coluna chance of admit\n\n#primeiro copiamos o index atua lpara uma nova coluna\ndf['Serial No.'] = df.index\n\n#daí setamos o index para uma nova coluna\ndf = df.set_index('Chance of Admit ')\ndf.head()", "_____no_output_____" ], [ "#quando criamos um index a aprtir de uma nova coluna, ele recebe o nome dessa nova coluna\n#podemos nos desfazer disso usando a função reset_index() para mover o index para uma nova coluna\n#e cria um index com números default\n\n#ele pega o chance of admit que tinha ficado como indexador e coloca ele numa nova coluna e cria um novo indexador\ndf = df.reset_index()\ndf.head()", "_____no_output_____" ], [ "#uma coisa muito legal do pandas é a indexação multi-level que é similar Às chaves compostas nos bancos de dados relacionais\n#para criar um indexador multi-level simplesmente chamamos o set_index e passamos uma lista com nomes de colunas\n#que queremos transformar em indexadores", "_____no_output_____" ], [ "df2 = pd.read_csv('resources/week-2/datasets/census.csv')\ndf2.head()", "_____no_output_____" ], [ "df2['SUMLEV'].unique()", "_____no_output_____" ], [ "#excluindo todas as linhas que são sumários a nível estadual e manter apenas os dados do país\ndf2 = df2[df2['SUMLEV'] == 50]\n#df2.head()\ndf2.columns", "_____no_output_____" ], [ "#vamos reduzir o dataset para mostrar apenas o estimado para a população e o número total de nascimentos\n#daí criamos uma lista com nomes de colunas que desejamos manter, projetá-las e então\n#definir o dataframe resultante para nossa variavel df2\n\ncolumns_to_keep = ['STNAME', 'CTYNAME', 'BIRTHS2010', 'BIRTHS2011', 'BIRTHS2012', 'BIRTHS2013',\n 'BIRTHS2014', 'POPESTIMATE2010', 'POPESTIMATE2011', 'POPESTIMATE2012', 'POPESTIMATE2013',\n 'POPESTIMATE2014', 'POPESTIMATE2015']\ndf2 = df2[columns_to_keep]\ndf2.head()\n\n#cria uma lista com as colunas que dejseamos manter e definimos a nova variavel df2 com essas colunas", "_____no_output_____" ], [ "#o censo separa população por estimado por estado e país\n#podemos carregar os dados e setar o index para ser a combinação do estado e do país\n#e daí ver como o pandas trabalha com isso num dataframe\n#faremos isso criando uma lista com os identificadores de colunas que desejamos indexar. daí chamamos o set_index()\n#com essa lista e atribuimos o output como apropriado.\n#vemos acima que temos dois indexadores, o STNAME e CTYNAME\n\ndf2 = df2.set_index(['STNAME', 'CTYNAME'])\ndf2.head()", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a6a2aaa78bfbffa403aa7b63026d519b2c8563b
13,203
ipynb
Jupyter Notebook
n1_cell_based_narval_r2b4/source_code/preprocessing.ipynb
agrundner24/iconml_clc
a9f3547fae15593288066fe1d30631a99e4ccbeb
[ "MIT" ]
null
null
null
n1_cell_based_narval_r2b4/source_code/preprocessing.ipynb
agrundner24/iconml_clc
a9f3547fae15593288066fe1d30631a99e4ccbeb
[ "MIT" ]
null
null
null
n1_cell_based_narval_r2b4/source_code/preprocessing.ipynb
agrundner24/iconml_clc
a9f3547fae15593288066fe1d30631a99e4ccbeb
[ "MIT" ]
null
null
null
29.803612
137
0.527153
[ [ [ "## Preprocessing\n\n<!-- Was used to generate: <br>\n*preprocessed_data/cloud_cover_all_days_input_train_1.npy <br>\npreprocessed_data/cloud_cover_all_days_input_valid_1.npy <br>\npreprocessed_data/cloud_cover_all_days_output_train_1.npy <br>\npreprocessed_data/cloud_cover_all_days_output_valid_1.npy* -->", "_____no_output_____" ] ], [ [ "import sys\nimport xarray as xr\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport importlib\n\n# from sklearn.model_selection import StratifiedShuffleSplit\nfrom sklearn.preprocessing import StandardScaler\n\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.optimizers import Nadam\nfrom tensorflow.keras.callbacks import EarlyStopping\n\nbase_path = '/pf/b/b309170'\npath = base_path + '/my_work/NARVAL/data_var_vertinterp/'\noutput_path = base_path + '/my_work/icon-ml_data/cloud_cover_parameterization/grid_cell_based_v3/based_on_var_interpolated_data'\nmodel_path = \"/pf/b/b309170/workspace_icon-ml/cloud_cover_parameterization/grid_cell_based_v3/saved_models\"\n# Add path with my_classes to sys.path\nsys.path.insert(0, base_path + '/workspace_icon-ml/cloud_cover_parameterization/')\n\nfrom my_classes import write_infofile\nfrom my_classes import load_data\n\nNUM = 1\nnp.random.seed(NUM)", "_____no_output_____" ] ], [ [ "## Reading the data\n### Input:\n- fr_land: Fraction of land\n- zg: Geometric height at full levels\n- qv: Specific water vapor content\n- qi: Specific cloud ice content\n- temp: Temperature\n- pres: Pressure\n\n### Output:\n- clc: Cloud Cover\n\nBe careful with the NARVAL file-naming convention when it comes to timestamps when adding 2D-variables.", "_____no_output_____" ] ], [ [ "# Loads the NARVAL data into the data_dict dictionary\norder_of_vars = ['qv', 'qi', 'temp', 'pres', 'zg', 'fr_land', 'clc']\ndata_dict = load_data(source='narval', days='all', vert_interp=True, order_of_vars=order_of_vars)", "_____no_output_____" ], [ "#Reshaping into nd-arrays of equaling shapes (have timesteps x vert x hor)\ndata_dict['zg'] = np.repeat(np.expand_dims(data_dict['zg'], 0), data_dict['qv'].shape[0], axis=0)\ndata_dict['fr_land'] = np.repeat(np.expand_dims(data_dict['fr_land'], 0), data_dict['qv'].shape[0], axis=0)\ndata_dict['fr_land'] = np.repeat(np.expand_dims(data_dict['fr_land'], 1), data_dict['qv'].shape[1], axis=1)\n\nassert data_dict['fr_land'].shape == data_dict['qv'].shape == data_dict['zg'].shape", "_____no_output_____" ], [ "data_dict.keys()", "_____no_output_____" ], [ "# Reshaping into 1D-arrays and converting dict into a DataFrame-object (the following is based on Aurelien Geron)\nfor key in ['qv', 'qi', 'temp', 'pres', 'zg', 'fr_land', 'clc']:\n data_dict[key] = np.reshape(data_dict[key], -1) \n\ndf = pd.DataFrame.from_dict(data_dict)\ndf.head()", "_____no_output_____" ] ], [ [ "**Downsampling the data (minority class: clc = 0)**", "_____no_output_____" ] ], [ [ "np.max(df.loc[df['clc']>0])['zg']", "_____no_output_____" ], [ "df = df.loc[df['zg'] < 21000] # There are days with clc > 0 at 20500 meters", "_____no_output_____" ], [ "df_noclc = df.loc[df['clc']==0]\nlen(df_noclc)", "_____no_output_____" ], [ "# We ensure that clc != 0 and clc = 0 have the same size\ndownsample_ratio = (len(df) - len(df_noclc))/len(df_noclc)\nprint(downsample_ratio)\nshuffled_indices = np.random.permutation(len(df_noclc))\nset_size = int(len(df_noclc)*downsample_ratio)\ndownsample_indices = shuffled_indices[:set_size] \ndf = 
pd.concat([df_noclc.iloc[downsample_indices],df.loc[df['clc']!=0]])", "0.68584831442132\n" ] ], [ [ "**Splitting the data into a learning and a test set**", "_____no_output_____" ] ], [ [ "#Splitting the data into a learning and a test set\n\n#Should we use StratifiedShuffleSplit instead to make sure that the test set is representative of the whole dataset?\n#E.g. define categories of specific water vapor and make sure those categories are present in the test set as well\n#-> Geron, p.69\n\ndef split_train_test(df, test_ratio):\n shuffled_indices = np.random.permutation(len(df))\n test_set_size = int(len(df)*test_ratio)\n test_indices = shuffled_indices[:test_set_size]\n train_indices = shuffled_indices[test_set_size:]\n return df.iloc[train_indices], df.iloc[test_indices]\n \nlearning_set, test_set = split_train_test(df, 0.2)\nprint(len(learning_set), 'training samples, ', len(test_set), 'test samples')", "29424632 training samples, 7356158 test samples\n" ], [ "scaler = StandardScaler()", "_____no_output_____" ], [ "#Split the training set/learning set into a training set and a validation set and rescale\n\ntrain_set, valid_set = split_train_test(learning_set, 0.1)\nif 'clc' in valid_set.columns:\n output_valid = valid_set['clc']\n del valid_set['clc']\nif 'clc' in train_set.columns:\n output_train = train_set['clc']\n del train_set['clc']\nscaler.fit(train_set)\ninput_train = scaler.transform(train_set)\ninput_valid = scaler.transform(valid_set)", "_____no_output_____" ], [ "# Save and scale the test set as well\nif 'clc' in test_set.columns:\n output_test = test_set['clc']\n del test_set['clc']\ninput_test = scaler.transform(test_set)", "_____no_output_____" ], [ "# Save the data\nnp.save(output_path + '/cloud_cover_all_days_input_train_%d.npy'%NUM, input_train)\nnp.save(output_path + '/cloud_cover_all_days_input_valid_%d.npy'%NUM, input_valid)\nnp.save(output_path + '/cloud_cover_all_days_output_train_%d.npy'%NUM, output_train)\nnp.save(output_path + '/cloud_cover_all_days_output_valid_%d.npy'%NUM, output_valid)\nnp.save(output_path + '/cloud_cover_all_days_input_test_%d.npy'%NUM, input_test)\nnp.save(output_path + '/cloud_cover_all_days_output_test_%d.npy'%NUM, output_test)\nwith open(model_path+'/scaler_%d.txt'%NUM, 'w') as file:\n file.write('Standard Scaler mean values:\\n')\n file.write(str(scaler.mean_))\n file.write('\\nStandard Scaler standard deviation:\\n')\n file.write(str(np.sqrt(scaler.var_)))", "_____no_output_____" ], [ "# Write the accompanying info-file\nwith open(model_path + '/model_grid_cell_based_v3_final_%d.txt'%NUM, 'w') as file:\n write_infofile(file, str(learning_set.columns), str(np.array(np.delete(learning_set.columns, 6))), \n model_path, output_path, NUM)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
4a6a2f483766af4b9c66b4fea124476ba254d7c5
430,371
ipynb
Jupyter Notebook
2021_07_16_04_mstp_bootcamp/R/x86_64-conda-linux-gnu-library/3.6/conos/scanpy_integration.ipynb
g2nb/workshop-notebooks
1e22f9569438dd509f3148959ca5b87d78ea5157
[ "BSD-3-Clause" ]
null
null
null
2021_07_16_04_mstp_bootcamp/R/x86_64-conda-linux-gnu-library/3.6/conos/scanpy_integration.ipynb
g2nb/workshop-notebooks
1e22f9569438dd509f3148959ca5b87d78ea5157
[ "BSD-3-Clause" ]
null
null
null
2021_07_16_04_mstp_bootcamp/R/x86_64-conda-linux-gnu-library/3.6/conos/scanpy_integration.ipynb
g2nb/workshop-notebooks
1e22f9569438dd509f3148959ca5b87d78ea5157
[ "BSD-3-Clause" ]
1
2022-01-12T20:17:50.000Z
2022-01-12T20:17:50.000Z
1,434.57
104,088
0.959221
[ [ [ "import os\n\nimport pandas as pd\nimport scipy as sp\nimport scanpy as sc\nsc.set_figure_params(dpi=120)\n\nDATA_PATH = os.path.expanduser(\"./\")", "//anaconda3/envs/scanpy/lib/python3.6/site-packages/matplotlib/__init__.py:886: MatplotlibDeprecationWarning: \nexamples.directory is deprecated; in the future, examples will be found relative to the 'datapath' directory.\n \"found relative to the 'datapath' directory.\".format(key))\n" ] ], [ [ "## Load data", "_____no_output_____" ] ], [ [ "!ls $DATA_PATH", "count_matrix.mtx graph_distances.mtx raw_count_matrix.mtx\r\nembedding.csv metadata.csv scanpy_integration.R\r\ngenes.csv pca.csv scanpy_integration.ipynb\r\ngraph_connectivities.mtx pseudopca.csv\r\n" ], [ "gene_df = pd.read_csv(DATA_PATH + \"genes.csv\")\n\nmetadata = pd.read_csv(DATA_PATH + \"metadata.csv\")\nmetadata.index = metadata.CellId\ndel metadata[\"CellId\"]\n\nembedding_df = pd.read_csv(DATA_PATH + \"embedding.csv\")\n# Decide between using PCA or pseudo-PCA\npseudopca_df = pd.read_csv(DATA_PATH + \"pseudopca.csv\")\n#pca_df = pd.read_csv(DATA_PATH + \"pca.csv\")\n\ngraph_conn_mtx = sp.io.mmread(DATA_PATH + \"graph_connectivities.mtx\")\ngraph_dist_mtx = sp.io.mmread(DATA_PATH + \"graph_distances.mtx\")", "_____no_output_____" ] ], [ [ "### Create ScanPy object ", "_____no_output_____" ] ], [ [ "# Begin by reading in raw counts\nadata = sc.read_mtx(DATA_PATH + \"raw_count_matrix.mtx\")\nadata", "_____no_output_____" ], [ "adata.var_names = gene_df[\"gene\"].values\nadata.obs_names = metadata.index.values\n\nadata.obs = metadata.copy()\n\n# Depends on which PCA you loaded\nadata.X_pca = pseudopca_df.values\nadata.obsm['X_pca'] = pseudopca_df.values\n\n# Name according to embedding you saved\nadata.X_umap = embedding_df.values\nadata.obsm['X_umap'] = embedding_df.values\n\nadata.uns['neighbors'] = dict(connectivities=graph_conn_mtx.tocsr(), distances=graph_dist_mtx.tocsr())\nadata.uns['neighbors']['params'] = dict(n_pcs=pca_df.shape[1], use_rep='X_pca', metric='cosine', method='umap', n_neighbors=30)\n# Assign raw counts to .raw slot, load in normalised counts\nadata.raw = adata\nadata_temp = sc.read_mtx(DATA_PATH + \"count_matrix.mtx\")\nadata.X = adata_temp.X\n\n# Change into categorical variable (helps with plotting)\nadata.obs['Cluster'] = adata.obs['Cluster'].astype('category')\nadata.obs['Dataset'] = adata.obs['Dataset'].astype('category')", "_____no_output_____" ] ], [ [ "## Analysis", "_____no_output_____" ] ], [ [ "sc.pl.pca(adata, color='Dataset', frameon=True)", "_____no_output_____" ], [ "sc.pl.umap(adata, color='Dataset', frameon=True)", "_____no_output_____" ], [ "sc.pl.umap(adata, color='Cluster', frameon=True)", "_____no_output_____" ], [ "sc.pl.umap(adata, color='CD74', frameon=True, cmap='Reds', use_raw=False)", "//anaconda3/envs/scanpy/lib/python3.6/site-packages/anndata/core/anndata.py:846: FutureWarning: In anndata v0.7+, arrays contained within an AnnData object will maintain their dimensionality. For example, prior to v0.7 `adata[0, 0].X` returned a scalar and `adata[0, :]` returned a 1d array, post v0.7 they will return two dimensional arrays. 
If you would like to get a one dimensional array from your AnnData object, consider using the `adata.obs_vector`, `adata.var_vector` methods or accessing the array directly.\n warn_flatten()\n" ], [ "sc.tl.paga(adata, groups='Cluster')\nsc.pl.paga(adata, color='Cluster', threshold=0.2, fontsize=20, node_size_scale=1, edge_width_scale=0.2, node_size_power=0.3, layout='fr')", "_____no_output_____" ], [ "sc.tl.draw_graph(adata, init_pos='paga')\nsc.pl.draw_graph(adata, color='Cluster', size=2, legend_fontsize=6, frameon=False, edges=True, title=\"\")", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
4a6a3dc70912b321adfa7d2d6fc3c60bb190fa81
144,765
ipynb
Jupyter Notebook
notebooks/nb062_pandas_dataframes_1.ipynb
hoelzl/ML-Course
efa7ccb7c6583753675bbcda569d3184d1ca98d2
[ "MIT" ]
1
2022-03-02T15:59:11.000Z
2022-03-02T15:59:11.000Z
notebooks/nb062_pandas_dataframes_1.ipynb
hoelzl/ML-Course
efa7ccb7c6583753675bbcda569d3184d1ca98d2
[ "MIT" ]
null
null
null
notebooks/nb062_pandas_dataframes_1.ipynb
hoelzl/ML-Course
efa7ccb7c6583753675bbcda569d3184d1ca98d2
[ "MIT" ]
null
null
null
29.83615
3,615
0.372362
[ [ [ "<img src=\"img/python-logo-notext.svg\"\n style=\"display:block;margin:auto;width:10%\"/>\n<h1 style=\"text-align:center;\">Python: Pandas Data Frames 1</h1>\n<h2 style=\"text-align:center;\">Coding Akademie München GmbH</h2>\n<br/>\n<div style=\"text-align:center;\">Dr. Matthias Hölzl</div>\n<div style=\"text-align:center;\">Allaithy Raed</div>", "_____no_output_____" ], [ "# Data Frames\n\nData Frames sind die am häufigsten verwendete Datenstruktur von Pandas.\n\nSie ermöglichen das bequeme Einlesen, Verarbeiten und Speichern von Daten.\n\nKonzeptionell besteht ein Data Frame aus mehreren `Series`-Instanzen, die einen gemeinsamen Index haben.", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd", "_____no_output_____" ] ], [ [ "## Erzeugen eines Data Frames", "_____no_output_____" ], [ "### Aus einem NumPy Array", "_____no_output_____" ] ], [ [ "def create_data_frame():\n rng = np.random.default_rng(42)\n array = rng.normal(size=(5, 4), scale=5.0)\n index = 'A B C D E'.split()\n columns = 'w x y z'.split()\n return pd.DataFrame(array, index=index, columns=columns)", "_____no_output_____" ], [ "df = create_data_frame()\ndf", "_____no_output_____" ], [ "type(df)", "_____no_output_____" ] ], [ [ "### Aus einer CSV-Datei", "_____no_output_____" ] ], [ [ "df_csv = pd.read_csv(\"example_data.csv\")", "_____no_output_____" ], [ "df_csv", "_____no_output_____" ], [ "df_csv = pd.read_csv(\"example_data.csv\", index_col=0)", "_____no_output_____" ], [ "df_csv", "_____no_output_____" ] ], [ [ "### Aus einer Excel Datei", "_____no_output_____" ] ], [ [ "df_excel = pd.read_excel(\"excel_data.xlsx\", index_col=0)", "_____no_output_____" ], [ "df_excel", "_____no_output_____" ], [ "df_excel2 = pd.read_excel(\"excel_other_sheet.xlsx\", index_col=0)", "_____no_output_____" ], [ "df_excel2", "_____no_output_____" ], [ "df_excel2 = pd.read_excel(\"excel_other_sheet.xlsx\", index_col=0, sheet_name='Another Sheet')", "_____no_output_____" ], [ "df_excel2.head()", "_____no_output_____" ] ], [ [ "### Andere Formate:", "_____no_output_____" ] ], [ [ "pd.read_clipboard\npd.read_html\npd.read_json\npd.read_pickle\npd.read_sql; # Verwendet SQLAlchemy um auf eine Datenbank zuzugreifen\n# usw.", "_____no_output_____" ] ], [ [ "### Indizes und Operationen", "_____no_output_____" ] ], [ [ "df_csv.head()", "_____no_output_____" ], [ "df_csv.tail()", "_____no_output_____" ], [ "df = create_data_frame()\ndf['w']", "_____no_output_____" ], [ "type(df['w'])", "_____no_output_____" ], [ "# Sollte nicht verwendet werden...\ndf.w", "_____no_output_____" ], [ "df[['w', 'y']]", "_____no_output_____" ], [ "df.index", "_____no_output_____" ], [ "df.index.is_monotonic_increasing", "_____no_output_____" ], [ "df.size", "_____no_output_____" ], [ "df.ndim", "_____no_output_____" ], [ "df.shape", "_____no_output_____" ] ], [ [ "### Erzeugen, Umbenennen und Löschen von Spalten", "_____no_output_____" ] ], [ [ "df = create_data_frame()\ndf['Summe aus w und y'] = df['w'] + df['y']", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df.rename(columns={'Summe aus w und y': 'w + y'})", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df.rename(columns={'Summe aus w und y': 'w + y'}, index={'E': 'Z'}, inplace=True)", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "type(df['y'])", "_____no_output_____" ], [ "del df['y']", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df.drop('A')", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df.drop('B', 
inplace=True)", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df.drop('z', axis=1)", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df.drop('z', axis=1, inplace=True)", "_____no_output_____" ], [ "df", "_____no_output_____" ] ], [ [ "## Auswahl", "_____no_output_____" ] ], [ [ "df = create_data_frame()\ndf", "_____no_output_____" ], [ "df['w']", "_____no_output_____" ], [ "# Fehler\n# df['A']", "_____no_output_____" ], [ "df.loc['B']", "_____no_output_____" ], [ "type(df.loc['B'])", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df.iloc[1]", "_____no_output_____" ], [ "df.loc[['A', 'C']]", "_____no_output_____" ], [ "df.loc[['A', 'C'], ['x', 'y']]", "_____no_output_____" ], [ "df.loc['B', 'z']", "_____no_output_____" ], [ "df.iloc[[1, 2], [0, 3]]", "_____no_output_____" ], [ "df.iloc[0, 0]", "_____no_output_____" ] ], [ [ "## Bedingte Auswahl", "_____no_output_____" ] ], [ [ "df = create_data_frame()\ndf", "_____no_output_____" ], [ "df > 0", "_____no_output_____" ], [ "df[df > 0]", "_____no_output_____" ], [ "df['w'] > 0", "_____no_output_____" ], [ "df[df['w'] > 0]", "_____no_output_____" ], [ "df[df['w'] > 0][['x', 'y']]", "_____no_output_____" ], [ "df[(df['w'] > 0) & (df['x'] < 0)]", "_____no_output_____" ] ], [ [ "# Information über Data Frames", "_____no_output_____" ] ], [ [ "df = pd.DataFrame(array, index=index, columns=columns)\ndf['txt'] = 'a b c d e'.split()\ndf.iloc[1, 1] = np.nan\ndf", "_____no_output_____" ], [ "df.describe()", "_____no_output_____" ], [ "df.info()", "<class 'pandas.core.frame.DataFrame'>\nIndex: 5 entries, A to E\nData columns (total 5 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 w 5 non-null float64\n 1 x 4 non-null float64\n 2 y 5 non-null float64\n 3 z 5 non-null float64\n 4 txt 5 non-null object \ndtypes: float64(4), object(1)\nmemory usage: 240.0+ bytes\n" ], [ "df.dtypes", "_____no_output_____" ] ], [ [ "## Data Frame Index", "_____no_output_____" ] ], [ [ "df = create_data_frame()\ndf['txt'] = 'a b c d e'.split()\ndf", "_____no_output_____" ], [ "df.reset_index()", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df.reset_index(inplace=True)", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df.rename(columns={'index': 'old_index'}, inplace=True)", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df.set_index('txt')", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df.set_index('txt', inplace=True)\ndf", "_____no_output_____" ], [ "df.set_index('old_index', inplace=True)\ndf", "_____no_output_____" ], [ "df.info()", "<class 'pandas.core.frame.DataFrame'>\nIndex: 5 entries, A to E\nData columns (total 4 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 w 5 non-null float64\n 1 x 5 non-null float64\n 2 y 5 non-null float64\n 3 z 5 non-null float64\ndtypes: float64(4)\nmemory usage: 200.0+ bytes\n" ], [ "df.index", "_____no_output_____" ], [ "df.index.name = None", "_____no_output_____" ], [ "df", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a6a444f3937342c666dfc2f5e6014a7d389ee46
52,654
ipynb
Jupyter Notebook
OCVED_Applied_v2.ipynb
AlejandroBeltranA/OCVED-ML
d9b6fd05cd2ff402da5760311329d04269b24862
[ "MIT" ]
null
null
null
OCVED_Applied_v2.ipynb
AlejandroBeltranA/OCVED-ML
d9b6fd05cd2ff402da5760311329d04269b24862
[ "MIT" ]
null
null
null
OCVED_Applied_v2.ipynb
AlejandroBeltranA/OCVED-ML
d9b6fd05cd2ff402da5760311329d04269b24862
[ "MIT" ]
null
null
null
37.422886
440
0.401641
[ [ [ "<a href=\"https://colab.research.google.com/github/AlejandroBeltranA/OCVED-ML/blob/master/OCVED_Applied_v2.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Classifying remaining articles\n\nThis is the 4th of 4 scripts used in ocved.mx\n\nThis script uses the LR model trained in the first script to classify the universe of articles collected from EMIS. A total of 188,492 are classified using the model from OCVED_GSR_Trained.v2.4\n\n", "_____no_output_____" ] ], [ [ "# Mount Google Drive\nfrom google.colab import drive\ndrive.mount('/content/drive')", "Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n" ], [ "# Install tqdm\n%cd /content/drive/\n!ls\n!pip install tqdm\n", "/content/drive\n'My Drive'\nRequirement already satisfied: tqdm in /usr/local/lib/python3.6/dist-packages (4.41.1)\n" ], [ "# Packages used\nimport pandas as pd\nimport numpy as np\nfrom tqdm import tqdm_notebook as tqdm\n\nfrom nltk.tokenize import word_tokenize\nfrom nltk import pos_tag\n#from nltk.corpus import stopwords\nfrom nltk.stem import WordNetLemmatizer\nfrom sklearn.preprocessing import LabelEncoder\nfrom collections import defaultdict\nfrom nltk.corpus import wordnet as wn\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn import model_selection, naive_bayes, svm, linear_model\nfrom sklearn.metrics import accuracy_score, precision_recall_fscore_support, precision_score, recall_score, f1_score", "_____no_output_____" ] ], [ [ "We download the Spacy lemmatizer again to reduce words to their lemma for normalization. ", "_____no_output_____" ] ], [ [ "%%capture\n!pip install es-lemmatizer\n!pip install -U spacy\n!sudo python -m spacy download es_core_news_sm\n\nimport re\nimport nltk\nnltk.download('stopwords')", "_____no_output_____" ] ], [ [ "We load in the universe of articles collected from EMIS using the scripts in EMIS_scrape repository. \n\nThese articles are downloaded from subnational news sources, regional newspapers, and other sources not specified as national newspapers. There's a lot of noise in these articles. I leave the training articles in the universe since the model should perform well on the articles it was trained on. \n\nThis csv contains 158,496 articles. The majority of these are noise!", "_____no_output_____" ] ], [ [ " emis = pd.read_csv('My Drive/Data/OCVED/Classifier/universe/EMIS_Universe.csv')\n emis", "_____no_output_____" ] ], [ [ "As detailed in script 1, a seperate process collected articles from national newspapers by having RA's manually download these articles. The manual download process took 5 months, students would read each article and determine if it was relevant to the PI's research. In contrast, the scraping and generating traning data took a total of 3 months, with the added advantage that the model can be used for future data collected. \n\nThis process generated 29,995 articles. ", "_____no_output_____" ] ], [ [ "nat = pd.read_csv('My Drive/Data/OCVED/National/txt_docs/National_OCVED.csv')\nnat", "_____no_output_____" ] ], [ [ "We combine these two datasets, making a full universe of articles on DTO's in Mexico. All articles used in the training steps are also included given the model should perform well classifying these. 
", "_____no_output_____" ] ], [ [ "data = []\ndata.append(emis)\ndata.append(nat)\ndf = pd.concat(data, axis=0, ignore_index=True, sort=True).sort_values('file_id', ascending= True)", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "np.random.seed(1000)", "_____no_output_____" ] ], [ [ "Code for removing accents.", "_____no_output_____" ] ], [ [ "import unicodedata\nimport string\n# BEGIN SHAVE_MARKS_LATIN\ndef shave_marks_latin(txt):\n \"\"\"Remove all diacritic marks from Latin base characters\"\"\"\n norm_txt = unicodedata.normalize('NFD', txt) # <1>\n latin_base = False\n keepers = []\n for c in norm_txt:\n if unicodedata.combining(c) and latin_base: # <2>\n continue # ignore diacritic on Latin base char\n keepers.append(c) # <3>\n # if it isn't combining char, it's a new base char\n if not unicodedata.combining(c): # <4>\n latin_base = c in string.ascii_letters\n shaved = ''.join(keepers)\n return unicodedata.normalize('NFC', shaved) # <5>\n# END SHAVE_MARKS_LATIN\ndef shave_marks(txt):\n \"\"\"Remove all diacritic marks\"\"\"\n norm_txt = unicodedata.normalize('NFD', txt) # <1>\n shaved = ''.join(c for c in norm_txt\n if not unicodedata.combining(c)) # <2>\n return unicodedata.normalize('NFC', shaved) # <3>\n# END SHAVE_MARKS", "_____no_output_____" ] ], [ [ "Let's load the tokenizer and lemmatizer in. ", "_____no_output_____" ] ], [ [ "from es_lemmatizer import lemmatize\nimport es_core_news_sm\n\nnlp = es_core_news_sm.load()\nnlp.add_pipe(lemmatize, after=\"tagger\")", "_____no_output_____" ] ], [ [ "Stopwords removed to reduce noise and reduce the number of not useful features.", "_____no_output_____" ] ], [ [ "from nltk.corpus import stopwords\n\n##Creating a list of stop words and adding custom stopwords\nstop_words = set(stopwords.words(\"spanish\"))\n##Creating a list of custom stopwords\nnew_words = [\"daily\", \"newspaper\", \"reforma\", \"publication\", \"universal\", \"iv\", \"one\", \"two\", \"august\" , \"excelsior\", \"online\",\n \"november\", \"july\", \"september\", \"june\", \"october\", \"december\", \"print\", \"edition\", \"news\", \"milenio\", \"january\", \"international\",\n \"march\", \"april\", \"july\", \"february\", \"may\", \"october\", \"el occidental\", \"comments\", \"powered\", \"display\", \"space\", \n \"javascript\", \"trackpageview\", \"enablelinktracking\", \"location\", \"protocol\", \"weboperations\", \"settrackerurl\", \"left\", \n \"setsiteid\", \"createelement\", \"getelementsbytagname\", \"parentnode\", \"insertbefore\", \"writeposttexto\", \"everykey\", \"passwords\"\n \"writecolumnaderechanotas\", \"anteriorsiguente\", \"anteriorsiguiente\", \"writefooter\", \"align\", \"googletag\", \"writeaddthis\", \"writefooteroem\", \n \"diario delicias\", \"diario tampico\", \"the associated press\", \"redaccion\" , \"national\", \"diario yucatan\", \"mural\", \"periodico\", \n \"new\", \"previously\", \"shown\" , \"a\", \"para\", \"tener\" , \"haber\", \"ser\" , \"mexico city\", \"states\", \"city\", \"and\", \"elsolde\", \"recomendamos\", \n \"diario chihuahua\" , \"diario juarez\" , \"el norte\", \"voz frontera\" , \"regional\" , \"de\" , \"el sol\" , \"el\" , \"sudcaliforniano\" , \"washington\",\n \"union morelos\", \"milenio\" , \"notimex\", \"el financiero\" , \"financiero\" , \"forum magazine\" , \"economista\" , \"gmail\" , \"financial\", \"el\" , \"de\",\n \"la\", \"del\", \"de+el\" , \"a+el\" , \"shortcode\" , \"caption\", \"cfcfcf\", \"float\", \"item\", \"width\", \"follow\", \"aaannnnn\", \"gmannnnn\", \n \"dslnnnnn\", 
\"jtjnnnnn\", \"lcgnnnnn\", \"jgcnnnnn\", \"vhannnnn\", \"mtc\", \"eleconomista\", \"monitoreoif\", \"infosel\", \"gallery\", \n \"heaven\", \"div\", \"push\" , \"translate\", \"google\"]\nstop_words = stop_words.union(new_words)\nstop_words = shave_marks(repr(stop_words))", "_____no_output_____" ], [ "dataset = df", "_____no_output_____" ] ], [ [ "Process for cleaning out the text and generating the corpus. ", "_____no_output_____" ] ], [ [ "corpus = []\nfor i in dataset.itertuples():\n#for i in tqdm(range(1, 2000)):\n text = shave_marks_latin(i.text)\n #Remove punctuations\n text = re.sub('[^a-zA-Z]', ' ', text)\n #Convert to lowercase\n #text = shave_marks_latin(text)\n #text = text.lower()\n #remove tags\n text=re.sub(\"&lt;/?.*?&gt;\",\" &lt;&gt; \",text)\n # remove special characters and digits\n text=re.sub(\"(\\\\d|\\\\W)+\",\" \",text)\n text = re.sub(' +', ' ', text)\n #Lemmatisation\n doc = nlp(text)\n text = [token.lemma_ for token in doc if token.lemma_ not in stop_words] \n text = \" \".join(text)\n text = shave_marks(text)\n file_id = i.file_id\n original = i.text\n corpus.append({ 'text': text, 'file_id': file_id , \"original\": original})\nprint (\"done\")", "done\n" ], [ "data = pd.DataFrame(corpus)\ndata", "_____no_output_____" ], [ "gen = data['text']", "_____no_output_____" ], [ "gen", "_____no_output_____" ] ], [ [ "Let's look at how frequent some words are in the universe using Tokenizer. ", "_____no_output_____" ] ], [ [ "from keras.preprocessing.text import Tokenizer\n#Using TensorFlow backend. xtrain_count, train_y, xvalid_count\ntokenizer = Tokenizer(num_words=5000)\ntokenizer.fit_on_texts(gen)\n\nX_gen = tokenizer.texts_to_sequences(gen)\n#X_test = tokenizer.texts_to_sequences(valid_x)\n#\nvocab_size = len(tokenizer.word_index) + 1 # Adding 1 because of reserved 0 index\n\nprint(gen.iloc[3])\nprint(X_gen[3])\n\n", "_____no_output_____" ], [ "for word in ['sexual', 'cartel', 'sinaloa', 'violencia']:\n print('{}: {}'.format(word, tokenizer.word_index[word]))", "_____no_output_____" ], [ "from keras.preprocessing.sequence import pad_sequences\n\nmaxlen = 100\n\nX_gen = pad_sequences(X_gen, padding='post') #, maxlen=maxlen\n#X_test = pad_sequences(X_test, padding='post', maxlen=maxlen)\n\nprint(X_gen[2, :])", "_____no_output_____" ] ], [ [ "# Application\n\nNow we load in the encoder, model, and vectorizer from script 1 so we can implement it in the application pipeline. ", "_____no_output_____" ] ], [ [ "import pickle\n\npkl_file = open('/content/drive/My Drive/Data/OCVED/Classifier/algorithm/OCVED_encoder_v2.pkl', 'rb')\nencoder = pickle.load(pkl_file) \npkl_file.close()", "_____no_output_____" ] ], [ [ "We used the LR model because it produced the best F1 score of all models. See Osorio & Beltran (2020) for more information on why. ", "_____no_output_____" ] ], [ [ "from sklearn.externals import joblib\n# save the model to disk\nfilename = '/content/drive/My Drive/Data/OCVED/Classifier/algorithm/logistic_model_v2.sav'\n \n# load the model from disk\nlogit_model = joblib.load(filename)", "_____no_output_____" ] ], [ [ "It's important we use the same trained tfidf from the first script in this process. 
Otherwise the length and words used will be different across vectors!", "_____no_output_____" ] ], [ [ "# create a count vectorizer object \nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\nimport pickle\n\n#count_vect = CountVectorizer(analyzer='word', token_pattern=r'\\w{1,}')\n#count_vect.fit(data['text'])\n\n# transform the training and validation data using count vectorizer object\n#xtrain_count = count_vect.transform(gen)\n#xtrain_count\n\n#pickle.dump(xtrain_count, open(\"/content/drive/My Drive/Data/Bogota/categorized_articles/tfidf.pickle\", \"wb\"))\n\npkl_file = open(\"/content/drive/My Drive/Data/OCVED/Classifier/algorithm/Tfidf_vect_3.pickle\", 'rb')\ntfidf = pickle.load(pkl_file) \npkl_file.close()", "_____no_output_____" ], [ "tfidf", "_____no_output_____" ], [ "gen_2 = tfidf.transform(gen)\ngen_2", "_____no_output_____" ] ], [ [ "Now we finally ask the logit model to generate predictions for each article. It reviews the numeric contents and makes a predictions. Anything that has above a .5 probability of being DTO related is classified as such. ", "_____no_output_____" ] ], [ [ "# make a prediction\ny_label = logit_model.predict(gen_2)\n# show the inputs and predicted outputs\nprint(\"X=%s, Predicted=%s\" % (gen_2[0], y_label[0]))", "_____no_output_____" ], [ "# make a prediction\ny_prob = logit_model.predict_proba(gen_2)[:,1]\n# show the inputs and predicted outputs\ny_prob", "_____no_output_____" ] ], [ [ "Now we want to save the output, first in a csv. ", "_____no_output_____" ] ], [ [ "data['y_label'] = y_label\n\ndata['y_prob'] = y_prob", "_____no_output_____" ], [ "data.to_csv('My Drive/Data/OCVED/Classifier/predictions_v3/logit_OCVED_pred_v3.csv')", "_____no_output_____" ] ], [ [ "Here I save them as .txt files for use in Eventus ID. ", "_____no_output_____" ] ], [ [ "data = data[data.y_label == 1 ]\ndata", "_____no_output_____" ], [ "for i in tqdm(dataset.itertuples()):\n text = shave_marks_latin(i.text)\n #Remove punctuations\n #text = re.sub('[^a-zA-Z]', ' ', text)\n #Convert to lowercase\n #remove tags\n #text=re.sub(\"&lt;/?.*?&gt;\",\" &lt;&gt; \",text)\n # remove special characters and digits\n #text=re.sub(\"(\\\\d|\\\\W)+\",\" \",text)\n #text = re.sub(' +', ' ', text)\n file_id = i.file_id\n original = i.original\n dirty = 'My Drive/Data/OCVED/Classifier/predictions_v3/dirty/'\n clean = 'My Drive/Data/OCVED/Classifier/predictions_v3/clean/'\n dirty_file = dirty + file_id\n clean_file = clean + file_id\n with open(dirty_file, 'w') as f:\n f.write(original)\n with open(clean_file, 'w') as c:\n c.write(text)\n", "_____no_output_____" ], [ "print (\"script has completed\")", "_____no_output_____" ] ], [ [ "This takes a long time so I have it print out the time it finished. ", "_____no_output_____" ] ], [ [ "!rm /etc/localtime\n!ln -s /usr/share/zoneinfo/America/Phoenix /etc/localtime\n!date", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
4a6a4eac12cae74ac56c6ddae24abde09a36fa6e
269,564
ipynb
Jupyter Notebook
notebooks/read_OLCI.ipynb
dzelge/xcube
1e5049a227df4a50435d9aac6aacf2bcbaa3e2dd
[ "MIT" ]
null
null
null
notebooks/read_OLCI.ipynb
dzelge/xcube
1e5049a227df4a50435d9aac6aacf2bcbaa3e2dd
[ "MIT" ]
3
2019-05-13T14:11:07.000Z
2019-06-14T09:48:31.000Z
notebooks/read_OLCI.ipynb
CyanoAlert/xcube
b89e7a9adcb348b0f5fc86c38704fb93980b7def
[ "MIT" ]
null
null
null
477.104425
207,569
0.821538
[ [ [ "import xarray as xr\nimport gdal as gdl", "_____no_output_____" ], [ "import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\nfrom mpl_toolkits.basemap import Basemap", "_____no_output_____" ], [ "ds = gdl.Open('/Users/gunnar/src/data/dcs4cop/07/01/O_L2_0021_GCWNS_2017182100224_v1.0.nc')", "_____no_output_____" ], [ "print('File list:', ds.GetFileList())\nprint('Width:', ds.RasterXSize)\nprint('Height:', ds.RasterYSize)\nprint('Coordinate system:', ds.GetProjection())\ngt = ds.GetGeoTransform() # captures origin and pixel size\nprint('Origin:', (gt[0], gt[3]))\nprint('Pixel size:', (gt[1], gt[5]))\nprint('Upper Left Corner:', gdl.ApplyGeoTransform(gt,0,0))\nprint('Upper Right Corner:', gdl.ApplyGeoTransform(gt,ds.RasterXSize,0))\nprint('Lower Left Corner:', gdl.ApplyGeoTransform(gt,0,ds.RasterYSize))\nprint('Lower Right Corner:',\ngdl.ApplyGeoTransform(gt,ds.RasterXSize,ds.RasterYSize))\nprint('Center:', gdl.ApplyGeoTransform(gt,ds.RasterXSize/2,ds.RasterYSize/2))\n#print('Metadata:', ds.GetMetadata())\nprint('Image Structure Metadata:', ds.GetMetadata('IMAGE_STRUCTURE'))\nprint('Number of bands:', ds.RasterCount)\nfor i in range(1, ds.RasterCount+1):\n band = ds.GetRasterBand(i) # in GDAL, band are indexed starting at 1!\n interp = band.GetColorInterpretation()\n interp_name = gdal.GetColorInterpretationName(interp)\n (w,h)=band.GetBlockSize()\n print('Band %d, block size %dx%d, color interp %s' % (i,w,h,interp_name))\n ovr_count = band.GetOverviewCount()\n for j in range(ovr_count):\n ovr_band = band.GetOverview(j) # but overview bands starting at 0\n print(' Overview %d: %dx%d'%(j, ovr_band.XSize, ovr_band.YSize))", "File list: ['/Users/gunnar/src/data/dcs4cop/07/01/O_L2_0021_GCWNS_2017182100224_v1.0.nc']\nWidth: 512\nHeight: 512\nCoordinate system: \nOrigin: (0.0, 0.0)\nPixel size: (1.0, 1.0)\nUpper Left Corner: [0.0, 0.0]\nUpper Right Corner: [512.0, 0.0]\nLower Left Corner: [0.0, 512.0]\nLower Right Corner: [512.0, 512.0]\nCenter: [256.0, 256.0]\nImage Structure Metadata: {}\nNumber of bands: 0\n" ], [ "gdl.Info(ds)", "_____no_output_____" ], [ "res = ds.GetRasterBand('TP_latitude')", "_____no_output_____" ], [ "res", "_____no_output_____" ], [ "dat = xr.open_dataset('../07/01/O_L2_0021_GCWNS_2017182100224_v1.0.nc')", "_____no_output_____" ], [ "", "_____no_output_____" ], [ "dat.lat.plot.imshow()", "_____no_output_____" ], [ "dat.rtoa_12.plot()", "_____no_output_____" ], [ "", "_____no_output_____" ], [ "res = dat.chl_lov_bourgneuf.data\nma_res = np.ma.array(res, mask =np.isnan(res))\nvmin = None\nvmax = None", "_____no_output_____" ], [ "", "_____no_output_____" ], [ "fig = plt.figure()\n#ax = fig.add_axes([0.05,0.05,0.9,0.9])\n#m = Basemap(projection='kav7',lon_0=54.,resolution='c')\nm = Basemap(projection='merc',llcrnrlat=-45,urcrnrlat=65,\\\n llcrnrlon=-10,urcrnrlon=10,lat_ts=2,resolution='c')\n\nx,y = m(dat.lon.data,dat.lat.data)\nm.drawcoastlines()\n#m.drawmapboundary(fill_color='0.3')\nccmap = plt.cm.jet\n#ccmap.set_bad(\"gray\",1.)\nim = m.pcolor(y,x,res,cmap=ccmap)", "_____no_output_____" ], [ "plt.imshow(res)", "_____no_output_____" ], [ "x,y = m(dat.lon.data,dat.lat.data)", "_____no_output_____" ], [ "x.", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a6a62c259519f8123bb07d5001cd1b276236654
531,816
ipynb
Jupyter Notebook
Projects/DNN-Speech-Recognizer/vui_notebook.ipynb
stalebi/NLP
26c212694715cb633da9070aa805893c0b76fff6
[ "MIT" ]
null
null
null
Projects/DNN-Speech-Recognizer/vui_notebook.ipynb
stalebi/NLP
26c212694715cb633da9070aa805893c0b76fff6
[ "MIT" ]
null
null
null
Projects/DNN-Speech-Recognizer/vui_notebook.ipynb
stalebi/NLP
26c212694715cb633da9070aa805893c0b76fff6
[ "MIT" ]
null
null
null
345.784135
167,680
0.863709
[ [ [ "# Artificial Intelligence Nanodegree\n\n## Voice User Interfaces\n\n## Project: Speech Recognition with Neural Networks\n\n---\n\nIn this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following blocks of code will require additional functionality which you must provide. Please be sure to read the instructions carefully! \n\n> **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to \\n\",\n \"**File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.\n\nIn addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.\n\n>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.\n\nThe rubric contains _optional_ \"Stand Out Suggestions\" for enhancing the project beyond the minimum requirements. If you decide to pursue the \"Stand Out Suggestions\", you should include the code in this Jupyter notebook.\n\n---\n\n## Introduction \n\nIn this notebook, you will build a deep neural network that functions as part of an end-to-end automatic speech recognition (ASR) pipeline! Your completed pipeline will accept raw audio as input and return a predicted transcription of the spoken language. The full pipeline is summarized in the figure below.\n\n<img src=\"images/pipeline.png\">\n\n- **STEP 1** is a pre-processing step that converts raw audio to one of two feature representations that are commonly used for ASR. \n- **STEP 2** is an acoustic model which accepts audio features as input and returns a probability distribution over all potential transcriptions. After learning about the basic types of neural networks that are often used for acoustic modeling, you will engage in your own investigations, to design your own acoustic model!\n- **STEP 3** in the pipeline takes the output from the acoustic model and returns a predicted transcription. 
\n\nFeel free to use the links below to navigate the notebook:\n- [The Data](#thedata)\n- [**STEP 1**](#step1): Acoustic Features for Speech Recognition\n- [**STEP 2**](#step2): Deep Neural Networks for Acoustic Modeling\n - [Model 0](#model0): RNN\n - [Model 1](#model1): RNN + TimeDistributed Dense\n - [Model 2](#model2): CNN + RNN + TimeDistributed Dense\n - [Model 3](#model3): Deeper RNN + TimeDistributed Dense\n - [Model 4](#model4): Bidirectional RNN + TimeDistributed Dense\n - [Models 5+](#model5)\n - [Compare the Models](#compare)\n - [Final Model](#final)\n- [**STEP 3**](#step3): Obtain Predictions\n\n<a id='thedata'></a>\n## The Data\n\nWe begin by investigating the dataset that will be used to train and evaluate your pipeline. [LibriSpeech](http://www.danielpovey.com/files/2015_icassp_librispeech.pdf) is a large corpus of English-read speech, designed for training and evaluating models for ASR. The dataset contains 1000 hours of speech derived from audiobooks. We will work with a small subset in this project, since larger-scale data would take a long while to train. However, after completing this project, if you are interested in exploring further, you are encouraged to work with more of the data that is provided [online](http://www.openslr.org/12/).\n\nIn the code cells below, you will use the `vis_train_features` module to visualize a training example. The supplied argument `index=0` tells the module to extract the first example in the training set. (You are welcome to change `index=0` to point to a different training example, if you like, but please **DO NOT** amend any other code in the cell.) The returned variables are:\n- `vis_text` - transcribed text (label) for the training example.\n- `vis_raw_audio` - raw audio waveform for the training example.\n- `vis_mfcc_feature` - mel-frequency cepstral coefficients (MFCCs) for the training example.\n- `vis_spectrogram_feature` - spectrogram for the training example. \n- `vis_audio_path` - the file path to the training example.", "_____no_output_____" ] ], [ [ "from data_generator import vis_train_features\n\n# extract label and audio features for a single training example\nvis_text, vis_raw_audio, vis_mfcc_feature, vis_spectrogram_feature, vis_audio_path = vis_train_features()", "There are 2023 total training examples.\n" ] ], [ [ "The following code cell visualizes the audio waveform for your chosen example, along with the corresponding transcript. You also have the option to play the audio in the notebook!", "_____no_output_____" ] ], [ [ "from IPython.display import Markdown, display\nfrom data_generator import vis_train_features, plot_raw_audio\nfrom IPython.display import Audio\n%matplotlib inline\n\n# plot audio signal\nplot_raw_audio(vis_raw_audio)\n# print length of audio signal\ndisplay(Markdown('**Shape of Audio Signal** : ' + str(vis_raw_audio.shape)))\n# print transcript corresponding to audio clip\ndisplay(Markdown('**Transcript** : ' + str(vis_text)))\n# play the audio file\nAudio(vis_audio_path)", "_____no_output_____" ] ], [ [ "<a id='step1'></a>\n## STEP 1: Acoustic Features for Speech Recognition\n\nFor this project, you won't use the raw audio waveform as input to your model. Instead, we provide code that first performs a pre-processing step to convert the raw audio to a feature representation that has historically proven successful for ASR models. Your acoustic model will accept the feature representation as input.\n\nIn this project, you will explore two possible feature representations. 
_After completing the project_, if you'd like to read more about deep learning architectures that can accept raw audio input, you are encouraged to explore this [research paper](https://pdfs.semanticscholar.org/a566/cd4a8623d661a4931814d9dffc72ecbf63c4.pdf).\n\n### Spectrograms\n\nThe first option for an audio feature representation is the [spectrogram](https://www.youtube.com/watch?v=_FatxGN3vAM). In order to complete this project, you will **not** need to dig deeply into the details of how a spectrogram is calculated; but, if you are curious, the code for calculating the spectrogram was borrowed from [this repository](https://github.com/baidu-research/ba-dls-deepspeech). The implementation appears in the `utils.py` file in your repository.\n\nThe code that we give you returns the spectrogram as a 2D tensor, where the first (_vertical_) dimension indexes time, and the second (_horizontal_) dimension indexes frequency. To speed the convergence of your algorithm, we have also normalized the spectrogram. (You can see this quickly in the visualization below by noting that the mean value hovers around zero, and most entries in the tensor assume values close to zero.)", "_____no_output_____" ] ], [ [ "from data_generator import plot_spectrogram_feature\n\n# plot normalized spectrogram\nplot_spectrogram_feature(vis_spectrogram_feature)\n# print shape of spectrogram\ndisplay(Markdown('**Shape of Spectrogram** : ' + str(vis_spectrogram_feature.shape)))", "_____no_output_____" ] ], [ [ "### Mel-Frequency Cepstral Coefficients (MFCCs)\n\nThe second option for an audio feature representation is [MFCCs](https://en.wikipedia.org/wiki/Mel-frequency_cepstrum). You do **not** need to dig deeply into the details of how MFCCs are calculated, but if you would like more information, you are welcome to peruse the [documentation](https://github.com/jameslyons/python_speech_features) of the `python_speech_features` Python package. Just as with the spectrogram features, the MFCCs are normalized in the supplied code.\n\nThe main idea behind MFCC features is the same as spectrogram features: at each time window, the MFCC feature yields a feature vector that characterizes the sound within the window. Note that the MFCC feature is much lower-dimensional than the spectrogram feature, which could help an acoustic model to avoid overfitting to the training dataset. ", "_____no_output_____" ] ], [ [ "from data_generator import plot_mfcc_feature\n\n# plot normalized MFCC\nplot_mfcc_feature(vis_mfcc_feature)\n# print shape of MFCC\ndisplay(Markdown('**Shape of MFCC** : ' + str(vis_mfcc_feature.shape)))", "_____no_output_____" ] ], [ [ "When you construct your pipeline, you will be able to choose to use either spectrogram or MFCC features. If you would like to see different implementations that make use of MFCCs and/or spectrograms, please check out the links below:\n- This [repository](https://github.com/baidu-research/ba-dls-deepspeech) uses spectrograms.\n- This [repository](https://github.com/mozilla/DeepSpeech) uses MFCCs.\n- This [repository](https://github.com/buriburisuri/speech-to-text-wavenet) also uses MFCCs.\n- This [repository](https://github.com/pannous/tensorflow-speech-recognition/blob/master/speech_data.py) experiments with raw audio, spectrograms, and MFCCs as features.", "_____no_output_____" ], [ "<a id='step2'></a>\n## STEP 2: Deep Neural Networks for Acoustic Modeling\n\nIn this section, you will experiment with various neural network architectures for acoustic modeling. 
\n\nYou will begin by training five relatively simple architectures. **Model 0** is provided for you. You will write code to implement **Models 1**, **2**, **3**, and **4**. If you would like to experiment further, you are welcome to create and train more models under the **Models 5+** heading. \n\nAll models will be specified in the `sample_models.py` file. After importing the `sample_models` module, you will train your architectures in the notebook.\n\nAfter experimenting with the five simple architectures, you will have the opportunity to compare their performance. Based on your findings, you will construct a deeper architecture that is designed to outperform all of the shallow models.\n\nFor your convenience, we have designed the notebook so that each model can be specified and trained on separate occasions. That is, say you decide to take a break from the notebook after training **Model 1**. Then, you need not re-execute all prior code cells in the notebook before training **Model 2**. You need only re-execute the code cell below, that is marked with **`RUN THIS CODE CELL IF YOU ARE RESUMING THE NOTEBOOK AFTER A BREAK`**, before transitioning to the code cells corresponding to **Model 2**.", "_____no_output_____" ] ], [ [ "#####################################################################\n# RUN THIS CODE CELL IF YOU ARE RESUMING THE NOTEBOOK AFTER A BREAK #\n#####################################################################\n\n# allocate 50% of GPU memory (if you like, feel free to change this)\nfrom keras.backend.tensorflow_backend import set_session\nimport tensorflow as tf \nconfig = tf.ConfigProto()\nconfig.gpu_options.per_process_gpu_memory_fraction = 0.5\nset_session(tf.Session(config=config))\n\n# watch for any changes in the sample_models module, and reload it automatically\n%load_ext autoreload\n%autoreload 2\n# import NN architectures for speech recognition\nfrom sample_models import *\n# import function for training acoustic model\nfrom train_utils import train_model", "Using TensorFlow backend.\n" ] ], [ [ "<a id='model0'></a>\n### Model 0: RNN\n\nGiven their effectiveness in modeling sequential data, the first acoustic model you will use is an RNN. As shown in the figure below, the RNN we supply to you will take the time sequence of audio features as input.\n\n<img src=\"images/simple_rnn.png\" width=\"50%\">\n\nAt each time step, the speaker pronounces one of 28 possible characters, including each of the 26 letters in the English alphabet, along with a space character (\" \"), and an apostrophe (').\n\nThe output of the RNN at each time step is a vector of probabilities with 29 entries, where the $i$-th entry encodes the probability that the $i$-th character is spoken in the time sequence. (The extra 29th character is an empty \"character\" used to pad training examples within batches containing uneven lengths.) If you would like to peek under the hood at how characters are mapped to indices in the probability vector, look at the `char_map.py` file in the repository. The figure below shows an equivalent, rolled depiction of the RNN that shows the output layer in greater detail. \n\n<img src=\"images/simple_rnn_unrolled.png\" width=\"60%\">\n\nThe model has already been specified for you in Keras. To import it, you need only run the code cell below. 
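\n\nAs a brief aside before running that cell: if it helps to see the character mapping in miniature, the snippet below reconstructs a 28-character vocabulary plus the extra blank index. The ordering shown here is an assumption made purely for illustration, and `char_map.py` in the repository remains the authoritative mapping.\n\n```\n# Hypothetical reconstruction of the 28-character vocabulary described above;\n# char_map.py defines the mapping the project actually uses.\nalphabet = [chr(ord('a') + i) for i in range(26)] + [' ', \"'\"]\nchar_to_index = {ch: i for i, ch in enumerate(alphabet)}\nindex_to_char = {i: ch for ch, i in char_to_index.items()}\nblank_index = len(alphabet)  # index 28: the extra 'empty' character described above\nassert len(alphabet) + 1 == 29  # one probability per character, plus the empty character\n```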
", "_____no_output_____" ] ], [ [ "model_0 = simple_rnn_model(input_dim=161) # change to 13 if you would like to use MFCC features", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nthe_input (InputLayer) (None, None, 161) 0 \n_________________________________________________________________\nrnn (GRU) (None, None, 29) 16617 \n_________________________________________________________________\nsoftmax (Activation) (None, None, 29) 0 \n=================================================================\nTotal params: 16,617\nTrainable params: 16,617\nNon-trainable params: 0\n_________________________________________________________________\nNone\n" ] ], [ [ "As explored in the lesson, you will train the acoustic model with the [CTC loss](http://www.cs.toronto.edu/~graves/icml_2006.pdf) criterion. Custom loss functions take a bit of hacking in Keras, and so we have implemented the CTC loss function for you, so that you can focus on trying out as many deep learning architectures as possible :). If you'd like to peek at the implementation details, look at the `add_ctc_loss` function within the `train_utils.py` file in the repository.\n\nTo train your architecture, you will use the `train_model` function within the `train_utils` module; it has already been imported in one of the above code cells. The `train_model` function takes three **required** arguments:\n- `input_to_softmax` - a Keras model instance.\n- `pickle_path` - the name of the pickle file where the loss history will be saved.\n- `save_model_path` - the name of the HDF5 file where the model will be saved.\n\nIf we have already supplied values for `input_to_softmax`, `pickle_path`, and `save_model_path`, please **DO NOT** modify these values. \n\nThere are several **optional** arguments that allow you to have more control over the training process. You are welcome to, but not required to, supply your own values for these arguments.\n- `minibatch_size` - the size of the minibatches that are generated while training the model (default: `20`).\n- `spectrogram` - Boolean value dictating whether spectrogram (`True`) or MFCC (`False`) features are used for training (default: `True`).\n- `mfcc_dim` - the size of the feature dimension to use when generating MFCC features (default: `13`).\n- `optimizer` - the Keras optimizer used to train the model (default: `SGD(lr=0.02, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=5)`). \n- `epochs` - the number of epochs to use to train the model (default: `20`). If you choose to modify this parameter, make sure that it is *at least* 20.\n- `verbose` - controls the verbosity of the training output in the `model.fit_generator` method (default: `1`).\n- `sort_by_duration` - Boolean value dictating whether the training and validation sets are sorted by (increasing) duration before the start of the first epoch (default: `False`).\n\nThe `train_model` function defaults to using spectrogram features; if you choose to use these features, note that the acoustic model in `simple_rnn_model` should have `input_dim=161`. Otherwise, if you choose to use MFCC features, the acoustic model should have `input_dim=13`.\n\nWe have chosen to use `GRU` units in the supplied RNN. If you would like to experiment with `LSTM` or `SimpleRNN` cells, feel free to do so here. 
If you change the `GRU` units to `SimpleRNN` cells in `simple_rnn_model`, you may notice that the loss quickly becomes undefined (`nan`) - you are strongly encouraged to check this for yourself! This is due to the [exploding gradients problem](http://www.wildml.com/2015/10/recurrent-neural-networks-tutorial-part-3-backpropagation-through-time-and-vanishing-gradients/). We have already implemented [gradient clipping](https://arxiv.org/pdf/1211.5063.pdf) in your optimizer to help you avoid this issue.\n\n__IMPORTANT NOTE:__ If you notice that your gradient has exploded in any of the models below, feel free to explore more with gradient clipping (the `clipnorm` argument in your optimizer) or swap out any `SimpleRNN` cells for `LSTM` or `GRU` cells. You can also try restarting the kernel to restart the training process.", "_____no_output_____" ] ], [ [ "train_model(input_to_softmax=model_0, \n pickle_path='model_0.pickle', \n save_model_path='model_0.h5',\n spectrogram=True) # change to False if you would like to use MFCC features", "Epoch 1/20\n101/101 [==============================] - 201s 2s/step - loss: 863.4197 - val_loss: 758.4004\nEpoch 2/20\n101/101 [==============================] - 206s 2s/step - loss: 779.2375 - val_loss: 754.1321\nEpoch 3/20\n101/101 [==============================] - 207s 2s/step - loss: 778.5367 - val_loss: 763.9194\nEpoch 4/20\n101/101 [==============================] - 207s 2s/step - loss: 778.1956 - val_loss: 755.1016\nEpoch 5/20\n101/101 [==============================] - 207s 2s/step - loss: 777.9224 - val_loss: 755.0797\nEpoch 6/20\n101/101 [==============================] - 205s 2s/step - loss: 778.2159 - val_loss: 764.5734\nEpoch 7/20\n101/101 [==============================] - 203s 2s/step - loss: 778.1061 - val_loss: 750.3344\nEpoch 8/20\n101/101 [==============================] - 204s 2s/step - loss: 777.6708 - val_loss: 758.4562\nEpoch 9/20\n101/101 [==============================] - 204s 2s/step - loss: 778.2762 - val_loss: 757.3553\nEpoch 10/20\n101/101 [==============================] - 203s 2s/step - loss: 777.8207 - val_loss: 754.0588\nEpoch 11/20\n101/101 [==============================] - 205s 2s/step - loss: 777.9359 - val_loss: 758.6773\nEpoch 12/20\n101/101 [==============================] - 205s 2s/step - loss: 778.0664 - val_loss: 765.6966\nEpoch 13/20\n101/101 [==============================] - 204s 2s/step - loss: 778.1385 - val_loss: 753.6465\nEpoch 14/20\n101/101 [==============================] - 204s 2s/step - loss: 777.5685 - val_loss: 751.2699\nEpoch 15/20\n101/101 [==============================] - 203s 2s/step - loss: 778.1518 - val_loss: 757.7434\nEpoch 16/20\n101/101 [==============================] - 202s 2s/step - loss: 777.6515 - val_loss: 750.4529\nEpoch 17/20\n101/101 [==============================] - 202s 2s/step - loss: 777.7967 - val_loss: 756.9623\nEpoch 18/20\n101/101 [==============================] - 204s 2s/step - loss: 778.1606 - val_loss: 760.4542\nEpoch 19/20\n101/101 [==============================] - 204s 2s/step - loss: 778.3957 - val_loss: 758.0700\nEpoch 20/20\n101/101 [==============================] - 204s 2s/step - loss: 778.5125 - val_loss: 758.8711\n" ] ], [ [ "<a id='model1'></a>\n### (IMPLEMENTATION) Model 1: RNN + TimeDistributed Dense\n\nRead about the [TimeDistributed](https://keras.io/layers/wrappers/) wrapper and the [BatchNormalization](https://keras.io/layers/normalization/) layer in the Keras documentation. 
For your next architecture, you will add [batch normalization](https://arxiv.org/pdf/1510.01378.pdf) to the recurrent layer to reduce training times. The `TimeDistributed` layer will be used to find more complex patterns in the dataset. The unrolled snapshot of the architecture is depicted below.\n\n<img src=\"images/rnn_model.png\" width=\"60%\">\n\nThe next figure shows an equivalent, rolled depiction of the RNN that shows the (`TimeDistrbuted`) dense and output layers in greater detail. \n\n<img src=\"images/rnn_model_unrolled.png\" width=\"60%\">\n\nUse your research to complete the `rnn_model` function within the `sample_models.py` file. The function should specify an architecture that satisfies the following requirements:\n- The first layer of the neural network should be an RNN (`SimpleRNN`, `LSTM`, or `GRU`) that takes the time sequence of audio features as input. We have added `GRU` units for you, but feel free to change `GRU` to `SimpleRNN` or `LSTM`, if you like!\n- Whereas the architecture in `simple_rnn_model` treated the RNN output as the final layer of the model, you will use the output of your RNN as a hidden layer. Use `TimeDistributed` to apply a `Dense` layer to each of the time steps in the RNN output. Ensure that each `Dense` layer has `output_dim` units.\n\nUse the code cell below to load your model into the `model_1` variable. Use a value for `input_dim` that matches your chosen audio features, and feel free to change the values for `units` and `activation` to tweak the behavior of your recurrent layer.", "_____no_output_____" ] ], [ [ "model_1 = rnn_model(input_dim=161, # change to 13 if you would like to use MFCC features\n units=200,\n activation='relu')", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nthe_input (InputLayer) (None, None, 161) 0 \n_________________________________________________________________\nrnn (GRU) (None, None, 200) 217200 \n_________________________________________________________________\nbn_rnn (BatchNormalization) (None, None, 200) 800 \n_________________________________________________________________\ntime_distributed_1 (TimeDist (None, None, 29) 5829 \n_________________________________________________________________\nsoftmax (Activation) (None, None, 29) 0 \n=================================================================\nTotal params: 223,829\nTrainable params: 223,429\nNon-trainable params: 400\n_________________________________________________________________\nNone\n" ] ], [ [ "Please execute the code cell below to train the neural network you specified in `input_to_softmax`. After the model has finished training, the model is [saved](https://keras.io/getting-started/faq/#how-can-i-save-a-keras-model) in the HDF5 file `model_1.h5`. The loss history is [saved](https://wiki.python.org/moin/UsingPickle) in `model_1.pickle`. 
You are welcome to tweak any of the optional parameters while calling the `train_model` function, but this is not required.", "_____no_output_____" ] ], [ [ "train_model(input_to_softmax=model_1, \n pickle_path='model_1.pickle', \n save_model_path='model_1.h5',\n spectrogram=True) # change to False if you would like to use MFCC features", "Epoch 1/20\n101/101 [==============================] - 202s 2s/step - loss: 320.7306 - val_loss: 257.0365\nEpoch 2/20\n101/101 [==============================] - 204s 2s/step - loss: 212.1180 - val_loss: 200.5045\nEpoch 3/20\n101/101 [==============================] - 205s 2s/step - loss: 187.9231 - val_loss: 178.6631\nEpoch 4/20\n101/101 [==============================] - 206s 2s/step - loss: 171.8086 - val_loss: 170.6553\nEpoch 5/20\n101/101 [==============================] - 207s 2s/step - loss: 160.8722 - val_loss: 159.8379\nEpoch 6/20\n101/101 [==============================] - 207s 2s/step - loss: 153.3083 - val_loss: 157.5459\nEpoch 7/20\n101/101 [==============================] - 204s 2s/step - loss: 148.1035 - val_loss: 152.8002\nEpoch 8/20\n101/101 [==============================] - 203s 2s/step - loss: 143.5319 - val_loss: 152.1107\nEpoch 9/20\n101/101 [==============================] - 206s 2s/step - loss: 139.7687 - val_loss: 148.1985\nEpoch 10/20\n101/101 [==============================] - 207s 2s/step - loss: 136.6366 - val_loss: 146.1626\nEpoch 11/20\n101/101 [==============================] - 205s 2s/step - loss: 133.7788 - val_loss: 146.8093\nEpoch 12/20\n101/101 [==============================] - 206s 2s/step - loss: 131.8096 - val_loss: 147.7021\nEpoch 13/20\n101/101 [==============================] - 203s 2s/step - loss: 129.1586 - val_loss: 141.7081\nEpoch 14/20\n101/101 [==============================] - 203s 2s/step - loss: 128.4949 - val_loss: 147.1700\nEpoch 15/20\n101/101 [==============================] - 203s 2s/step - loss: 130.3222 - val_loss: 143.5201\nEpoch 16/20\n101/101 [==============================] - 207s 2s/step - loss: 127.9923 - val_loss: 145.4370\nEpoch 17/20\n101/101 [==============================] - 203s 2s/step - loss: 128.3426 - val_loss: 145.6495\nEpoch 18/20\n101/101 [==============================] - 204s 2s/step - loss: 128.1332 - val_loss: 145.6177\nEpoch 19/20\n101/101 [==============================] - 206s 2s/step - loss: 129.7786 - val_loss: 142.2641\nEpoch 20/20\n101/101 [==============================] - 205s 2s/step - loss: 129.2931 - val_loss: 142.8207\n" ] ], [ [ "<a id='model2'></a>\n### (IMPLEMENTATION) Model 2: CNN + RNN + TimeDistributed Dense\n\nThe architecture in `cnn_rnn_model` adds an additional level of complexity, by introducing a [1D convolution layer](https://keras.io/layers/convolutional/#conv1d). \n\n<img src=\"images/cnn_rnn_model.png\" width=\"100%\">\n\nThis layer incorporates many arguments that can be (optionally) tuned when calling the `cnn_rnn_model` module. We provide sample starting parameters, which you might find useful if you choose to use spectrogram audio features. \n\nIf you instead want to use MFCC features, these arguments will have to be tuned. Note that the current architecture only supports values of `'same'` or `'valid'` for the `conv_border_mode` argument.\n\nWhen tuning the parameters, be careful not to choose settings that make the convolutional layer overly small. 
If the temporal length of the CNN layer is shorter than the length of the transcribed text label, your code will throw an error.\n\nBefore running the code cell below, you must modify the `cnn_rnn_model` function in `sample_models.py`. Please add batch normalization to the recurrent layer, and provide the same `TimeDistributed` layer as before.", "_____no_output_____" ] ], [ [ "model_2 = cnn_rnn_model(input_dim=161, # change to 13 if you would like to use MFCC features\n filters=200,\n kernel_size=11, \n conv_stride=2,\n conv_border_mode='valid',\n units=200)", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nthe_input (InputLayer) (None, None, 161) 0 \n_________________________________________________________________\nconv1d (Conv1D) (None, None, 200) 354400 \n_________________________________________________________________\nbn_conv_1d (BatchNormalizati (None, None, 200) 800 \n_________________________________________________________________\nrnn (SimpleRNN) (None, None, 200) 80200 \n_________________________________________________________________\nbn_rnn (BatchNormalization) (None, None, 200) 800 \n_________________________________________________________________\ntime_distributed_2 (TimeDist (None, None, 29) 5829 \n_________________________________________________________________\nsoftmax (Activation) (None, None, 29) 0 \n=================================================================\nTotal params: 442,029\nTrainable params: 441,229\nNon-trainable params: 800\n_________________________________________________________________\nNone\n" ] ], [ [ "Please execute the code cell below to train the neural network you specified in `input_to_softmax`. After the model has finished training, the model is [saved](https://keras.io/getting-started/faq/#how-can-i-save-a-keras-model) in the HDF5 file `model_2.h5`. The loss history is [saved](https://wiki.python.org/moin/UsingPickle) in `model_2.pickle`. 
You are welcome to tweak any of the optional parameters while calling the `train_model` function, but this is not required.", "_____no_output_____" ] ], [ [ "train_model(input_to_softmax=model_2, \n pickle_path='model_2.pickle', \n save_model_path='model_2.h5', \n spectrogram=True) # change to False if you would like to use MFCC features", "Epoch 1/20\n101/101 [==============================] - 55s 543ms/step - loss: 235.1776 - val_loss: 208.8868\nEpoch 2/20\n101/101 [==============================] - 52s 516ms/step - loss: 174.1126 - val_loss: 165.1317\nEpoch 3/20\n101/101 [==============================] - 51s 508ms/step - loss: 153.9230 - val_loss: 150.5055\nEpoch 4/20\n101/101 [==============================] - 52s 511ms/step - loss: 142.0084 - val_loss: 146.9465\nEpoch 5/20\n101/101 [==============================] - 51s 509ms/step - loss: 134.2025 - val_loss: 142.6079\nEpoch 6/20\n101/101 [==============================] - 51s 506ms/step - loss: 127.7370 - val_loss: 141.7532\nEpoch 7/20\n101/101 [==============================] - 52s 511ms/step - loss: 122.9743 - val_loss: 138.8485\nEpoch 8/20\n101/101 [==============================] - 52s 510ms/step - loss: 118.3054 - val_loss: 135.9816\nEpoch 9/20\n101/101 [==============================] - 52s 511ms/step - loss: 114.2164 - val_loss: 138.5036\nEpoch 10/20\n101/101 [==============================] - 51s 508ms/step - loss: 110.5076 - val_loss: 136.7757\nEpoch 11/20\n101/101 [==============================] - 52s 512ms/step - loss: 106.7345 - val_loss: 136.1353\nEpoch 12/20\n101/101 [==============================] - 51s 507ms/step - loss: 103.6651 - val_loss: 138.6468\nEpoch 13/20\n101/101 [==============================] - 51s 506ms/step - loss: 100.6714 - val_loss: 141.2400\nEpoch 14/20\n101/101 [==============================] - 51s 501ms/step - loss: 97.4967 - val_loss: 138.8228\nEpoch 15/20\n101/101 [==============================] - 52s 511ms/step - loss: 94.7186 - val_loss: 139.3867\nEpoch 16/20\n101/101 [==============================] - 52s 511ms/step - loss: 91.6943 - val_loss: 141.7643\nEpoch 17/20\n101/101 [==============================] - 51s 509ms/step - loss: 89.2094 - val_loss: 141.5147\nEpoch 18/20\n101/101 [==============================] - 51s 506ms/step - loss: 86.4419 - val_loss: 143.5464\nEpoch 19/20\n101/101 [==============================] - 52s 512ms/step - loss: 83.9027 - val_loss: 146.9398\nEpoch 20/20\n101/101 [==============================] - 51s 509ms/step - loss: 81.5493 - val_loss: 148.1152\n" ] ], [ [ "<a id='model3'></a>\n### (IMPLEMENTATION) Model 3: Deeper RNN + TimeDistributed Dense\n\nReview the code in `rnn_model`, which makes use of a single recurrent layer. Now, specify an architecture in `deep_rnn_model` that utilizes a variable number `recur_layers` of recurrent layers. The figure below shows the architecture that should be returned if `recur_layers=2`. In the figure, the output sequence of the first recurrent layer is used as input for the next recurrent layer.\n\n<img src=\"images/deep_rnn_model.png\" width=\"80%\">\n\nFeel free to change the supplied values of `units` to whatever you think performs best. You can change the value of `recur_layers`, as long as your final value is greater than 1. 
(As a quick check that you have implemented the additional functionality in `deep_rnn_model` correctly, make sure that the architecture that you specify here is identical to `rnn_model` if `recur_layers=1`.)", "_____no_output_____" ] ], [ [ "model_3 = deep_rnn_model(input_dim=161, # change to 13 if you would like to use MFCC features\n units=200,\n recur_layers=2) ", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nthe_input (InputLayer) (None, None, 161) 0 \n_________________________________________________________________\nrnn0 (GRU) (None, None, 200) 217200 \n_________________________________________________________________\nbn_rnn0 (BatchNormalization) (None, None, 200) 800 \n_________________________________________________________________\nrnn1 (GRU) (None, None, 200) 240600 \n_________________________________________________________________\nbn_rnn1 (BatchNormalization) (None, None, 200) 800 \n_________________________________________________________________\ntime_distributed_1 (TimeDist (None, None, 29) 5829 \n_________________________________________________________________\nsoftmax (Activation) (None, None, 29) 0 \n=================================================================\nTotal params: 465,229\nTrainable params: 464,429\nNon-trainable params: 800\n_________________________________________________________________\nNone\n" ] ], [ [ "Please execute the code cell below to train the neural network you specified in `input_to_softmax`. After the model has finished training, the model is [saved](https://keras.io/getting-started/faq/#how-can-i-save-a-keras-model) in the HDF5 file `model_3.h5`. The loss history is [saved](https://wiki.python.org/moin/UsingPickle) in `model_3.pickle`. 
You are welcome to tweak any of the optional parameters while calling the `train_model` function, but this is not required.", "_____no_output_____" ] ], [ [ "train_model(input_to_softmax=model_3, \n pickle_path='model_3.pickle', \n save_model_path='model_3.h5', \n spectrogram=True) # change to False if you would like to use MFCC features", "Epoch 1/20\n101/101 [==============================] - 355s 4s/step - loss: 291.6895 - val_loss: 234.1990\nEpoch 2/20\n101/101 [==============================] - 364s 4s/step - loss: 235.8854 - val_loss: 225.5699\nEpoch 3/20\n101/101 [==============================] - 365s 4s/step - loss: 227.7585 - val_loss: 214.2314\nEpoch 4/20\n101/101 [==============================] - 364s 4s/step - loss: 215.1120 - val_loss: 203.0433\nEpoch 5/20\n101/101 [==============================] - 359s 4s/step - loss: 198.1648 - val_loss: 180.7732\nEpoch 6/20\n101/101 [==============================] - 361s 4s/step - loss: 175.0434 - val_loss: 169.0384\nEpoch 7/20\n101/101 [==============================] - 362s 4s/step - loss: 160.9066 - val_loss: 162.5481\nEpoch 8/20\n101/101 [==============================] - 363s 4s/step - loss: 151.3562 - val_loss: 156.2483\nEpoch 9/20\n101/101 [==============================] - 362s 4s/step - loss: 144.8936 - val_loss: 145.2730\nEpoch 10/20\n101/101 [==============================] - 363s 4s/step - loss: 137.6546 - val_loss: 147.4781\nEpoch 11/20\n101/101 [==============================] - 364s 4s/step - loss: 132.5407 - val_loss: 141.1147\nEpoch 12/20\n101/101 [==============================] - 360s 4s/step - loss: 127.6175 - val_loss: 136.5643\nEpoch 13/20\n101/101 [==============================] - 364s 4s/step - loss: 122.9004 - val_loss: 139.3857\nEpoch 14/20\n101/101 [==============================] - 362s 4s/step - loss: 118.9409 - val_loss: 132.5889\nEpoch 15/20\n101/101 [==============================] - 364s 4s/step - loss: 115.5481 - val_loss: 134.0299\nEpoch 16/20\n101/101 [==============================] - 364s 4s/step - loss: 112.3487 - val_loss: 131.1075\nEpoch 17/20\n101/101 [==============================] - 362s 4s/step - loss: 108.7652 - val_loss: 129.8585\nEpoch 18/20\n101/101 [==============================] - 360s 4s/step - loss: 105.9164 - val_loss: 128.4326\nEpoch 19/20\n101/101 [==============================] - 363s 4s/step - loss: 103.5448 - val_loss: 129.0905\nEpoch 20/20\n101/101 [==============================] - 363s 4s/step - loss: 100.5853 - val_loss: 128.4792\n" ] ], [ [ "<a id='model4'></a>\n### (IMPLEMENTATION) Model 4: Bidirectional RNN + TimeDistributed Dense\n\nRead about the [Bidirectional](https://keras.io/layers/wrappers/) wrapper in the Keras documentation. For your next architecture, you will specify an architecture that uses a single bidirectional RNN layer, before a (`TimeDistributed`) dense layer. The added value of a bidirectional RNN is described well in [this paper](http://www.cs.toronto.edu/~hinton/absps/DRNN_speech.pdf).\n> One shortcoming of conventional RNNs is that they are only able to make use of previous context. In speech recognition, where whole utterances are transcribed at once, there is no reason not to exploit future context as well. 
Bidirectional RNNs (BRNNs) do this by processing the data in both directions with two separate hidden layers which are then fed forwards to the same output layer.\n\n<img src=\"images/bidirectional_rnn_model.png\" width=\"80%\">\n\nBefore running the code cell below, you must complete the `bidirectional_rnn_model` function in `sample_models.py`. Feel free to use `SimpleRNN`, `LSTM`, or `GRU` units. When specifying the `Bidirectional` wrapper, use `merge_mode='concat'`.", "_____no_output_____" ] ], [ [ "model_4 = bidirectional_rnn_model(input_dim=161, # change to 13 if you would like to use MFCC features\n units=200)", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nthe_input (InputLayer) (None, None, 161) 0 \n_________________________________________________________________\nbidirectional_1 (Bidirection (None, None, 400) 434400 \n_________________________________________________________________\ntime_distributed_1 (TimeDist (None, None, 29) 11629 \n_________________________________________________________________\nsoftmax (Activation) (None, None, 29) 0 \n=================================================================\nTotal params: 446,029\nTrainable params: 446,029\nNon-trainable params: 0\n_________________________________________________________________\nNone\n" ] ], [ [ "Please execute the code cell below to train the neural network you specified in `input_to_softmax`. After the model has finished training, the model is [saved](https://keras.io/getting-started/faq/#how-can-i-save-a-keras-model) in the HDF5 file `model_4.h5`. The loss history is [saved](https://wiki.python.org/moin/UsingPickle) in `model_4.pickle`. You are welcome to tweak any of the optional parameters while calling the `train_model` function, but this is not required.", "_____no_output_____" ] ], [ [ "train_model(input_to_softmax=model_4, \n pickle_path='model_4.pickle', \n save_model_path='model_4.h5', \n spectrogram=True) # change to False if you would like to use MFCC features", "Epoch 1/20\n101/101 [==============================] - 374s 4s/step - loss: 337.8698 - val_loss: 304.9481\nEpoch 2/20\n101/101 [==============================] - 385s 4s/step - loss: 259.3429 - val_loss: 220.6742\nEpoch 3/20\n101/101 [==============================] - 383s 4s/step - loss: 222.6662 - val_loss: 202.7981\nEpoch 4/20\n101/101 [==============================] - 377s 4s/step - loss: 209.2241 - val_loss: 192.1842\nEpoch 5/20\n101/101 [==============================] - 379s 4s/step - loss: 198.6499 - val_loss: 192.1847\nEpoch 6/20\n101/101 [==============================] - 373s 4s/step - loss: 190.0401 - val_loss: 184.9302\nEpoch 7/20\n101/101 [==============================] - 369s 4s/step - loss: 181.5625 - val_loss: 176.2727\nEpoch 8/20\n101/101 [==============================] - 368s 4s/step - loss: 173.9274 - val_loss: 172.6158\nEpoch 9/20\n101/101 [==============================] - 368s 4s/step - loss: 167.2631 - val_loss: 167.0714\nEpoch 10/20\n101/101 [==============================] - 363s 4s/step - loss: 160.7881 - val_loss: 162.8354\nEpoch 11/20\n101/101 [==============================] - 368s 4s/step - loss: 154.9127 - val_loss: 160.9346\nEpoch 12/20\n101/101 [==============================] - 364s 4s/step - loss: 149.1890 - val_loss: 157.1117\nEpoch 13/20\n101/101 [==============================] - 368s 4s/step - loss: 143.9096 - val_loss: 154.5242\nEpoch 14/20\n101/101 
[==============================] - 366s 4s/step - loss: 139.1482 - val_loss: 151.1132\nEpoch 15/20\n101/101 [==============================] - 367s 4s/step - loss: 134.9178 - val_loss: 149.4291\nEpoch 16/20\n101/101 [==============================] - 365s 4s/step - loss: 130.4482 - val_loss: 148.3456\nEpoch 17/20\n101/101 [==============================] - 367s 4s/step - loss: 126.5642 - val_loss: 149.6986\nEpoch 18/20\n101/101 [==============================] - 366s 4s/step - loss: 122.8972 - val_loss: 146.5263\nEpoch 19/20\n101/101 [==============================] - 365s 4s/step - loss: 119.1460 - val_loss: 147.0540\nEpoch 20/20\n101/101 [==============================] - 366s 4s/step - loss: 115.9069 - val_loss: 146.7494\n" ] ], [ [ "<a id='model5'></a>\n### (OPTIONAL IMPLEMENTATION) Models 5+\n\nIf you would like to try out more architectures than the ones above, please use the code cell below. Please continue to follow the same convention for saving the models; for the $i$-th sample model, please save the loss at **`model_i.pickle`** and saving the trained model at **`model_i.h5`**.", "_____no_output_____" ] ], [ [ "## (Optional) TODO: Try out some more models!\n### Feel free to use as many code cells as needed.\nmodel_5 = deep_bidirectional_rnn(input_dim=161, # change to 13 if you would like to use MFCC features\n units=200,\n recur_layers=2)\n\ntrain_model(input_to_softmax=model_5, \n pickle_path='model_5.pickle', \n save_model_path='model_5.h5', \n spectrogram=True) # change to False if you would like to use MFCC features", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nthe_input (InputLayer) (None, None, 161) 0 \n_________________________________________________________________\nbidirectional_2 (Bidirection (None, None, 400) 434400 \n_________________________________________________________________\nbn_bidir_rnn0 (BatchNormaliz (None, None, 400) 1600 \n_________________________________________________________________\nbidirectional_3 (Bidirection (None, None, 400) 721200 \n_________________________________________________________________\nbn_bidir_rnn1 (BatchNormaliz (None, None, 400) 1600 \n_________________________________________________________________\ntime_distributed_2 (TimeDist (None, None, 29) 11629 \n_________________________________________________________________\nsoftmax (Activation) (None, None, 29) 0 \n=================================================================\nTotal params: 1,170,429\nTrainable params: 1,168,829\nNon-trainable params: 1,600\n_________________________________________________________________\nNone\nEpoch 1/20\n101/101 [==============================] - 682s 7s/step - loss: 304.0819 - val_loss: 240.0879\nEpoch 2/20\n101/101 [==============================] - 692s 7s/step - loss: 242.9390 - val_loss: 227.8609\nEpoch 3/20\n101/101 [==============================] - 694s 7s/step - loss: 232.2447 - val_loss: 220.1399\nEpoch 4/20\n101/101 [==============================] - 697s 7s/step - loss: 223.3953 - val_loss: 219.7281\nEpoch 5/20\n101/101 [==============================] - 697s 7s/step - loss: 204.3883 - val_loss: 187.3938\nEpoch 6/20\n101/101 [==============================] - 693s 7s/step - loss: 181.1707 - val_loss: 179.5328\nEpoch 7/20\n101/101 [==============================] - 694s 7s/step - loss: 166.3472 - val_loss: 164.9973\nEpoch 8/20\n101/101 [==============================] - 692s 7s/step - loss: 154.7606 - 
val_loss: 157.0600\nEpoch 9/20\n101/101 [==============================] - 700s 7s/step - loss: 145.7413 - val_loss: 154.8901\nEpoch 10/20\n101/101 [==============================] - 696s 7s/step - loss: 137.7642 - val_loss: 144.6453\nEpoch 11/20\n101/101 [==============================] - 693s 7s/step - loss: 131.0320 - val_loss: 144.4152\nEpoch 12/20\n101/101 [==============================] - 699s 7s/step - loss: 125.4546 - val_loss: 142.1175\nEpoch 13/20\n101/101 [==============================] - 698s 7s/step - loss: 119.6070 - val_loss: 142.1743\nEpoch 14/20\n101/101 [==============================] - 695s 7s/step - loss: 114.7312 - val_loss: 136.2981\nEpoch 15/20\n101/101 [==============================] - 697s 7s/step - loss: 109.9499 - val_loss: 138.5201\nEpoch 16/20\n101/101 [==============================] - 697s 7s/step - loss: 105.4097 - val_loss: 141.7499\nEpoch 17/20\n101/101 [==============================] - 694s 7s/step - loss: 100.9714 - val_loss: 136.1262\nEpoch 18/20\n101/101 [==============================] - 694s 7s/step - loss: 96.3304 - val_loss: 137.7605\nEpoch 19/20\n101/101 [==============================] - 696s 7s/step - loss: 92.0700 - val_loss: 139.8215\nEpoch 20/20\n101/101 [==============================] - 697s 7s/step - loss: 87.9068 - val_loss: 141.5756\n" ] ], [ [ "<a id='compare'></a>\n### Compare the Models\n\nExecute the code cell below to evaluate the performance of the drafted deep learning models. The training and validation loss are plotted for each model.", "_____no_output_____" ] ], [ [ "from glob import glob\nimport numpy as np\nimport _pickle as pickle\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n%matplotlib inline\nsns.set_style(style='white')\n\n# obtain the paths for the saved model history\nall_pickles = sorted(glob(\"results/*.pickle\"))\n# extract the name of each model\nmodel_names = [item[8:-7] for item in all_pickles]\n# extract the loss history for each model\nvalid_loss = [pickle.load( open( i, \"rb\" ) )['val_loss'] for i in all_pickles]\ntrain_loss = [pickle.load( open( i, \"rb\" ) )['loss'] for i in all_pickles]\n# save the number of epochs used to train each model\nnum_epochs = [len(valid_loss[i]) for i in range(len(valid_loss))]\n\nfig = plt.figure(figsize=(16,5))\n\n# plot the training loss vs. epoch for each model\nax1 = fig.add_subplot(121)\nfor i in range(len(all_pickles)):\n ax1.plot(np.linspace(1, num_epochs[i], num_epochs[i]), \n train_loss[i], label=model_names[i])\n# clean up the plot\nax1.legend() \nax1.set_xlim([1, max(num_epochs)])\nplt.xlabel('Epoch')\nplt.ylabel('Training Loss')\n\n# plot the validation loss vs. epoch for each model\nax2 = fig.add_subplot(122)\nfor i in range(len(all_pickles)):\n ax2.plot(np.linspace(1, num_epochs[i], num_epochs[i]), \n valid_loss[i], label=model_names[i])\n# clean up the plot\nax2.legend() \nax2.set_xlim([1, max(num_epochs)])\nplt.xlabel('Epoch')\nplt.ylabel('Validation Loss')\nplt.show()", "_____no_output_____" ] ], [ [ "__Question 1:__ Use the plot above to analyze the performance of each of the attempted architectures. Which performs best? Provide an explanation regarding why you think some models perform better than others. \n\n__Answer:__\n- Model_0 is a simple RNN network with one layer and 16,617 trainable parameters. Its model performance is the worst compared to the other trained networks. 
Because of its simple structure, this poor performance is expected.\n<br>\n- Model_1 is more complex: a simple RNN followed by Batch Normalization and a TimeDistributed Dense layer. The total number of trainable parameters jumps to 223,429, and we observe a significant improvement in model performance compared to model_0. \n- Model_2 adds a convolutional layer before the RNN layer of model_1, increasing the complexity of the model further. This additional layer brings the total to 441,229 trainable parameters and improves performance significantly compared to the previous models. Model_2 is also the best of all six trained models. The convolutional layer also reduces training time substantially, and the model converges quickly. \n- Model_3 is a variant of model_1 with two recurrent layers and 464,429 trainable parameters in total. Thanks to its deeper structure, it performs better than model_0 and model_1, but it does not outperform model_2 in terms of performance, speed, or convergence. In other words, adding a convolutional layer to the network yields a clear improvement.\n- Model_4 is a variant of model_1 that places a bidirectional recurrent layer before the TimeDistributed Dense layer, with 446,029 trainable parameters. It outperforms model_0 but none of the other models. Surprisingly, model_1 performs better than model_4 despite having fewer trainable parameters. Because of its more complex structure, model_4 also trains and converges slowly. \n- Model_5 is a variant of model_3 that uses two bidirectional recurrent layers, with 1,168,829 trainable parameters. It shows a pattern similar to model_3 while requiring roughly twice the training time. ", "_____no_output_____" ], [ "<a id='final'></a>\n### (IMPLEMENTATION) Final Model\n\nNow that you've tried out many sample models, use what you've learned to draft your own architecture! While your final acoustic model should not be identical to any of the architectures explored above, you are welcome to merely combine the explored layers above into a deeper architecture. It is **NOT** necessary to include new layer types that were not explored in the notebook.\n\nHowever, if you would like some ideas for even more layer types, check out these ideas for some additional, optional extensions to your model:\n\n- If you notice your model is overfitting to the training dataset, consider adding **dropout**! To add dropout to [recurrent layers](https://faroit.github.io/keras-docs/1.0.2/layers/recurrent/), pay special attention to the `dropout_W` and `dropout_U` arguments. This [paper](http://arxiv.org/abs/1512.05287) may also provide some interesting theoretical background.\n- If you choose to include a convolutional layer in your model, you may get better results by working with **dilated convolutions**. If you choose to use dilated convolutions, make sure that you are able to accurately calculate the length of the acoustic model's output in the `model.output_length` lambda function. You can read more about dilated convolutions in Google's [WaveNet paper](https://arxiv.org/abs/1609.03499). For an example of a speech-to-text system that makes use of dilated convolutions, check out this GitHub [repository](https://github.com/buriburisuri/speech-to-text-wavenet). 
You can work with dilated convolutions [in Keras](https://keras.io/layers/convolutional/) by paying special attention to the `padding` argument when you specify a convolutional layer.\n- If your model makes use of convolutional layers, why not also experiment with adding **max pooling**? Check out [this paper](https://arxiv.org/pdf/1701.02720.pdf) for example architecture that makes use of max pooling in an acoustic model.\n- So far, you have experimented with a single bidirectional RNN layer. Consider stacking the bidirectional layers, to produce a [deep bidirectional RNN](https://www.cs.toronto.edu/~graves/asru_2013.pdf)!\n\nAll models that you specify in this repository should have `output_length` defined as an attribute. This attribute is a lambda function that maps the (temporal) length of the input acoustic features to the (temporal) length of the output softmax layer. This function is used in the computation of CTC loss; to see this, look at the `add_ctc_loss` function in `train_utils.py`. To see where the `output_length` attribute is defined for the models in the code, take a look at the `sample_models.py` file. You will notice this line of code within most models:\n```\nmodel.output_length = lambda x: x\n```\nThe acoustic model that incorporates a convolutional layer (`cnn_rnn_model`) has a line that is a bit different:\n```\nmodel.output_length = lambda x: cnn_output_length(\n x, kernel_size, conv_border_mode, conv_stride)\n```\n\nIn the case of models that use purely recurrent layers, the lambda function is the identity function, as the recurrent layers do not modify the (temporal) length of their input tensors. However, convolutional layers are more complicated and require a specialized function (`cnn_output_length` in `sample_models.py`) to determine the temporal length of their output.\n\nYou will have to add the `output_length` attribute to your final model before running the code cell below. Feel free to use the `cnn_output_length` function, if it suits your model. 
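\n\nIf it helps to see what that function has to compute, the snippet below is a self-contained sketch of the standard 1D-convolution length arithmetic; it is an illustration only, and the `cnn_output_length` implementation in `sample_models.py` is the one the project actually relies on.\n\n```\ndef conv1d_output_length(input_length, kernel_size, border_mode, stride, dilation=1):\n    # standard output-length formula for a 1D convolution\n    if input_length is None:\n        return None\n    dilated_kernel = kernel_size + (kernel_size - 1) * (dilation - 1)\n    if border_mode == 'same':\n        length = input_length\n    else:  # 'valid'\n        length = input_length - dilated_kernel + 1\n    return (length + stride - 1) // stride\n\n# e.g. with the kernel_size=11, conv_stride=2, 'valid' settings used earlier in this notebook:\nprint(conv1d_output_length(500, 11, 'valid', 2))  # 245 time steps out of 500\n```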
", "_____no_output_____" ] ], [ [ "# specify the model\nmodel_end = final_model(input_dim=161, filters=200, kernel_size=11, conv_stride=2, \n recur_layers=2,conv_border_mode='valid', units=200)", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nthe_input (InputLayer) (None, None, 161) 0 \n_________________________________________________________________\nconv_1d (Conv1D) (None, None, 200) 354400 \n_________________________________________________________________\nbn_conv_1d (BatchNormalizati (None, None, 200) 800 \n_________________________________________________________________\nrnn_0 (GRU) (None, None, 200) 240600 \n_________________________________________________________________\nbn_rnn_0 (BatchNormalization (None, None, 200) 800 \n_________________________________________________________________\nrnn_1 (GRU) (None, None, 200) 240600 \n_________________________________________________________________\nbn_rnn_1 (BatchNormalization (None, None, 200) 800 \n_________________________________________________________________\ntime_distributed_1 (TimeDist (None, None, 29) 5829 \n_________________________________________________________________\nsoftmax (Activation) (None, None, 29) 0 \n=================================================================\nTotal params: 843,829\nTrainable params: 842,629\nNon-trainable params: 1,200\n_________________________________________________________________\nNone\n" ] ], [ [ "Please execute the code cell below to train the neural network you specified in `input_to_softmax`. After the model has finished training, the model is [saved](https://keras.io/getting-started/faq/#how-can-i-save-a-keras-model) in the HDF5 file `model_end.h5`. The loss history is [saved](https://wiki.python.org/moin/UsingPickle) in `model_end.pickle`. 
You are welcome to tweak any of the optional parameters while calling the `train_model` function, but this is not required.", "_____no_output_____" ] ], [ [ "train_model(input_to_softmax=model_end, \n pickle_path='model_end.pickle', \n save_model_path='model_end.h5', \n spectrogram=True) # change to False if you would like to use MFCC features", "Epoch 1/20\n101/101 [==============================] - 199s 2s/step - loss: 254.8929 - val_loss: 237.9475\nEpoch 2/20\n101/101 [==============================] - 200s 2s/step - loss: 205.2799 - val_loss: 179.8017\nEpoch 3/20\n101/101 [==============================] - 198s 2s/step - loss: 181.6581 - val_loss: 160.1042\nEpoch 4/20\n101/101 [==============================] - 200s 2s/step - loss: 167.5584 - val_loss: 152.5782\nEpoch 5/20\n101/101 [==============================] - 200s 2s/step - loss: 158.1633 - val_loss: 147.0048\nEpoch 6/20\n101/101 [==============================] - 199s 2s/step - loss: 150.8291 - val_loss: 143.1773\nEpoch 7/20\n101/101 [==============================] - 197s 2s/step - loss: 145.2660 - val_loss: 137.9886\nEpoch 8/20\n101/101 [==============================] - 195s 2s/step - loss: 140.6717 - val_loss: 132.6372\nEpoch 9/20\n101/101 [==============================] - 195s 2s/step - loss: 136.6061 - val_loss: 129.4273\nEpoch 10/20\n101/101 [==============================] - 196s 2s/step - loss: 132.9062 - val_loss: 128.3508\nEpoch 11/20\n101/101 [==============================] - 197s 2s/step - loss: 129.5179 - val_loss: 126.6662\nEpoch 12/20\n101/101 [==============================] - 194s 2s/step - loss: 126.2961 - val_loss: 124.6045\nEpoch 13/20\n101/101 [==============================] - 195s 2s/step - loss: 123.3544 - val_loss: 124.3521\nEpoch 14/20\n101/101 [==============================] - 195s 2s/step - loss: 120.5297 - val_loss: 120.9315\nEpoch 15/20\n101/101 [==============================] - 195s 2s/step - loss: 118.0435 - val_loss: 118.5025\nEpoch 16/20\n101/101 [==============================] - 196s 2s/step - loss: 115.8599 - val_loss: 118.2910\nEpoch 17/20\n101/101 [==============================] - 196s 2s/step - loss: 113.5319 - val_loss: 117.4414\nEpoch 18/20\n101/101 [==============================] - 195s 2s/step - loss: 111.3128 - val_loss: 117.9812\nEpoch 19/20\n101/101 [==============================] - 197s 2s/step - loss: 109.4320 - val_loss: 116.0183\nEpoch 20/20\n101/101 [==============================] - 195s 2s/step - loss: 107.0705 - val_loss: 114.8790\n" ] ], [ [ "__Question 2:__ Describe your final model architecture and your reasoning at each step. \n\n__Answer:__ \n- The final model includes both a convolutional layer and also multi recurrent layers with 842,629 trainable parameters. \n- The convolutional layer tries to encode more complex features from spectrogram to the recurrent model. The 'relu' activation function is called at this layer, and the it is followed by Batch Normalization. \n- Next, two recurrent layers with GRU architecture and Batch Normalization are added to the network. Also, the dropout ratio is set 0.3 to make sure the overfitting is not going to happen. The gap between loss and validation loss validates it.\n- Finally, a Time Distributed Dense layer followed by softmax activation is added to the network to perform logits calculation. \n- The final model outperforms all the previous trained models. The train and validation predictions seem to be pretty close to what we expect. 
Still, the model could improve by adding more convolutional layers, recurrent layers, more epochs, and a larger data set.", "_____no_output_____" ], [ "<a id='step3'></a>\n## STEP 3: Obtain Predictions\n\nWe have written a function for you to decode the predictions of your acoustic model. To use the function, please execute the code cell below.", "_____no_output_____" ] ], [ [ "import numpy as np\nfrom data_generator import AudioGenerator\nfrom keras import backend as K\nfrom utils import int_sequence_to_text\nfrom IPython.display import Audio\n\ndef get_predictions(index, partition, input_to_softmax, model_path):\n \"\"\" Print a model's decoded predictions\n Params:\n index (int): The example you would like to visualize\n partition (str): One of 'train' or 'validation'\n input_to_softmax (Model): The acoustic model\n model_path (str): Path to saved acoustic model's weights\n \"\"\"\n # load the train and test data\n data_gen = AudioGenerator()\n data_gen.load_train_data()\n data_gen.load_validation_data()\n \n # obtain the true transcription and the audio features \n if partition == 'validation':\n transcr = data_gen.valid_texts[index]\n audio_path = data_gen.valid_audio_paths[index]\n data_point = data_gen.normalize(data_gen.featurize(audio_path))\n elif partition == 'train':\n transcr = data_gen.train_texts[index]\n audio_path = data_gen.train_audio_paths[index]\n data_point = data_gen.normalize(data_gen.featurize(audio_path))\n else:\n raise Exception('Invalid partition! Must be \"train\" or \"validation\"')\n \n # obtain and decode the acoustic model's predictions\n input_to_softmax.load_weights(model_path)\n prediction = input_to_softmax.predict(np.expand_dims(data_point, axis=0))\n output_length = [input_to_softmax.output_length(data_point.shape[0])] \n pred_ints = (K.eval(K.ctc_decode(\n prediction, output_length)[0][0])+1).flatten().tolist()\n \n # play the audio file, and display the true and predicted transcriptions\n print('-'*80)\n Audio(audio_path)\n print('True transcription:\\n' + '\\n' + transcr)\n print('-'*80)\n print('Predicted transcription:\\n' + '\\n' + ''.join(int_sequence_to_text(pred_ints)))\n print('-'*80)", "_____no_output_____" ] ], [ [ "Use the code cell below to obtain the transcription predicted by your final model for the first example in the training dataset.", "_____no_output_____" ] ], [ [ "get_predictions(index=0, \n partition='train',\n input_to_softmax = final_model(input_dim=161, filters=200, kernel_size=11, conv_stride=2, \n recur_layers=2,conv_border_mode='valid', units=200),\n model_path='results/model_end.h5')", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nthe_input (InputLayer) (None, None, 161) 0 \n_________________________________________________________________\nconv_1d (Conv1D) (None, None, 200) 354400 \n_________________________________________________________________\nbn_conv_1d (BatchNormalizati (None, None, 200) 800 \n_________________________________________________________________\nrnn_0 (GRU) (None, None, 200) 240600 \n_________________________________________________________________\nbn_rnn_0 (BatchNormalization (None, None, 200) 800 \n_________________________________________________________________\nrnn_1 (GRU) (None, None, 200) 240600 \n_________________________________________________________________\nbn_rnn_1 (BatchNormalization (None, None, 200) 800 
\n_________________________________________________________________\ntime_distributed_4 (TimeDist (None, None, 29) 5829 \n_________________________________________________________________\nsoftmax (Activation) (None, None, 29) 0 \n=================================================================\nTotal params: 843,829\nTrainable params: 842,629\nNon-trainable params: 1,200\n_________________________________________________________________\nNone\n--------------------------------------------------------------------------------\nTrue transcription:\n\nher father is a most remarkable person to say the least\n--------------------------------------------------------------------------------\nPredicted transcription:\n\ne fither es om mosfr morkable pursinto sa thet lycet\n--------------------------------------------------------------------------------\n" ] ], [ [ "Use the next code cell to visualize the model's prediction for the first example in the validation dataset.", "_____no_output_____" ] ], [ [ "get_predictions(index=0, \n partition='validation',\n input_to_softmax=final_model(input_dim=161, filters=200, kernel_size=11, conv_stride=2, \n recur_layers=2,conv_border_mode='valid', units=200), \n model_path='results/model_end.h5')", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nthe_input (InputLayer) (None, None, 161) 0 \n_________________________________________________________________\nconv_1d (Conv1D) (None, None, 200) 354400 \n_________________________________________________________________\nbn_conv_1d (BatchNormalizati (None, None, 200) 800 \n_________________________________________________________________\nrnn_0 (GRU) (None, None, 200) 240600 \n_________________________________________________________________\nbn_rnn_0 (BatchNormalization (None, None, 200) 800 \n_________________________________________________________________\nrnn_1 (GRU) (None, None, 200) 240600 \n_________________________________________________________________\nbn_rnn_1 (BatchNormalization (None, None, 200) 800 \n_________________________________________________________________\ntime_distributed_6 (TimeDist (None, None, 29) 5829 \n_________________________________________________________________\nsoftmax (Activation) (None, None, 29) 0 \n=================================================================\nTotal params: 843,829\nTrainable params: 842,629\nNon-trainable params: 1,200\n_________________________________________________________________\nNone\n--------------------------------------------------------------------------------\nTrue transcription:\n\nthe bogus legislature numbered thirty six members\n--------------------------------------------------------------------------------\nPredicted transcription:\n\nthe bobis ligesligtur noverd therty sics memvers\n--------------------------------------------------------------------------------\n" ] ], [ [ "One standard way to improve the results of the decoder is to incorporate a language model. We won't pursue this in the notebook, but you are welcome to do so as an _optional extension_. \n\nIf you are interested in creating models that provide improved transcriptions, you are encouraged to download [more data](http://www.openslr.org/12/) and train bigger, deeper models. But beware - the model will likely take a long while to train. 
For instance, training this [state-of-the-art](https://arxiv.org/pdf/1512.02595v1.pdf) model would take 3-6 weeks on a single GPU!", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
4a6a63cc43c4210b93494b56422fc9fd23452307
11,923
ipynb
Jupyter Notebook
labs-neural-networks/intro-tensorflow/Tutorials.ipynb
xR86/ml-stuff
2a1b79408897171b78032ff2531ab6f8b18be6c4
[ "MIT" ]
3
2018-12-11T03:03:15.000Z
2020-02-11T19:38:07.000Z
labs-neural-networks/intro-tensorflow/Tutorials.ipynb
xR86/ml-stuff
2a1b79408897171b78032ff2531ab6f8b18be6c4
[ "MIT" ]
6
2017-05-31T20:58:32.000Z
2021-02-16T23:13:15.000Z
labs-neural-networks/intro-tensorflow/Tutorials.ipynb
xR86/ml-stuff
2a1b79408897171b78032ff2531ab6f8b18be6c4
[ "MIT" ]
null
null
null
92.426357
8,718
0.844754
[ [ [ "# Tutorials\n\n\n### MNIST\n\nResources:\n- tensorflow docs for [MNIST tutorial](https://www.tensorflow.org/versions/master/tutorials/mnist/beginners/index.html)\n- 3 notebooks that come with this docker image", "_____no_output_____" ] ], [ [ "\nfrom IPython.display import Image\nimport base64\nwith open('mnist-img.txt', 'r') as myfile:\n data=myfile.read().replace('\\n', '')\n #print type(data)\n #print data\nImage(data=base64.decodestring(data))\n", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "### Temporary code from tutorial\n```python\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nx = tf.placeholder(tf.float32, [None, 784])\n#print type(x)\n\n#mnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True) #data sets seem to block jupyter kernel\n #3_mnist_from_scratch.ipynb - should check other source\n#print type(mnist)\n\nW = tf.Variable(tf.zeros([784, 10]))\nb = tf.Variable(tf.zeros([10]))\n\ny = tf.nn.softmax(tf.matmul(x, W) + b)\ny_ = tf.placeholder(tf.float32, [None, 10])\n\ncross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))\ntrain_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)\ninit = tf.initialize_all_variables()\n\nsess = tf.Session()\nsess.run(init)\n\nfor i in range(1000):\n batch_xs, batch_ys = mnist.train.next_batch(100)\n sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})\n \ncorrect_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\nprint(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))\n```", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
4a6a6afd8fd523186b54dd7645913db518584c00
1,161
ipynb
Jupyter Notebook
Capstone_Project_Notebook.ipynb
knitkuldeep/Data-Science-Capstone-week2
33d5224dea52bfc3b3407790d29c09621499a5eb
[ "MIT" ]
null
null
null
Capstone_Project_Notebook.ipynb
knitkuldeep/Data-Science-Capstone-week2
33d5224dea52bfc3b3407790d29c09621499a5eb
[ "MIT" ]
null
null
null
Capstone_Project_Notebook.ipynb
knitkuldeep/Data-Science-Capstone-week2
33d5224dea52bfc3b3407790d29c09621499a5eb
[ "MIT" ]
null
null
null
17.328358
67
0.510767
[ [ [ "## This notebook will be mainly used for the capstone project", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np", "_____no_output_____" ], [ "print('Hello Capstone Project Course!')", "Hello Capstone Project Course!\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ] ]
4a6a6d5f84a302473fef3111bf204343ae9762bc
406,935
ipynb
Jupyter Notebook
notebooks/system_analysis/throughput_analysis.ipynb
zfphil/htdeblur
ac557284f9913292721a6b9f943ff9b921043978
[ "BSD-3-Clause" ]
2
2020-01-16T18:30:55.000Z
2020-02-06T08:33:51.000Z
notebooks/system_analysis/throughput_analysis.ipynb
zfphil/htdeblur
ac557284f9913292721a6b9f943ff9b921043978
[ "BSD-3-Clause" ]
null
null
null
notebooks/system_analysis/throughput_analysis.ipynb
zfphil/htdeblur
ac557284f9913292721a6b9f943ff9b921043978
[ "BSD-3-Clause" ]
null
null
null
287.993631
159,024
0.917896
[ [ [ "%load_ext autoreload\n%autoreload 2\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom libwallerlab.opticsalgorithms.motiondeblur import blurkernel", "/Users/zfphil/anaconda3/lib/python3.6/site-packages/skimage/viewer/utils/core.py:10: UserWarning: Recommended matplotlib backend is `Agg` for full skimage.viewer functionality.\n warn(\"Recommended matplotlib backend is `Agg` for full \"\n" ] ], [ [ "# Overview\nThis notebook explores a SNR vs. acquisition time analysis for strobed illumination, stop and stare, and coded illumination acquisition strategies.\n\nFirst, we determine a relationship between t_frame (frame rate) and t_exposure (exposure time). Then, we relate t_exposure to SNR for each method. These relationships should be smooth but non-linear.", "_____no_output_____" ] ], [ [ "# Define constants\nps = 6.5e-3 #mm\nmag = 20\nps_eff = ps / mag #um\nn_px = np.asarray([2100, 2500])\nfov = n_px * ps_eff\nmotion_axis = 0\nmotion_velocity_mm_s = 20\nmotion_acceleration_mm_s_s = 1e4\n\nt_settle = 0.1 #s\nt_ro = 0.01 #s\n\nfigure_directory = '/Users/zfphil/Desktop/figures/'\n!mkdir -p /Users/zfphil/Desktop/figures/", "_____no_output_____" ], [ "np.random.choice(10)", "_____no_output_____" ], [ "def genBlurVector_rand(kernel_length, beta=0.5, n_tests=10, metric='dnf'):\n '''\n This is a helper function for solving for a blur vector in terms of it's condition #\n '''\n kernel_list = []\n n_elements_max = math.floor(beta * kernel_length)\n for test in range(n_tests):\n indicies = np.random.permutation(kernel_length)\n kernel = np.zeros(kernel_length)\n kernel[indicies[:n_elements_max]] = 1.0\n \n# indicies = np.arange(kernel_length)\n# for index in range(n_elements_max):\n# rand_index = np.random.randint(0, high=np.size(indicies)-1, size=1)\n# kernel[indicies[rand_index]] = 1.\n# indicies = np.delete(indicies, rand_index)\n\n rand_index = np.random.permutation(kernel_length)[n_elements_max]\n kernel[rand_index] = beta * kernel_length - np.sum(kernel)\n assert beta * kernel_length - np.sum(kernel) <= 1\n kernel_list.append(kernel)\n\n if metric == 'cond':\n # Determine kernel with best conditioon #\n metric_best = 1e10\n kernel_best = []\n for kernel in kernel_list:\n spectra = np.abs(np.fft.fft(kernel))\n kappa = np.max(spectra) / np.min(spectra)\n if kappa < metric_best:\n kernel_best = kernel\n metric_best = kappa\n else:\n # Determine kernel with best conditioon #\n metric_best = 1e10\n kernel_best = []\n for kernel in kernel_list:\n dnf = (np.sum(1 / np.abs(scipy.fftpack.fft(kernel)) ** 2))\n if dnf < metric_best:\n kernel_best = kernel\n metric_best = dnf\n\n return (metric_best, kernel_best)\n\n# import math\n# def condNumToDnf(cond, blur_length, image_size, beta=0.1):\n# dnf = ((blur_length * beta) ** 2 / cond ** 2) * math.sqrt(np.prod(image_size))\n# return dnf\n \n# # condNumToDnf(40, 50, (1000,1000))\nimport scipy\ndef calcDnfFromKernel(x):\n from libwallerlab.utilities.opticstools import Ft, iFt\n return (np.sum(1 / np.abs(scipy.fftpack.fft(x)) ** 2))\n\ndef getOptimalDnf(kernel_size, beta=0.5, n_tests=100, metric = 'dnf'):\n dnf, x = genBlurVector_rand(100, beta=beta, n_tests=n_tests, metric=metric)\n return(calcDnfFromKernel(x))", "_____no_output_____" ], [ "getOptimalDnf(100, n_tests=200, metric='dnf')", "/Users/zfphil/anaconda3/lib/python3.6/site-packages/ipykernel/__main__.py:38: RuntimeWarning: divide by zero encountered in true_divide\n" ], [ "def frameRateToExposure(t_frame, acquisition_strategy, motion_velocity_mm_s=10, \n motion_acceleration_mm_s_s=1e4, 
t_readout=0.01, t_settle=0.1, \n fov=[1,1], motion_axis=0, ps_eff_mm=6.5e-3/20, beta_coded=0.5,\n min_strobe_time_s=10e-6):\n \n\n if 'strobe' in acquisition_strategy:\n t_exp_camera = t_frame - t_readout\n v = fov[motion_axis] / t_frame\n t_illum_strobe = ps_eff / v\n if t_illum_strobe < min_strobe_time_s:\n t_exp = 0\n else:\n t_exp = t_illum_strobe\n \n # No deconvolution here\n dnf = 1\n \n elif 'stop_and_stare' in acquisition_strategy:\n t_start_stop = motion_velocity_mm_s / motion_acceleration_mm_s_s\n d_start_stop = 0.5 * motion_acceleration_mm_s_s * t_start_stop ** 2 \n t_move = (fov[motion_axis] - d_start_stop) / motion_velocity_mm_s\n t_exp_camera = t_frame - t_move - t_start_stop + t_readout\n t_exp = t_exp_camera # Illumination is on the whole time\n \n # No deconvolution here\n dnf = 1\n \n elif 'code' in acquisition_strategy:\n t_exp_camera = t_frame - t_readout\n \n # Determine kernel length\n kernel_length = int(np.ceil(t_exp_camera / t_frame * fov[motion_axis] / ps_eff))\n kernel_length = max(kernel_length, 1)\n \n if kernel_length == 1:\n dnf = 1\n else:\n# dnf = blurkernel.dnfUpperBound(kernel_length, beta_coded)\n dnf = getOptimalDnf(kernel_length, beta=beta_coded, n_tests=10)\n \n t_exp_camera = t_frame - t_readout\n v = fov[motion_axis] / t_frame\n t_illum_strobe = ps_eff / v\n if t_illum_strobe < min_strobe_time_s:\n t_exp = 0\n else:\n t_exp = t_exp_camera * beta_coded\n \n# # assert t_exp > 0\n if t_exp <= 0 or t_exp_camera <= 0:\n t_exp = 0\n\n \n return(t_exp, dnf)\n\nframe_time = 0.1\nt_strobe, dnf_strobd = frameRateToExposure(frame_time, 'strobe', fov=fov)\nsnr_strobe = blurkernel.dnf2snr(dnf_strobd, t_strobe*1000)\nprint(\"Strobed illumination will have exposure time %.5f seconds and SNR %.5f\" % (t_strobe, snr_strobe))\n\nt_sns, dnf_sns = frameRateToExposure(frame_time, 'stop_and_stare', fov=fov)\nsnr_sns = blurkernel.dnf2snr(dnf_sns, t_sns*1000)\nprint(\"Stop-and-stare illumination will have exposure time %.5f seconds and SNR %.5f\" % (t_sns, snr_sns))\n\nt_coded, dnf_coded = frameRateToExposure(frame_time, 'code', fov=fov)\nsnr_coded = blurkernel.dnf2snr(dnf_coded, t_coded*1000)\nprint(\"Coded illumination will have exposure time %.5f seconds and SNR %.5f\" % (t_coded, snr_coded))", "Strobed illumination will have exposure time 0.00005 seconds and SNR 11.75121\nStop-and-stare illumination will have exposure time 0.04125 seconds and SNR 351.61446\nCoded illumination will have exposure time 0.04500 seconds and SNR 27.58764\n" ] ], [ [ "## Plot SNR vs Frame Rate", "_____no_output_____" ] ], [ [ "frame_rates = np.arange(1,80,0.1)\nsnr_strobe_list = []\nsnr_sns_list = []\nsnr_coded_list_25 = []\nsnr_coded_list_10 = []\nsnr_coded_list_50 = []\nsnr_coded_list_75 = []\nsnr_coded_list_99 = []\n\nfor index, rate in enumerate(frame_rates):\n t_frame = 1 / rate\n \n t_strobe, dnf_strobe = frameRateToExposure(t_frame, 'strobe', fov=fov)\n snr_strobe_list.append(blurkernel.dnf2snr(dnf_strobe, t_strobe*1000))\n\n t_sns, dnf_sns = frameRateToExposure(t_frame, 'stop_and_stare', fov=fov)\n snr_sns_list.append(blurkernel.dnf2snr(dnf_sns, t_sns*1000))\n\n t_coded_10, dnf_coded_10 = frameRateToExposure(t_frame, 'code', fov=fov, beta_coded=0.05)\n snr_coded_list_10.append(blurkernel.dnf2snr(dnf_coded_10, t_coded_10*1000))\n \n t_coded_50, dnf_coded_50 = frameRateToExposure(t_frame, 'code', fov=fov, beta_coded=0.5)\n snr_coded_list_50.append(blurkernel.dnf2snr(dnf_coded_50, t_coded_50*1000))\n \n# t_coded_75, dnf_coded_75 = frameRateToExposure(t_frame, 'code', fov=fov, 
beta_coded=0.75)\n# snr_coded_list_75.append(blurkernel.dnf2snr(dnf_coded_75, t_coded_75))\n \n t_coded_99, dnf_coded_99 = frameRateToExposure(t_frame, 'code', fov=fov, beta_coded=0.95)\n snr_coded_list_99.append(blurkernel.dnf2snr(dnf_coded_99, t_coded_99*1000))\n# snr_coded_list.append(0)\n# print(\"Coded illumination will have exposure time %.3f seconds and SNR %.2f\" % (t_coded, snr_coded))\n# print(\"Finished rate %d of %d\" % (index, len(frame_rates)))\n\n", "/Users/zfphil/anaconda3/lib/python3.6/site-packages/ipykernel/__main__.py:38: RuntimeWarning: divide by zero encountered in true_divide\n" ], [ "# plt.style.use('seaborn-dark')\njtplot.style()\n# plt.style.use('classic')\nplt.figure(figsize=(12,8))\nplt.semilogy(frame_rates, snr_coded_list_10, 'b-')\nplt.semilogy(frame_rates, snr_coded_list_50, 'g-')\nplt.semilogy(frame_rates, snr_coded_list_99, 'y')\nplt.semilogy(frame_rates, snr_sns_list, 'r-', linewidth=2)\nplt.semilogy(frame_rates, snr_strobe_list, 'w-', linewidth=2)\n\nplt.ylim((0.5, 5000))\nplt.xlim((0,75))\n\nplt.legend(('Coded, 5% Illuminated', 'Coded, 50% Illuminated', 'Coded, 95% Illuminated', 'Stop-and-Stare', 'Strobed'), fontsize=24)\nplt.xlabel('Frame Rate (Hz)', fontsize=28)\nplt.ylabel('SNR', fontsize=28)\nax = plt.gca()\n\n\nfor tick in ax.xaxis.get_major_ticks():\n tick.label.set_fontsize(24) \nfor tick in ax.yaxis.get_major_ticks():\n tick.label.set_fontsize(24)\n \nplt.grid('on', which='both')\nplt.tight_layout()\nplt.savefig(figure_directory + 'strobe_sns_coded.png', transparent=True)", "_____no_output_____" ], [ "# plt.style.use('seaborn-dark')\njtplot.style()\n# plt.style.use('classic')\nplt.figure(figsize=(12,8))\nplt.semilogy(frame_rates, snr_sns_list, 'r-', linewidth=2)\nplt.semilogy(frame_rates, snr_strobe_list, 'w-', linewidth=2)\n\nplt.ylim((0.5, 5000))\nplt.xlim((0,75))\n\nplt.legend(('Stop-and-Stare', 'Strobed'), fontsize=24)\nplt.xlabel('Frame Rate (Hz)', fontsize=28)\nplt.ylabel('SNR', fontsize=28)\nax = plt.gca()\n\nfor tick in ax.xaxis.get_major_ticks():\n tick.label.set_fontsize(24) \nfor tick in ax.yaxis.get_major_ticks():\n tick.label.set_fontsize(24)\n \n\n\nplt.grid('on', which='both')\nplt.tight_layout()\nplt.savefig(figure_directory + 'strobe_sns.png', transparent=True)", "_____no_output_____" ] ], [ [ "# Blur Kernel Optimization", "_____no_output_____" ] ], [ [ "data = np.load('single_illums.npz')\nkernel_vector = data['kernel_vector']\nkernel_random = data['kernel_random']\nblur_kernel_map = np.zeros(object_size)\n\nfor position_index, position in enumerate(point_list):\n blur_kernel_map[position[0], position[1]] = kernel_vector[position_index]", "_____no_output_____" ], [ "num_frames = iterates.shape[1]\n\niterates = np.array(result['history']['x']) #.T\nprint(iterates.shape)\ntotal_its = iterates.shape[1]\ninterval = total_its / num_frames\n#interval=2\n#ax = plt.subplot2grid((6, 1), (1, 5))\n#ax = plt.subplot2grid((6, 1), (1, 0), colspan=5)\n\ninitial_power_spectrum = 0;\nblur_operator = W * 0.5*np.sum(kernel_map, 0).astype(np.complex64).reshape(-1)\nstatic_power_spectrum = np.sum(np.abs(wotf.Ft(blur_operator.reshape(image_size))), axis=0)\n\nsigma_min_static = np.amin(static_power_spectrum)\nsigma_min_static = np.amax(static_power_spectrum)\n\n# Generate spatial frequency coordintes\nps = 6.5\nfov = 2000 * 6.5e-3/20\ndk = 1/fov\nfreqs = np.arange(-len(static_power_spectrum) // 2, len(static_power_spectrum) // 2) * dk\nassert len(freqs) == len(static_power_spectrum)\n\nkernel_random = iterates[:,0]\n\nfor i in range(num_frames):\n 
illum = iterates[:,int(interval*i)]\n\n blur_operator_illum = W * (kernel_map.T.dot(iterates[:,int(interval*i)])).T.astype(np.complex64).reshape(-1)\n power_spectrum = np.sum(np.abs(wotf.Ft(blur_operator_illum.reshape(image_size))), axis=0)\n sigma_min = np.amin(power_spectrum)\n sigma_max = np.amax(power_spectrum)\n condition = sigma_max/sigma_min\n \n if i==0:\n initial_power_spectrum = power_spectrum\n \n fig = plt.figure(figsize=(10,5))\n ax1 = plt.subplot2grid((8, 1), (0, 0), rowspan=4)\n ax2 = plt.subplot2grid((8, 1), (6, 0), rowspan=2)\n \n ax2.step(illum, 'orange', linewidth=3)\n ax2.set_ylim([-0.1,1.1])\n ax2.set_xlim([0,24])\n ax2.set_title('Illumination Pattern', fontsize=24, color='w')\n ax1.set_title('Power Spectrum', fontsize=24, color='w')\n# ax1.set_xlim([0,127])\n# ax1.set_ylim([10,10^4])\n# ax2.set_xticklabels([])\n ax1.set_ylabel('Energy', color='w')\n ax1.set_xlabel('Spatial Frequencey (cycles/mm)', color='w')\n \n ax2.set_ylabel('Intensity', color='w')\n ax2.set_xlabel('Position', color='w')\n \n ax2.xaxis.set_ticks_position('none')\n ax2.yaxis.set_ticks_position('none')\n #ax2.axison = False\n ax2.set_yticklabels([0,0,1])\n# ax1.semilogy(initial_power_spectrum, '--', color='white')\n# ax1.semilogy(static_power_spectrum, '--', color='white')\n ax1.semilogy(freqs, sigma_min*np.ones(power_spectrum.size), color='r', linewidth=3)\n ax1.semilogy(freqs, sigma_max*np.ones(power_spectrum.size), color='r', linewidth=3)\n ax1.semilogy(freqs, power_spectrum, color='blue', linewidth=3)\n ax1.set_ylim((10,6000))\n# ax1.set_xticklabels([])\n #ax1.set_yticklabels([])\n #plt.suptitle('iteration '+str(int(interval*i))+',\\t$\\kappa=$'+str(np.round(condition,3)))\n \n plt.text(0.6,4.7,'iteration '+str(int(interval*i))+', $\\kappa=$'+str(np.round(condition,3)),fontsize=15, color='w')\n \n # Set Axis Colors\n for ax in [ax1, ax2]:\n ax.tick_params(axis='both', which='major', labelsize=14, color='w')\n ax.tick_params(axis='both', which='minor', labelsize=14, color='w')\n [i.set_color(\"w\") for i in ax.get_xticklabels()]\n [i.set_color(\"w\") for i in ax.get_yticklabels()]\n plt.savefig(\"images/power_spectrum_optimization\" + str(i) + \".png\")", "_____no_output_____" ] ] ]
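The comparison in this notebook hinges on the deconvolution noise factor (DNF) that `calcDnfFromKernel` computes, i.e. the sum of 1/|FFT(kernel)|² over all frequencies: codes with a flat spectrum keep it small, while codes with spectral nulls amplify noise on deconvolution. A minimal, self-contained sketch of that metric on random 50%-on codes is shown below; `calc_dnf`, `random_code`, the fixed seed, and the printed numbers are illustrative additions, not part of the notebook.

```python
# Hedged sketch of the DNF metric used above: sum over frequencies of 1/|FFT(kernel)|^2.
# Lower DNF -> less noise amplification when deconvolving a coded-illumination blur.
import numpy as np
import scipy.fftpack

def calc_dnf(kernel):
    # same quantity as calcDnfFromKernel in the notebook
    return np.sum(1 / np.abs(scipy.fftpack.fft(kernel)) ** 2)

def random_code(rng, n, beta):
    # binary illumination code with a fraction beta of positions switched on
    code = np.zeros(n)
    code[rng.permutation(n)[:int(beta * n)]] = 1.0
    return code

n, beta = 100, 0.5
rng = np.random.RandomState(0)
print('one random 50% code:', calc_dnf(random_code(rng, n, beta)))
print('best of 100 codes  :', min(calc_dnf(random_code(rng, n, beta)) for _ in range(100)))
# The second number is what getOptimalDnf searches for (the notebook additionally
# adjusts one element so the total on-time exactly matches beta * n, skipped here).
```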
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
4a6a70c88a5ec987939fe3888372a4310e8db21c
10,472
ipynb
Jupyter Notebook
2_LinearRegression_AdjustedR_S34_L215/sklearn - Multiple Linear Regression and Adjusted R-squared - Exercise.ipynb
yoni2k/data-science-bootcamp
edce41641020552a0c73112ef3c67ccdf70d2e08
[ "MIT" ]
null
null
null
2_LinearRegression_AdjustedR_S34_L215/sklearn - Multiple Linear Regression and Adjusted R-squared - Exercise.ipynb
yoni2k/data-science-bootcamp
edce41641020552a0c73112ef3c67ccdf70d2e08
[ "MIT" ]
null
null
null
2_LinearRegression_AdjustedR_S34_L215/sklearn - Multiple Linear Regression and Adjusted R-squared - Exercise.ipynb
yoni2k/data-science-bootcamp
edce41641020552a0c73112ef3c67ccdf70d2e08
[ "MIT" ]
null
null
null
22.186441
182
0.403361
[ [ [ "# Adjusted R-squared - Exercise\n\nUsing the code from the lecture, create a function which will calculate the adjusted R-squared for you, given the independent variable(s) (x) and the dependent variable (y).\n\nCheck if you function is working properly.\n\nPlease solve the exercise at the bottom of the notebook (in order to check if it is working you must run all previous cells).", "_____no_output_____" ], [ "## Import the relevant libraries", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set()\n\nfrom sklearn.linear_model import LinearRegression", "_____no_output_____" ] ], [ [ "## Load the data", "_____no_output_____" ] ], [ [ "data = pd.read_csv('1.02. Multiple linear regression.csv')\ndata.head()", "_____no_output_____" ], [ "data.describe()", "_____no_output_____" ] ], [ [ "## Create the multiple linear regression", "_____no_output_____" ], [ "### Declare the dependent and independent variables", "_____no_output_____" ] ], [ [ "x = data[['SAT','Rand 1,2,3']]\ny = data['GPA']", "_____no_output_____" ] ], [ [ "### Regression itself", "_____no_output_____" ] ], [ [ "reg = LinearRegression()\nreg.fit(x,y)", "_____no_output_____" ], [ "reg.coef_", "_____no_output_____" ], [ "reg.intercept_", "_____no_output_____" ] ], [ [ "### Calculating the R-squared", "_____no_output_____" ] ], [ [ "reg.score(x,y)", "_____no_output_____" ] ], [ [ "### Formula for Adjusted R^2\n\n$R^2_{adj.} = 1 - (1-R^2)*\\frac{n-1}{n-p-1}$", "_____no_output_____" ] ], [ [ "x.shape", "_____no_output_____" ], [ "r2 = reg.score(x,y)\nn = x.shape[0]\np = x.shape[1]\n\nadjusted_r2 = 1-(1-r2)*(n-1)/(n-p-1)\nadjusted_r2", "_____no_output_____" ] ], [ [ "### Adjusted R^2 function", "_____no_output_____" ] ], [ [ "def adjusted_r(x,y,reg):\n n = x.shape[0]\n p = x.shape[1]\n r2 = reg.score(x,y)\n return 1-(1-r2)*(n-1)/(n-p-1)", "_____no_output_____" ], [ "adjusted_r(x,y,reg)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
4a6a747ac575336550853f42a141fe87442cc3e0
278,187
ipynb
Jupyter Notebook
src/pix2pix/notebook_pix2pix_colab_yhk.ipynb
yogeshhk/MidcurveNN
98e11abdac1c6ab5c76e59dcbdd9ce8c08549a67
[ "MIT" ]
22
2019-06-08T03:25:06.000Z
2022-01-15T19:35:38.000Z
src/pix2pix/notebook_pix2pix_colab_yhk.ipynb
yogeshhk/MidcurveNN
98e11abdac1c6ab5c76e59dcbdd9ce8c08549a67
[ "MIT" ]
5
2019-10-06T08:10:24.000Z
2021-12-08T01:33:44.000Z
src/pix2pix/notebook_pix2pix_colab_yhk.ipynb
yogeshhk/MidcurveNN
98e11abdac1c6ab5c76e59dcbdd9ce8c08549a67
[ "MIT" ]
7
2019-07-06T05:16:32.000Z
2021-12-09T09:35:03.000Z
257.819277
110,584
0.846952
[ [ [ "**Pix-2-Pix Model using TensorFlow and Keras**\n\nA port of pix-2-pix model built using TensorFlow's high level `tf.keras` API.\n\nNote: GPU is required to make this model train quickly. Otherwise it could take hours.\n\nOriginal : https://www.kaggle.com/vikramtiwari/pix-2-pix-model-using-tensorflow-and-keras/notebook", "_____no_output_____" ], [ "## Installations", "_____no_output_____" ] ], [ [ "requirements = \"\"\"\ntensorflow\ndrawSvg\nmatplotlib\nnumpy\nscipy\npillow\n#urllib\n#skimage\nscikit-image\n#gzip\n#pickle\n\"\"\"\n%store requirements > requirements.txt", "Writing 'requirements' (str) to file 'requirements.txt'.\n" ], [ "!pip install -r requirements.txt", "Collecting git+https://www.github.com/keras-team/keras-contrib.git (from -r requirements.txt (line 3))\n Cloning https://www.github.com/keras-team/keras-contrib.git to /tmp/pip-req-build-dt2em6rp\n Running command git clone -q https://www.github.com/keras-team/keras-contrib.git /tmp/pip-req-build-dt2em6rp\nRequirement already satisfied: keras in /usr/local/lib/python3.6/dist-packages (from -r requirements.txt (line 2)) (2.2.4)\nRequirement already satisfied: matplotlib in /usr/local/lib/python3.6/dist-packages (from -r requirements.txt (line 4)) (3.0.3)\nRequirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from -r requirements.txt (line 5)) (1.16.4)\nRequirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from -r requirements.txt (line 6)) (1.3.0)\nRequirement already satisfied: pillow in /usr/local/lib/python3.6/dist-packages (from -r requirements.txt (line 7)) (4.3.0)\nRequirement already satisfied: scikit-image in /usr/local/lib/python3.6/dist-packages (from -r requirements.txt (line 10)) (0.15.0)\nRequirement already satisfied: h5py in /usr/local/lib/python3.6/dist-packages (from keras->-r requirements.txt (line 2)) (2.8.0)\nRequirement already satisfied: six>=1.9.0 in /usr/local/lib/python3.6/dist-packages (from keras->-r requirements.txt (line 2)) (1.12.0)\nRequirement already satisfied: keras-preprocessing>=1.0.5 in /usr/local/lib/python3.6/dist-packages (from keras->-r requirements.txt (line 2)) (1.1.0)\nRequirement already satisfied: pyyaml in /usr/local/lib/python3.6/dist-packages (from keras->-r requirements.txt (line 2)) (3.13)\nRequirement already satisfied: keras-applications>=1.0.6 in /usr/local/lib/python3.6/dist-packages (from keras->-r requirements.txt (line 2)) (1.0.8)\nRequirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->-r requirements.txt (line 4)) (2.5.3)\nRequirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->-r requirements.txt (line 4)) (2.4.0)\nRequirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->-r requirements.txt (line 4)) (1.1.0)\nRequirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.6/dist-packages (from matplotlib->-r requirements.txt (line 4)) (0.10.0)\nRequirement already satisfied: olefile in /usr/local/lib/python3.6/dist-packages (from pillow->-r requirements.txt (line 7)) (0.46)\nRequirement already satisfied: networkx>=2.0 in /usr/local/lib/python3.6/dist-packages (from scikit-image->-r requirements.txt (line 10)) (2.3)\nRequirement already satisfied: PyWavelets>=0.4.0 in /usr/local/lib/python3.6/dist-packages (from scikit-image->-r requirements.txt (line 10)) (1.0.3)\nRequirement already satisfied: imageio>=2.0.1 in 
/usr/local/lib/python3.6/dist-packages (from scikit-image->-r requirements.txt (line 10)) (2.4.1)\nRequirement already satisfied: setuptools in /usr/local/lib/python3.6/dist-packages (from kiwisolver>=1.0.1->matplotlib->-r requirements.txt (line 4)) (41.0.1)\nRequirement already satisfied: decorator>=4.3.0 in /usr/local/lib/python3.6/dist-packages (from networkx>=2.0->scikit-image->-r requirements.txt (line 10)) (4.4.0)\nBuilding wheels for collected packages: keras-contrib\n Building wheel for keras-contrib (setup.py) ... \u001b[?25l\u001b[?25hdone\n Stored in directory: /tmp/pip-ephem-wheel-cache-ddzahsem/wheels/11/27/c8/4ed56de7b55f4f61244e2dc6ef3cdbaff2692527a2ce6502ba\nSuccessfully built keras-contrib\nInstalling collected packages: keras-contrib\nSuccessfully installed keras-contrib-2.0.8\n" ] ], [ [ "## Data Import", "_____no_output_____" ] ], [ [ "# !mkdir datasets\n# URL=\"https://people.eecs.berkeley.edu/~tinghuiz/projects/pix2pix/datasets/facade.tar.gz\"\n# TAR_FILE=\"./datasets/facade.tar.gz\"\n# TARGET_DIR=\"./datasets/facade/\"\n# !wget -N URL -O TAR_FILE\n# !mkdir TARGET_DIR\n# !tar -zxvf TAR_FILE -C ./datasets/\n# !rm TAR_FILE\n\n#_URL = 'https://drive.google.com/uc?export=download&id=1dnLTTT19YROjpjwZIZpJ1fxAd91cGBJv'\n#path_to_zip = tf.keras.utils.get_file('pix2pix.zip', origin=_URL,extract=True)\n#PATH = os.path.join(os.path.dirname(path_to_zip), 'pix2pix/')", "_____no_output_____" ] ], [ [ "## Imports", "_____no_output_____" ] ], [ [ "import os\nimport datetime\nimport imageio\nimport skimage\nimport scipy # \n# from PIL import Image as Img\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\nfrom glob import glob\nfrom IPython.display import Image\n\ntf.logging.set_verbosity(tf.logging.ERROR)\n\ndatafolderpath = \"/content/drive/My Drive/ToDos/Research/MidcurveNN/code/data/\"\ndatasetpath = datafolderpath+ \"pix2pix/datasets/pix2pix/\"\n# # datasetpath = \"./\"", "_____no_output_____" ], [ "# Run this cell to mount your Google Drive.\nfrom google.colab import drive\ndrive.mount('/content/drive')", "Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n" ], [ "\n!ls $datafolderpath", "ls: cannot access '/content/drive/My': No such file or directory\nls: cannot access 'Drive/ToDos/Research/MidcurveNN/code/data/': No such file or directory\n" ], [ "class DataLoader():\n def __init__(self, dataset_name, img_res=(256, 256)):\n self.dataset_name = dataset_name\n self.img_res = img_res\n \n def binarize(self, image):\n h, w = image.shape\n for i in range(h):\n for j in range(w):\n if image[i][j] < 195:\n image[i][j] = 0\n return image\n\n def load_data(self, batch_size=1, is_testing=False):\n data_type = \"train\" if not is_testing else \"test\"\n path = glob(datafolderpath+'%s/datasets/%s/%s/*' % (self.dataset_name, self.dataset_name, data_type))\n #path = glob(PATH + '%s/*' % (data_type))\n batch_images = np.random.choice(path, size=batch_size)\n\n imgs_A = []\n imgs_B = []\n for img_path in batch_images:\n img = self.imread(img_path)\n img = self.binarize(img)\n img = np.expand_dims(img, axis=-1)\n h, w, _ = img.shape\n _w = int(w/2)\n img_A, img_B = img[:, :_w, :], img[:, _w:, :]\n\n # img_A = scipy.misc.imresize(img_A, self.img_res)\n # img_A = np.array(Img.fromarray(img_A).resize(self.img_res))\n #img_A = np.array(skimage.transform.resize(img_A,self.img_res))\n # img_B = scipy.misc.imresize(img_B, self.img_res)\n # img_B = 
np.array(Img.fromarray(img_B).resize(self.img_res))\n #img_B = np.array(skimage.transform.resize(img_B,self.img_res))\n\n # If training => do random flip\n if not is_testing and np.random.random() < 0.5:\n img_A = np.fliplr(img_A)\n img_B = np.fliplr(img_B)\n\n imgs_A.append(img_A)\n imgs_B.append(img_B)\n\n imgs_A = np.array(imgs_A)/127.5 - 1.\n imgs_B = np.array(imgs_B)/127.5 - 1.\n\n return imgs_A, imgs_B\n\n def load_batch(self, batch_size=1, is_testing=False):\n data_type = \"train\" if not is_testing else \"val\"\n path = glob(datafolderpath+'%s/datasets/%s/%s/*' % (self.dataset_name, self.dataset_name, data_type))\n #path = glob(PATH + '%s/*' % (data_type))\n self.n_batches = int(len(path) / batch_size)\n\n for i in range(self.n_batches-1):\n batch = path[i*batch_size:(i+1)*batch_size]\n imgs_A, imgs_B = [], []\n for img in batch:\n img = self.imread(img)\n img = self.binarize(img)\n img = np.expand_dims(img, axis=-1)\n h, w, _ = img.shape\n half_w = int(w/2)\n img_A = img[:, :half_w, :]\n img_B = img[:, half_w:, :]\n\n # img_A = scipy.misc.imresize(img_A, self.img_res)\n # img_A = np.array(Img.fromarray(img_A).resize(self.img_res))\n #img_A = np.array(skimage.transform.resize(img_A,self.img_res))\n # img_B = scipy.misc.imresize(img_B, self.img_res)\n # img_B = np.array(Img.fromarray(img_B).resize(self.img_res))\n #img_B = np.array(skimage.transform.resize(img_B,self.img_res))\n\n if not is_testing and np.random.random() > 0.5:\n img_A = np.fliplr(img_A)\n img_B = np.fliplr(img_B)\n\n imgs_A.append(img_A)\n imgs_B.append(img_B)\n\n imgs_A = np.array(imgs_A)/127.5 - 1.\n imgs_B = np.array(imgs_B)/127.5 - 1.\n\n yield imgs_A, imgs_B\n\n\n def imread(self, path):\n return imageio.imread(path).astype(np.float)", "_____no_output_____" ], [ "class Pix2Pix():\n def __init__(self):\n # Input shape\n self.img_rows = 256\n self.img_cols = 256\n self.channels = 1\n self.img_shape = (self.img_rows, self.img_cols, self.channels)\n\n # Configure data loader\n self.dataset_name = 'pix2pix'\n self.data_loader = DataLoader(dataset_name=self.dataset_name,\n img_res=(self.img_rows, self.img_cols))\n\n\n # Calculate output shape of D (PatchGAN)\n patch = int(self.img_rows / 2**4)\n self.disc_patch = (patch, patch, 1)\n\n # Number of filters in the first layer of G and D\n self.gf = int(self.img_rows/4) # 64\n self.df = int(self.img_rows/4) # 64\n\n optimizer = tf.keras.optimizers.Adam(0.0002, 0.5)\n\n # Build and compile the discriminator\n self.discriminator = self.build_discriminator()\n self.discriminator.compile(loss='mse',\n optimizer=optimizer,\n metrics=['accuracy'])\n\n #-------------------------\n # Construct Computational\n # Graph of Generator\n #-------------------------\n\n # Build the generator\n self.generator = self.build_generator()\n\n # Input images and their conditioning images\n img_A = tf.keras.layers.Input(shape=self.img_shape)\n img_B = tf.keras.layers.Input(shape=self.img_shape)\n\n # By conditioning on B generate a fake version of A\n #fake_A = self.generator(img_B)\n\n #By conditioning on A generate a fake version of B\n fake_B = self.generator(img_A)\n \n # For the combined model we will only train the generator\n self.discriminator.trainable = False\n\n # Discriminators determines validity of translated images / condition pairs\n #valid = self.discriminator([fake_A, img_B])\n \n valid = self.discriminator([img_A, fake_B])\n\n self.combined = tf.keras.models.Model(inputs=[img_A, img_B], outputs=[valid, fake_B])\n self.combined.compile(loss=['mse', 'mae'],\n loss_weights=[1, 
100],\n optimizer=optimizer)\n\n def build_generator(self):\n \"\"\"U-Net Generator\"\"\"\n\n def conv2d(layer_input, filters, f_size=4, bn=True):\n \"\"\"Layers used during downsampling\"\"\"\n d = tf.keras.layers.Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)\n d = tf.keras.layers.LeakyReLU(alpha=0.2)(d)\n if bn:\n d = tf.keras.layers.BatchNormalization(momentum=0.8)(d)\n return d\n\n def deconv2d(layer_input, skip_input, filters, f_size=4, dropout_rate=0):\n \"\"\"Layers used during upsampling\"\"\"\n u = tf.keras.layers.UpSampling2D(size=2)(layer_input)\n u = tf.keras.layers.Conv2D(filters, kernel_size=f_size, strides=1, padding='same', activation='relu')(u)\n if dropout_rate:\n u = tf.keras.layers.Dropout(dropout_rate)(u)\n u = tf.keras.layers.BatchNormalization(momentum=0.8)(u)\n u = tf.keras.layers.Concatenate()([u, skip_input])\n return u\n\n # Image input\n d0 = tf.keras.layers.Input(shape=self.img_shape)\n\n # Downsampling\n d1 = conv2d(d0, self.gf, bn=False)\n d2 = conv2d(d1, self.gf*2)\n d3 = conv2d(d2, self.gf*4)\n d4 = conv2d(d3, self.gf*8)\n d5 = conv2d(d4, self.gf*8)\n d6 = conv2d(d5, self.gf*8)\n d7 = conv2d(d6, self.gf*8)\n\n # Upsampling\n u1 = deconv2d(d7, d6, self.gf*8)\n u2 = deconv2d(u1, d5, self.gf*8)\n u3 = deconv2d(u2, d4, self.gf*8)\n u4 = deconv2d(u3, d3, self.gf*4)\n u5 = deconv2d(u4, d2, self.gf*2)\n u6 = deconv2d(u5, d1, self.gf)\n\n u7 = tf.keras.layers.UpSampling2D(size=2)(u6)\n output_img = tf.keras.layers.Conv2D(self.channels, kernel_size=4, strides=1, padding='same', activation='tanh')(u7)\n\n return tf.keras.models.Model(d0, output_img)\n\n def build_discriminator(self):\n\n def d_layer(layer_input, filters, f_size=4, bn=True):\n \"\"\"Discriminator layer\"\"\"\n d = tf.keras.layers.Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)\n d = tf.keras.layers.LeakyReLU(alpha=0.2)(d)\n if bn:\n d = tf.keras.layers.BatchNormalization(momentum=0.8)(d)\n return d\n\n img_A = tf.keras.layers.Input(shape=self.img_shape)\n img_B = tf.keras.layers.Input(shape=self.img_shape)\n\n # Concatenate image and conditioning image by channels to produce input\n combined_imgs = tf.keras.layers.Concatenate(axis=-1)([img_A, img_B])\n\n d1 = d_layer(combined_imgs, self.df, bn=False)\n d2 = d_layer(d1, self.df*2)\n d3 = d_layer(d2, self.df*4)\n d4 = d_layer(d3, self.df*8)\n\n validity = tf.keras.layers.Conv2D(1, kernel_size=4, strides=1, padding='same')(d4)\n\n return tf.keras.models.Model([img_A, img_B], validity)\n\n def train(self, epochs, batch_size=1, sample_interval=50):\n start_time = datetime.datetime.now()\n\n # Adversarial loss ground truths\n valid = np.ones((batch_size,) + self.disc_patch)\n fake = np.zeros((batch_size,) + self.disc_patch)\n\n for epoch in range(epochs):\n for batch_i, (imgs_A, imgs_B) in enumerate(self.data_loader.load_batch(batch_size)):\n # ---------------------\n # Train Discriminator\n # ---------------------\n\n # Condition on B and generate a translated version\n #fake_A = self.generator.predict(imgs_B)\n\n #Condition on A and generate a translated version\n fake_B = self.generator.predict(imgs_A)\n\n # Train the discriminators (original images = real / generated = Fake)\n d_loss_real = self.discriminator.train_on_batch([imgs_A, imgs_B], valid)\n d_loss_fake = self.discriminator.train_on_batch([imgs_A, fake_B], fake)\n d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)\n\n # -----------------\n # Train Generator\n # -----------------\n\n # Train the generators\n g_loss = 
self.combined.train_on_batch([imgs_A, imgs_B], [valid, imgs_B])\n\n elapsed_time = datetime.datetime.now() - start_time\n # Plot the progress\n print (\"[Epoch %d/%d] [Batch %d/%d] [D loss: %f, acc: %3d%%] [G loss: %f] time: %s\" % (epoch, epochs,\n batch_i, self.data_loader.n_batches,\n d_loss[0], 100*d_loss[1],\n g_loss[0],\n elapsed_time))\n\n # If at save interval => save generated image samples\n if batch_i % sample_interval == 0:\n self.sample_images(epoch, batch_i)\n\n def sample_images(self, epoch, batch_i):\n os.makedirs(datafolderpath+'images/%s' % self.dataset_name, exist_ok=True)\n r, c = 3, 3\n\n imgs_A, imgs_B = self.data_loader.load_data(batch_size=3, is_testing=True)\n fake_B = self.generator.predict(imgs_A)\n\n gen_imgs = np.concatenate([imgs_A, fake_B, imgs_B])\n\n # Rescale images 0 - 1\n gen_imgs = 0.5 * gen_imgs + 0.5\n\n titles = ['Condition', 'Generated', 'Original']\n fig, axs = plt.subplots(r, c, figsize=(15,15))\n cnt = 0\n for i in range(r):\n for j in range(c):\n axs[i,j].imshow(gen_imgs[cnt][:,:,0], cmap='gray')\n axs[i, j].set_title(titles[i])\n axs[i,j].axis('off')\n cnt += 1\n fig.savefig(datafolderpath+\"images/%s/%d_%d.png\" % (self.dataset_name, epoch, batch_i))\n plt.close()", "_____no_output_____" ], [ "gan = Pix2Pix()\n# gan.train(epochs=200, batch_size=1, sample_interval=200)", "_____no_output_____" ], [ "gan.train(epochs=2, batch_size=1, sample_interval=200)\n# training logs are hidden in published notebook", "[Epoch 0/2] [Batch 0/268] [D loss: 10.981784, acc: 27%] [G loss: 105.881699] time: 0:00:14.708430\n[Epoch 0/2] [Batch 1/268] [D loss: 2.747050, acc: 49%] [G loss: 82.725914] time: 0:00:18.957927\n[Epoch 0/2] [Batch 2/268] [D loss: 2.094071, acc: 56%] [G loss: 64.095383] time: 0:00:20.210713\n[Epoch 0/2] [Batch 3/268] [D loss: 2.037389, acc: 68%] [G loss: 55.840153] time: 0:00:20.993789\n[Epoch 0/2] [Batch 4/268] [D loss: 2.660841, acc: 67%] [G loss: 51.921574] time: 0:00:21.800849\n[Epoch 0/2] [Batch 5/268] [D loss: 4.701869, acc: 67%] [G loss: 42.899521] time: 0:00:22.604292\n[Epoch 0/2] [Batch 6/268] [D loss: 3.547069, acc: 40%] [G loss: 42.863689] time: 0:00:23.379793\n[Epoch 0/2] [Batch 7/268] [D loss: 2.422915, acc: 56%] [G loss: 37.543488] time: 0:00:24.177720\n[Epoch 0/2] [Batch 8/268] [D loss: 3.381839, acc: 47%] [G loss: 36.235023] time: 0:00:25.179363\n[Epoch 0/2] [Batch 9/268] [D loss: 1.907106, acc: 85%] [G loss: 33.160801] time: 0:00:26.093107\n[Epoch 0/2] [Batch 10/268] [D loss: 3.220186, acc: 76%] [G loss: 30.851946] time: 0:00:26.917524\n[Epoch 0/2] [Batch 11/268] [D loss: 3.830930, acc: 54%] [G loss: 29.705523] time: 0:00:27.713045\n[Epoch 0/2] [Batch 12/268] [D loss: 2.309875, acc: 80%] [G loss: 32.172623] time: 0:00:28.522217\n[Epoch 0/2] [Batch 13/268] [D loss: 1.970862, acc: 36%] [G loss: 27.286730] time: 0:00:29.403636\n[Epoch 0/2] [Batch 14/268] [D loss: 1.005874, acc: 74%] [G loss: 26.967375] time: 0:00:30.359194\n[Epoch 0/2] [Batch 15/268] [D loss: 3.776114, acc: 22%] [G loss: 32.427296] time: 0:00:31.170280\n[Epoch 0/2] [Batch 16/268] [D loss: 3.123112, acc: 60%] [G loss: 25.701683] time: 0:00:31.928382\n[Epoch 0/2] [Batch 17/268] [D loss: 3.248322, acc: 71%] [G loss: 24.904493] time: 0:00:32.834097\n[Epoch 0/2] [Batch 18/268] [D loss: 0.922108, acc: 77%] [G loss: 24.477497] time: 0:00:33.670402\n[Epoch 0/2] [Batch 19/268] [D loss: 0.656756, acc: 75%] [G loss: 24.585941] time: 0:00:34.462736\n[Epoch 0/2] [Batch 20/268] [D loss: 1.047903, acc: 50%] [G loss: 22.075245] time: 0:00:35.375660\n[Epoch 0/2] [Batch 21/268] [D 
loss: 0.684929, acc: 83%] [G loss: 21.048639] time: 0:00:36.146434\n[Epoch 0/2] [Batch 22/268] [D loss: 0.378021, acc: 78%] [G loss: 19.643406] time: 0:00:37.075189\n[Epoch 0/2] [Batch 23/268] [D loss: 0.415312, acc: 76%] [G loss: 21.184881] time: 0:00:37.853917\n[Epoch 0/2] [Batch 24/268] [D loss: 0.622900, acc: 86%] [G loss: 18.389254] time: 0:00:38.763000\n[Epoch 0/2] [Batch 25/268] [D loss: 0.424036, acc: 83%] [G loss: 18.211752] time: 0:00:39.581085\n[Epoch 0/2] [Batch 26/268] [D loss: 0.369210, acc: 84%] [G loss: 17.600496] time: 0:00:40.379722\n[Epoch 0/2] [Batch 27/268] [D loss: 0.652362, acc: 88%] [G loss: 18.773438] time: 0:00:41.292345\n[Epoch 0/2] [Batch 28/268] [D loss: 0.329907, acc: 84%] [G loss: 19.703989] time: 0:00:42.119569\n[Epoch 0/2] [Batch 29/268] [D loss: 0.418307, acc: 87%] [G loss: 16.278702] time: 0:00:42.956500\n[Epoch 0/2] [Batch 30/268] [D loss: 0.434530, acc: 80%] [G loss: 18.341930] time: 0:00:43.905918\n[Epoch 0/2] [Batch 31/268] [D loss: 0.289283, acc: 90%] [G loss: 15.414997] time: 0:00:44.842383\n[Epoch 0/2] [Batch 32/268] [D loss: 0.353832, acc: 90%] [G loss: 14.825281] time: 0:00:45.647134\n[Epoch 0/2] [Batch 33/268] [D loss: 0.308422, acc: 86%] [G loss: 17.133360] time: 0:00:46.483758\n[Epoch 0/2] [Batch 34/268] [D loss: 0.669427, acc: 81%] [G loss: 15.343877] time: 0:00:47.382622\n[Epoch 0/2] [Batch 35/268] [D loss: 0.290619, acc: 87%] [G loss: 16.340078] time: 0:00:48.187690\n[Epoch 0/2] [Batch 36/268] [D loss: 0.477149, acc: 53%] [G loss: 13.056844] time: 0:00:49.010745\n[Epoch 0/2] [Batch 37/268] [D loss: 0.305098, acc: 81%] [G loss: 13.123245] time: 0:00:49.969589\n[Epoch 0/2] [Batch 38/268] [D loss: 0.262322, acc: 82%] [G loss: 12.160419] time: 0:00:50.885840\n[Epoch 0/2] [Batch 39/268] [D loss: 0.225694, acc: 82%] [G loss: 11.543864] time: 0:00:51.816351\n[Epoch 0/2] [Batch 40/268] [D loss: 0.297901, acc: 80%] [G loss: 13.882371] time: 0:00:52.725831\n[Epoch 0/2] [Batch 41/268] [D loss: 0.313126, acc: 58%] [G loss: 9.294624] time: 0:00:53.552004\n[Epoch 0/2] [Batch 42/268] [D loss: 0.393508, acc: 59%] [G loss: 13.539740] time: 0:00:54.332809\n[Epoch 0/2] [Batch 43/268] [D loss: 0.256080, acc: 63%] [G loss: 8.639797] time: 0:00:55.134501\n[Epoch 0/2] [Batch 44/268] [D loss: 0.290555, acc: 79%] [G loss: 12.687198] time: 0:00:56.100966\n[Epoch 0/2] [Batch 45/268] [D loss: 0.401816, acc: 87%] [G loss: 11.876177] time: 0:00:57.029128\n[Epoch 0/2] [Batch 46/268] [D loss: 0.257508, acc: 83%] [G loss: 11.388842] time: 0:00:57.840695\n[Epoch 0/2] [Batch 47/268] [D loss: 0.239530, acc: 87%] [G loss: 9.815493] time: 0:00:58.735149\n[Epoch 0/2] [Batch 48/268] [D loss: 0.191709, acc: 87%] [G loss: 9.661805] time: 0:00:59.584473\n[Epoch 0/2] [Batch 49/268] [D loss: 0.192647, acc: 92%] [G loss: 9.801064] time: 0:01:00.391844\n[Epoch 0/2] [Batch 50/268] [D loss: 0.178170, acc: 89%] [G loss: 9.388764] time: 0:01:01.216381\n[Epoch 0/2] [Batch 51/268] [D loss: 0.228177, acc: 89%] [G loss: 10.681031] time: 0:01:02.032029\n[Epoch 0/2] [Batch 52/268] [D loss: 0.206288, acc: 90%] [G loss: 9.028593] time: 0:01:02.900562\n[Epoch 0/2] [Batch 53/268] [D loss: 0.222983, acc: 86%] [G loss: 11.714141] time: 0:01:03.868161\n[Epoch 0/2] [Batch 54/268] [D loss: 0.209702, acc: 66%] [G loss: 6.247114] time: 0:01:04.721651\n[Epoch 0/2] [Batch 55/268] [D loss: 0.401942, acc: 67%] [G loss: 11.275800] time: 0:01:05.549551\n[Epoch 0/2] [Batch 56/268] [D loss: 0.246669, acc: 87%] [G loss: 8.928958] time: 0:01:06.285415\n[Epoch 0/2] [Batch 57/268] [D loss: 0.250364, acc: 89%] [G loss: 
10.192481] time: 0:01:07.061973\n[Epoch 0/2] [Batch 58/268] [D loss: 0.264927, acc: 70%] [G loss: 9.920525] time: 0:01:07.981118\n[Epoch 0/2] [Batch 59/268] [D loss: 0.174328, acc: 87%] [G loss: 11.800402] time: 0:01:08.953365\n[Epoch 0/2] [Batch 60/268] [D loss: 0.167886, acc: 92%] [G loss: 11.935860] time: 0:01:09.771627\n[Epoch 0/2] [Batch 61/268] [D loss: 0.328284, acc: 89%] [G loss: 11.983109] time: 0:01:10.603612\n[Epoch 0/2] [Batch 62/268] [D loss: 0.268975, acc: 91%] [G loss: 8.733275] time: 0:01:11.480064\n[Epoch 0/2] [Batch 63/268] [D loss: 0.186347, acc: 92%] [G loss: 9.905431] time: 0:01:12.304352\n[Epoch 0/2] [Batch 64/268] [D loss: 0.174952, acc: 91%] [G loss: 10.392522] time: 0:01:13.137555\n[Epoch 0/2] [Batch 65/268] [D loss: 0.250486, acc: 87%] [G loss: 10.046844] time: 0:01:13.924114\n[Epoch 0/2] [Batch 66/268] [D loss: 0.172771, acc: 89%] [G loss: 8.607230] time: 0:01:14.769180\n[Epoch 0/2] [Batch 67/268] [D loss: 0.253889, acc: 89%] [G loss: 8.292444] time: 0:01:15.580105\n[Epoch 0/2] [Batch 68/268] [D loss: 0.288048, acc: 87%] [G loss: 10.717911] time: 0:01:16.564886\n[Epoch 0/2] [Batch 69/268] [D loss: 0.367962, acc: 55%] [G loss: 5.480659] time: 0:01:17.413873\n[Epoch 0/2] [Batch 70/268] [D loss: 0.178363, acc: 86%] [G loss: 8.100766] time: 0:01:18.291556\n[Epoch 0/2] [Batch 71/268] [D loss: 0.201543, acc: 83%] [G loss: 5.499842] time: 0:01:19.119505\n[Epoch 0/2] [Batch 72/268] [D loss: 0.300811, acc: 61%] [G loss: 10.338575] time: 0:01:19.941182\n[Epoch 0/2] [Batch 73/268] [D loss: 0.134318, acc: 96%] [G loss: 5.495667] time: 0:01:20.790151\n[Epoch 0/2] [Batch 74/268] [D loss: 0.130354, acc: 91%] [G loss: 9.343209] time: 0:01:21.797608\n[Epoch 0/2] [Batch 75/268] [D loss: 0.102282, acc: 94%] [G loss: 9.083460] time: 0:01:22.891538\n[Epoch 0/2] [Batch 76/268] [D loss: 0.252826, acc: 94%] [G loss: 4.665810] time: 0:01:23.681328\n[Epoch 0/2] [Batch 77/268] [D loss: 0.094070, acc: 93%] [G loss: 8.340666] time: 0:01:24.477123\n[Epoch 0/2] [Batch 78/268] [D loss: 0.158105, acc: 92%] [G loss: 8.910589] time: 0:01:25.355423\n[Epoch 0/2] [Batch 79/268] [D loss: 0.225201, acc: 88%] [G loss: 4.251089] time: 0:01:26.210255\n[Epoch 0/2] [Batch 80/268] [D loss: 0.232744, acc: 85%] [G loss: 8.745691] time: 0:01:27.042069\n[Epoch 0/2] [Batch 81/268] [D loss: 0.128298, acc: 93%] [G loss: 9.102203] time: 0:01:27.863253\n[Epoch 0/2] [Batch 82/268] [D loss: 0.095325, acc: 94%] [G loss: 7.067022] time: 0:01:28.661945\n[Epoch 0/2] [Batch 83/268] [D loss: 0.241096, acc: 95%] [G loss: 9.222006] time: 0:01:29.474670\n[Epoch 0/2] [Batch 84/268] [D loss: 0.226578, acc: 90%] [G loss: 7.122580] time: 0:01:30.247880\n[Epoch 0/2] [Batch 85/268] [D loss: 0.165303, acc: 92%] [G loss: 6.844867] time: 0:01:31.090254\n[Epoch 0/2] [Batch 86/268] [D loss: 0.145870, acc: 92%] [G loss: 7.055514] time: 0:01:31.907430\n[Epoch 0/2] [Batch 87/268] [D loss: 0.133700, acc: 91%] [G loss: 7.885912] time: 0:01:32.733026\n[Epoch 0/2] [Batch 88/268] [D loss: 0.178412, acc: 89%] [G loss: 9.053495] time: 0:01:33.555766\n[Epoch 0/2] [Batch 89/268] [D loss: 0.134311, acc: 92%] [G loss: 8.732852] time: 0:01:34.480533\n[Epoch 0/2] [Batch 90/268] [D loss: 0.104388, acc: 94%] [G loss: 8.431882] time: 0:01:35.200248\n[Epoch 0/2] [Batch 91/268] [D loss: 0.198308, acc: 76%] [G loss: 3.607203] time: 0:01:35.991644\n[Epoch 0/2] [Batch 92/268] [D loss: 0.248819, acc: 81%] [G loss: 8.343700] time: 0:01:36.846390\n[Epoch 0/2] [Batch 93/268] [D loss: 0.191418, acc: 93%] [G loss: 7.507263] time: 0:01:37.856709\n[Epoch 0/2] [Batch 
94/268] [D loss: 0.149406, acc: 92%] [G loss: 5.940534] time: 0:01:38.781999\n[Epoch 0/2] [Batch 95/268] [D loss: 0.160665, acc: 91%] [G loss: 5.520713] time: 0:01:39.622447\n[Epoch 0/2] [Batch 96/268] [D loss: 0.157241, acc: 93%] [G loss: 5.726872] time: 0:01:40.451475\n[Epoch 0/2] [Batch 97/268] [D loss: 0.166985, acc: 90%] [G loss: 6.178223] time: 0:01:41.257792\n[Epoch 0/2] [Batch 98/268] [D loss: 0.264139, acc: 88%] [G loss: 6.022235] time: 0:01:42.078743\n[Epoch 0/2] [Batch 99/268] [D loss: 0.367955, acc: 89%] [G loss: 7.327287] time: 0:01:42.984458\n[Epoch 0/2] [Batch 100/268] [D loss: 0.482164, acc: 84%] [G loss: 10.622207] time: 0:01:43.901270\n[Epoch 0/2] [Batch 101/268] [D loss: 0.229237, acc: 83%] [G loss: 10.958774] time: 0:01:44.749624\n[Epoch 0/2] [Batch 102/268] [D loss: 0.155354, acc: 95%] [G loss: 4.958889] time: 0:01:45.671537\n[Epoch 0/2] [Batch 103/268] [D loss: 0.218621, acc: 97%] [G loss: 9.577101] time: 0:01:46.495438\n[Epoch 0/2] [Batch 104/268] [D loss: 0.188287, acc: 95%] [G loss: 6.624455] time: 0:01:47.259077\n[Epoch 0/2] [Batch 105/268] [D loss: 0.240079, acc: 89%] [G loss: 6.587315] time: 0:01:48.022809\n[Epoch 0/2] [Batch 106/268] [D loss: 0.201110, acc: 71%] [G loss: 3.139123] time: 0:01:48.780107\n[Epoch 0/2] [Batch 107/268] [D loss: 0.187445, acc: 97%] [G loss: 3.334103] time: 0:01:49.633000\n[Epoch 0/2] [Batch 108/268] [D loss: 0.166585, acc: 87%] [G loss: 7.374913] time: 0:01:50.509901\n[Epoch 0/2] [Batch 109/268] [D loss: 0.170303, acc: 91%] [G loss: 7.404650] time: 0:01:51.380099\n[Epoch 0/2] [Batch 110/268] [D loss: 0.159774, acc: 89%] [G loss: 2.794493] time: 0:01:52.253143\n[Epoch 0/2] [Batch 111/268] [D loss: 0.110390, acc: 96%] [G loss: 5.728540] time: 0:01:53.069130\n[Epoch 0/2] [Batch 112/268] [D loss: 0.151458, acc: 90%] [G loss: 4.801531] time: 0:01:53.856797\n[Epoch 0/2] [Batch 113/268] [D loss: 0.260234, acc: 85%] [G loss: 4.809408] time: 0:01:54.595734\n[Epoch 0/2] [Batch 114/268] [D loss: 0.462660, acc: 84%] [G loss: 5.245414] time: 0:01:55.391086\n[Epoch 0/2] [Batch 115/268] [D loss: 0.919482, acc: 62%] [G loss: 7.441872] time: 0:01:56.344634\n[Epoch 0/2] [Batch 116/268] [D loss: 2.731024, acc: 81%] [G loss: 7.712674] time: 0:01:57.169019\n[Epoch 0/2] [Batch 117/268] [D loss: 5.308433, acc: 52%] [G loss: 9.004658] time: 0:01:58.099871\n[Epoch 0/2] [Batch 118/268] [D loss: 3.372885, acc: 61%] [G loss: 5.166845] time: 0:01:58.864732\n[Epoch 0/2] [Batch 119/268] [D loss: 1.018369, acc: 84%] [G loss: 2.899927] time: 0:01:59.664428\n[Epoch 0/2] [Batch 120/268] [D loss: 0.280895, acc: 90%] [G loss: 6.976927] time: 0:02:00.531094\n[Epoch 0/2] [Batch 121/268] [D loss: 0.154641, acc: 92%] [G loss: 7.550140] time: 0:02:01.380338\n[Epoch 0/2] [Batch 122/268] [D loss: 0.144990, acc: 94%] [G loss: 5.989964] time: 0:02:02.217728\n[Epoch 0/2] [Batch 123/268] [D loss: 0.144718, acc: 98%] [G loss: 4.962803] time: 0:02:03.015614\n[Epoch 0/2] [Batch 124/268] [D loss: 0.082732, acc: 95%] [G loss: 4.768465] time: 0:02:03.810763\n[Epoch 0/2] [Batch 125/268] [D loss: 0.314968, acc: 90%] [G loss: 7.446101] time: 0:02:04.775467\n[Epoch 0/2] [Batch 126/268] [D loss: 0.129297, acc: 94%] [G loss: 5.482934] time: 0:02:05.575858\n[Epoch 0/2] [Batch 127/268] [D loss: 0.103420, acc: 93%] [G loss: 4.951503] time: 0:02:06.430998\n[Epoch 0/2] [Batch 128/268] [D loss: 0.080505, acc: 95%] [G loss: 4.414116] time: 0:02:07.398902\n[Epoch 0/2] [Batch 129/268] [D loss: 0.092434, acc: 94%] [G loss: 5.660404] time: 0:02:08.310243\n[Epoch 0/2] [Batch 130/268] [D loss: 0.160359, 
acc: 91%] [G loss: 4.554512] time: 0:02:09.211597\n[Epoch 0/2] [Batch 131/268] [D loss: 0.195883, acc: 69%] [G loss: 3.759223] time: 0:02:10.063994\n[Epoch 0/2] [Batch 132/268] [D loss: 0.425501, acc: 59%] [G loss: 1.770502] time: 0:02:10.934314\n[Epoch 0/2] [Batch 133/268] [D loss: 0.202724, acc: 76%] [G loss: 1.851524] time: 0:02:11.874745\n[Epoch 0/2] [Batch 134/268] [D loss: 0.566320, acc: 42%] [G loss: 4.074591] time: 0:02:12.688226\n[Epoch 0/2] [Batch 135/268] [D loss: 0.344660, acc: 40%] [G loss: 1.335171] time: 0:02:13.617627\n[Epoch 0/2] [Batch 136/268] [D loss: 0.357034, acc: 26%] [G loss: 2.787927] time: 0:02:14.433646\n[Epoch 0/2] [Batch 137/268] [D loss: 0.486647, acc: 34%] [G loss: 2.598639] time: 0:02:15.273894\n[Epoch 0/2] [Batch 138/268] [D loss: 0.321180, acc: 50%] [G loss: 1.993953] time: 0:02:16.100014\n[Epoch 0/2] [Batch 139/268] [D loss: 0.407629, acc: 36%] [G loss: 1.755347] time: 0:02:16.908672\n[Epoch 0/2] [Batch 140/268] [D loss: 0.364201, acc: 18%] [G loss: 1.553655] time: 0:02:17.803345\n[Epoch 0/2] [Batch 141/268] [D loss: 0.516319, acc: 8%] [G loss: 1.634444] time: 0:02:18.706328\n[Epoch 0/2] [Batch 142/268] [D loss: 0.477931, acc: 13%] [G loss: 1.966009] time: 0:02:19.562921\n[Epoch 0/2] [Batch 143/268] [D loss: 0.345688, acc: 38%] [G loss: 0.711484] time: 0:02:20.391503\n[Epoch 0/2] [Batch 144/268] [D loss: 0.324469, acc: 50%] [G loss: 1.253824] time: 0:02:21.384088\n[Epoch 0/2] [Batch 145/268] [D loss: 0.337393, acc: 37%] [G loss: 0.552125] time: 0:02:22.193561\n[Epoch 0/2] [Batch 146/268] [D loss: 0.341018, acc: 39%] [G loss: 1.687922] time: 0:02:22.967086\n[Epoch 0/2] [Batch 147/268] [D loss: 0.420426, acc: 12%] [G loss: 0.682717] time: 0:02:23.807141\n[Epoch 0/2] [Batch 148/268] [D loss: 0.407950, acc: 15%] [G loss: 1.910601] time: 0:02:24.622607\n[Epoch 0/2] [Batch 149/268] [D loss: 0.346430, acc: 16%] [G loss: 0.624478] time: 0:02:25.555877\n[Epoch 0/2] [Batch 150/268] [D loss: 0.347439, acc: 40%] [G loss: 1.689431] time: 0:02:26.432589\n[Epoch 0/2] [Batch 151/268] [D loss: 0.279787, acc: 54%] [G loss: 1.427534] time: 0:02:27.245759\n[Epoch 0/2] [Batch 152/268] [D loss: 0.317332, acc: 24%] [G loss: 1.511920] time: 0:02:27.990716\n[Epoch 0/2] [Batch 153/268] [D loss: 0.293575, acc: 45%] [G loss: 1.248483] time: 0:02:28.777706\n[Epoch 0/2] [Batch 154/268] [D loss: 0.345986, acc: 44%] [G loss: 0.514202] time: 0:02:29.550215\n[Epoch 0/2] [Batch 155/268] [D loss: 0.304767, acc: 44%] [G loss: 1.205941] time: 0:02:30.327112\n[Epoch 0/2] [Batch 156/268] [D loss: 0.329469, acc: 40%] [G loss: 0.669679] time: 0:02:31.096007\n[Epoch 0/2] [Batch 157/268] [D loss: 0.346820, acc: 13%] [G loss: 0.609565] time: 0:02:32.042146\n[Epoch 0/2] [Batch 158/268] [D loss: 0.327819, acc: 39%] [G loss: 0.639540] time: 0:02:32.780727\n[Epoch 0/2] [Batch 159/268] [D loss: 0.359854, acc: 19%] [G loss: 1.087464] time: 0:02:33.556648\n[Epoch 0/2] [Batch 160/268] [D loss: 0.346937, acc: 13%] [G loss: 1.296940] time: 0:02:34.302057\n[Epoch 0/2] [Batch 161/268] [D loss: 0.302665, acc: 61%] [G loss: 1.189180] time: 0:02:35.164549\n[Epoch 0/2] [Batch 162/268] [D loss: 0.326753, acc: 49%] [G loss: 1.013025] time: 0:02:36.112916\n[Epoch 0/2] [Batch 163/268] [D loss: 0.290799, acc: 53%] [G loss: 1.387647] time: 0:02:36.908196\n[Epoch 0/2] [Batch 164/268] [D loss: 0.294013, acc: 22%] [G loss: 1.251189] time: 0:02:37.857316\n[Epoch 0/2] [Batch 165/268] [D loss: 0.277477, acc: 47%] [G loss: 1.008085] time: 0:02:38.678755\n[Epoch 0/2] [Batch 166/268] [D loss: 0.306257, acc: 24%] [G loss: 
0.612817] time: 0:02:39.461228\n[Epoch 0/2] [Batch 167/268] [D loss: 0.320802, acc: 39%] [G loss: 0.509924] time: 0:02:40.295964\n[Epoch 0/2] [Batch 168/268] [D loss: 0.323187, acc: 26%] [G loss: 1.115168] time: 0:02:41.381880\n[Epoch 0/2] [Batch 169/268] [D loss: 0.330281, acc: 19%] [G loss: 1.042496] time: 0:02:42.158813\n[Epoch 0/2] [Batch 170/268] [D loss: 0.320954, acc: 32%] [G loss: 0.984105] time: 0:02:42.976411\n[Epoch 0/2] [Batch 171/268] [D loss: 0.287415, acc: 46%] [G loss: 1.148712] time: 0:02:45.203645\n[Epoch 0/2] [Batch 172/268] [D loss: 0.380014, acc: 14%] [G loss: 1.260698] time: 0:02:46.063813\n[Epoch 0/2] [Batch 173/268] [D loss: 0.329709, acc: 58%] [G loss: 0.776585] time: 0:02:46.858395\n[Epoch 0/2] [Batch 174/268] [D loss: 0.304243, acc: 24%] [G loss: 0.960031] time: 0:02:47.657240\n[Epoch 0/2] [Batch 175/268] [D loss: 0.311930, acc: 39%] [G loss: 1.348520] time: 0:02:48.436879\n[Epoch 0/2] [Batch 176/268] [D loss: 0.329760, acc: 47%] [G loss: 0.620120] time: 0:02:49.180580\n[Epoch 0/2] [Batch 177/268] [D loss: 0.344208, acc: 20%] [G loss: 0.790130] time: 0:02:50.028125\n[Epoch 0/2] [Batch 178/268] [D loss: 0.308623, acc: 44%] [G loss: 1.025683] time: 0:02:50.843254\n[Epoch 0/2] [Batch 179/268] [D loss: 0.322464, acc: 21%] [G loss: 1.175819] time: 0:02:51.651231\n[Epoch 0/2] [Batch 180/268] [D loss: 0.309999, acc: 45%] [G loss: 0.594779] time: 0:02:52.628773\n[Epoch 0/2] [Batch 181/268] [D loss: 0.303865, acc: 35%] [G loss: 0.563572] time: 0:02:53.445755\n[Epoch 0/2] [Batch 182/268] [D loss: 0.294537, acc: 48%] [G loss: 0.981146] time: 0:02:54.261119\n[Epoch 0/2] [Batch 183/268] [D loss: 0.293437, acc: 41%] [G loss: 1.159376] time: 0:02:55.104049\n[Epoch 0/2] [Batch 184/268] [D loss: 0.358135, acc: 14%] [G loss: 1.625163] time: 0:02:55.854376\n[Epoch 0/2] [Batch 185/268] [D loss: 0.292393, acc: 50%] [G loss: 0.793892] time: 0:02:56.837967\n[Epoch 0/2] [Batch 186/268] [D loss: 0.293584, acc: 41%] [G loss: 0.684479] time: 0:02:57.706566\n[Epoch 0/2] [Batch 187/268] [D loss: 0.298469, acc: 16%] [G loss: 0.574271] time: 0:02:58.619402\n[Epoch 0/2] [Batch 188/268] [D loss: 0.297930, acc: 39%] [G loss: 0.680973] time: 0:02:59.481141\n[Epoch 0/2] [Batch 189/268] [D loss: 0.316568, acc: 18%] [G loss: 0.891179] time: 0:03:00.337477\n[Epoch 0/2] [Batch 190/268] [D loss: 0.292620, acc: 32%] [G loss: 0.824922] time: 0:03:01.231119\n[Epoch 0/2] [Batch 191/268] [D loss: 0.318621, acc: 41%] [G loss: 0.734840] time: 0:03:02.026400\n[Epoch 0/2] [Batch 192/268] [D loss: 0.319589, acc: 19%] [G loss: 0.591805] time: 0:03:02.791827\n[Epoch 0/2] [Batch 193/268] [D loss: 0.293188, acc: 42%] [G loss: 0.861924] time: 0:03:03.635710\n[Epoch 0/2] [Batch 194/268] [D loss: 0.305258, acc: 36%] [G loss: 0.482484] time: 0:03:04.443767\n[Epoch 0/2] [Batch 195/268] [D loss: 0.323223, acc: 22%] [G loss: 0.836155] time: 0:03:05.358785\n[Epoch 0/2] [Batch 196/268] [D loss: 0.321479, acc: 67%] [G loss: 0.712461] time: 0:03:06.240991\n[Epoch 0/2] [Batch 197/268] [D loss: 0.326348, acc: 17%] [G loss: 1.378330] time: 0:03:06.987609\n[Epoch 0/2] [Batch 198/268] [D loss: 0.313363, acc: 28%] [G loss: 0.626028] time: 0:03:07.699143\n[Epoch 0/2] [Batch 199/268] [D loss: 0.301380, acc: 12%] [G loss: 0.522539] time: 0:03:08.468065\n[Epoch 0/2] [Batch 200/268] [D loss: 0.315842, acc: 19%] [G loss: 1.324035] time: 0:03:09.260045\n[Epoch 0/2] [Batch 201/268] [D loss: 0.298596, acc: 16%] [G loss: 0.529161] time: 0:03:12.643567\n[Epoch 0/2] [Batch 202/268] [D loss: 0.315947, acc: 18%] [G loss: 1.574727] time: 
0:03:13.441992\n[Epoch 0/2] [Batch 203/268] [D loss: 0.309641, acc: 24%] [G loss: 1.286882] time: 0:03:14.355110\n[Epoch 0/2] [Batch 204/268] [D loss: 0.284355, acc: 26%] [G loss: 0.666807] time: 0:03:15.134318\n[Epoch 0/2] [Batch 205/268] [D loss: 0.308982, acc: 31%] [G loss: 0.595560] time: 0:03:15.886024\n[Epoch 0/2] [Batch 206/268] [D loss: 0.305795, acc: 40%] [G loss: 0.585608] time: 0:03:16.676594\n[Epoch 0/2] [Batch 207/268] [D loss: 0.329650, acc: 16%] [G loss: 0.473160] time: 0:03:17.402875\n[Epoch 0/2] [Batch 208/268] [D loss: 0.301901, acc: 41%] [G loss: 0.525054] time: 0:03:18.355195\n[Epoch 0/2] [Batch 209/268] [D loss: 0.302780, acc: 24%] [G loss: 0.648806] time: 0:03:19.129071\n[Epoch 0/2] [Batch 210/268] [D loss: 0.283968, acc: 24%] [G loss: 0.631097] time: 0:03:20.054598\n[Epoch 0/2] [Batch 211/268] [D loss: 0.289627, acc: 22%] [G loss: 0.657598] time: 0:03:20.889106\n[Epoch 0/2] [Batch 212/268] [D loss: 0.302338, acc: 44%] [G loss: 0.642818] time: 0:03:21.684863\n[Epoch 0/2] [Batch 213/268] [D loss: 0.299361, acc: 20%] [G loss: 0.906591] time: 0:03:22.610278\n[Epoch 0/2] [Batch 214/268] [D loss: 0.295676, acc: 38%] [G loss: 0.477295] time: 0:03:23.500800\n[Epoch 0/2] [Batch 215/268] [D loss: 0.308347, acc: 15%] [G loss: 1.103749] time: 0:03:24.245732\n[Epoch 0/2] [Batch 216/268] [D loss: 0.306282, acc: 53%] [G loss: 1.438562] time: 0:03:25.086423\n[Epoch 0/2] [Batch 217/268] [D loss: 0.309870, acc: 18%] [G loss: 1.432648] time: 0:03:25.863258\n[Epoch 0/2] [Batch 218/268] [D loss: 0.304910, acc: 13%] [G loss: 0.564983] time: 0:03:26.694613\n[Epoch 0/2] [Batch 219/268] [D loss: 0.281090, acc: 28%] [G loss: 1.401973] time: 0:03:27.599816\n[Epoch 0/2] [Batch 220/268] [D loss: 0.308488, acc: 23%] [G loss: 0.802056] time: 0:03:28.451656\n[Epoch 0/2] [Batch 221/268] [D loss: 0.296588, acc: 59%] [G loss: 0.668823] time: 0:03:29.368078\n[Epoch 0/2] [Batch 222/268] [D loss: 0.310589, acc: 16%] [G loss: 1.164246] time: 0:03:30.114238\n[Epoch 0/2] [Batch 223/268] [D loss: 0.303062, acc: 29%] [G loss: 0.605272] time: 0:03:30.848355\n[Epoch 0/2] [Batch 224/268] [D loss: 0.331264, acc: 16%] [G loss: 1.335448] time: 0:03:31.616839\n[Epoch 0/2] [Batch 225/268] [D loss: 0.307877, acc: 49%] [G loss: 1.302171] time: 0:03:32.566796\n[Epoch 0/2] [Batch 226/268] [D loss: 0.296337, acc: 22%] [G loss: 0.607083] time: 0:03:33.367854\n[Epoch 0/2] [Batch 227/268] [D loss: 0.291730, acc: 23%] [G loss: 0.989055] time: 0:03:34.137780\n[Epoch 0/2] [Batch 228/268] [D loss: 0.310396, acc: 15%] [G loss: 0.519499] time: 0:03:35.037234\n[Epoch 0/2] [Batch 229/268] [D loss: 0.303158, acc: 37%] [G loss: 0.553183] time: 0:03:35.933368\n[Epoch 0/2] [Batch 230/268] [D loss: 0.299463, acc: 15%] [G loss: 0.441416] time: 0:03:36.749753\n[Epoch 0/2] [Batch 231/268] [D loss: 0.304340, acc: 17%] [G loss: 1.336140] time: 0:03:37.478001\n[Epoch 0/2] [Batch 232/268] [D loss: 0.291663, acc: 16%] [G loss: 0.597881] time: 0:03:38.397927\n[Epoch 0/2] [Batch 233/268] [D loss: 0.304229, acc: 22%] [G loss: 1.611199] time: 0:03:39.122307\n[Epoch 0/2] [Batch 234/268] [D loss: 0.300663, acc: 41%] [G loss: 1.249300] time: 0:03:39.895663\n[Epoch 0/2] [Batch 235/268] [D loss: 0.316433, acc: 15%] [G loss: 0.599501] time: 0:03:40.657929\n[Epoch 0/2] [Batch 236/268] [D loss: 0.303476, acc: 43%] [G loss: 0.476576] time: 0:03:41.430813\n[Epoch 0/2] [Batch 237/268] [D loss: 0.360523, acc: 17%] [G loss: 0.787335] time: 0:03:42.220031\n[Epoch 0/2] [Batch 238/268] [D loss: 0.301268, acc: 67%] [G loss: 0.652142] time: 0:03:42.962373\n[Epoch 
0/2] [Batch 239/268] [D loss: 0.290869, acc: 17%] [G loss: 1.041226] time: 0:03:43.691454\n[Epoch 0/2] [Batch 240/268] [D loss: 0.286218, acc: 40%] [G loss: 0.649148] time: 0:03:44.609396\n[Epoch 0/2] [Batch 241/268] [D loss: 0.295939, acc: 50%] [G loss: 0.478832] time: 0:03:45.435386\n[Epoch 0/2] [Batch 242/268] [D loss: 0.298775, acc: 26%] [G loss: 0.464854] time: 0:03:46.314257\n[Epoch 0/2] [Batch 243/268] [D loss: 0.300349, acc: 26%] [G loss: 0.783734] time: 0:03:47.100113\n[Epoch 0/2] [Batch 244/268] [D loss: 0.287063, acc: 36%] [G loss: 0.500836] time: 0:03:47.837939\n[Epoch 0/2] [Batch 245/268] [D loss: 0.279343, acc: 31%] [G loss: 0.837266] time: 0:03:48.565823\n[Epoch 0/2] [Batch 246/268] [D loss: 0.297022, acc: 17%] [G loss: 0.573182] time: 0:03:49.325813\n[Epoch 0/2] [Batch 247/268] [D loss: 0.293609, acc: 24%] [G loss: 1.191526] time: 0:03:50.081521\n[Epoch 0/2] [Batch 248/268] [D loss: 0.291898, acc: 17%] [G loss: 0.742836] time: 0:03:51.196780\n[Epoch 0/2] [Batch 249/268] [D loss: 0.295930, acc: 20%] [G loss: 0.481674] time: 0:03:52.047549\n[Epoch 0/2] [Batch 250/268] [D loss: 0.300842, acc: 21%] [G loss: 0.470277] time: 0:03:52.840763\n[Epoch 0/2] [Batch 251/268] [D loss: 0.287982, acc: 43%] [G loss: 0.758448] time: 0:03:53.722385\n[Epoch 0/2] [Batch 252/268] [D loss: 0.290884, acc: 21%] [G loss: 1.120087] time: 0:03:54.580064\n[Epoch 0/2] [Batch 253/268] [D loss: 0.298073, acc: 17%] [G loss: 0.557395] time: 0:03:55.357810\n[Epoch 0/2] [Batch 254/268] [D loss: 0.291107, acc: 14%] [G loss: 0.537968] time: 0:03:56.226495\n[Epoch 0/2] [Batch 255/268] [D loss: 0.287333, acc: 27%] [G loss: 1.405124] time: 0:03:57.021646\n[Epoch 0/2] [Batch 256/268] [D loss: 0.285892, acc: 30%] [G loss: 1.048976] time: 0:03:57.810554\n[Epoch 0/2] [Batch 257/268] [D loss: 0.291867, acc: 24%] [G loss: 0.432277] time: 0:03:58.624070\n[Epoch 0/2] [Batch 258/268] [D loss: 0.283682, acc: 22%] [G loss: 0.683639] time: 0:03:59.389337\n[Epoch 0/2] [Batch 259/268] [D loss: 0.293287, acc: 18%] [G loss: 1.045851] time: 0:04:00.193181\n[Epoch 0/2] [Batch 260/268] [D loss: 0.285971, acc: 24%] [G loss: 1.073325] time: 0:04:01.106771\n[Epoch 0/2] [Batch 261/268] [D loss: 0.285504, acc: 40%] [G loss: 0.677712] time: 0:04:01.965031\n[Epoch 0/2] [Batch 262/268] [D loss: 0.290708, acc: 20%] [G loss: 0.704974] time: 0:04:02.813350\n[Epoch 0/2] [Batch 263/268] [D loss: 0.295743, acc: 21%] [G loss: 1.119153] time: 0:04:03.559145\n[Epoch 0/2] [Batch 264/268] [D loss: 0.295066, acc: 59%] [G loss: 0.488880] time: 0:04:04.353268\n[Epoch 0/2] [Batch 265/268] [D loss: 0.305237, acc: 20%] [G loss: 0.590109] time: 0:04:05.183753\n[Epoch 0/2] [Batch 266/268] [D loss: 0.308253, acc: 45%] [G loss: 0.411455] time: 0:04:05.935293\n[Epoch 1/2] [Batch 0/268] [D loss: 0.300533, acc: 20%] [G loss: 0.930078] time: 0:04:06.540857\n[Epoch 1/2] [Batch 1/268] [D loss: 0.294917, acc: 14%] [G loss: 0.456936] time: 0:04:09.698050\n[Epoch 1/2] [Batch 2/268] [D loss: 0.288949, acc: 38%] [G loss: 0.998450] time: 0:04:10.320238\n[Epoch 1/2] [Batch 3/268] [D loss: 0.299835, acc: 14%] [G loss: 0.579158] time: 0:04:10.918177\n[Epoch 1/2] [Batch 4/268] [D loss: 0.297606, acc: 33%] [G loss: 0.538974] time: 0:04:11.497934\n[Epoch 1/2] [Batch 5/268] [D loss: 0.287266, acc: 12%] [G loss: 0.476837] time: 0:04:12.082781\n[Epoch 1/2] [Batch 6/268] [D loss: 0.282736, acc: 26%] [G loss: 0.559363] time: 0:04:12.677683\n[Epoch 1/2] [Batch 7/268] [D loss: 0.285727, acc: 30%] [G loss: 1.000581] time: 0:04:13.257519\n[Epoch 1/2] [Batch 8/268] [D loss: 0.290033, acc: 
38%] [G loss: 0.482144] time: 0:04:13.829993\n[Epoch 1/2] [Batch 9/268] [D loss: 0.287968, acc: 13%] [G loss: 0.429368] time: 0:04:14.407145\n[Epoch 1/2] [Batch 10/268] [D loss: 0.286753, acc: 14%] [G loss: 0.515467] time: 0:04:14.980147\n[Epoch 1/2] [Batch 11/268] [D loss: 0.296771, acc: 20%] [G loss: 0.437307] time: 0:04:15.549207\n[Epoch 1/2] [Batch 12/268] [D loss: 0.289448, acc: 24%] [G loss: 0.728409] time: 0:04:16.124932\n[Epoch 1/2] [Batch 13/268] [D loss: 0.293480, acc: 18%] [G loss: 0.437541] time: 0:04:16.698469\n[Epoch 1/2] [Batch 14/268] [D loss: 0.303262, acc: 15%] [G loss: 0.405853] time: 0:04:17.278573\n[Epoch 1/2] [Batch 15/268] [D loss: 0.281510, acc: 26%] [G loss: 0.659889] time: 0:04:17.862463\n[Epoch 1/2] [Batch 16/268] [D loss: 0.298428, acc: 18%] [G loss: 0.561094] time: 0:04:18.445729\n[Epoch 1/2] [Batch 17/268] [D loss: 0.284284, acc: 55%] [G loss: 0.546154] time: 0:04:19.030885\n[Epoch 1/2] [Batch 18/268] [D loss: 0.299315, acc: 17%] [G loss: 1.044215] time: 0:04:19.621614\n[Epoch 1/2] [Batch 19/268] [D loss: 0.284041, acc: 26%] [G loss: 0.447122] time: 0:04:20.204031\n[Epoch 1/2] [Batch 20/268] [D loss: 0.310463, acc: 28%] [G loss: 0.603819] time: 0:04:20.785360\n[Epoch 1/2] [Batch 21/268] [D loss: 0.295804, acc: 20%] [G loss: 1.042895] time: 0:04:21.375462\n[Epoch 1/2] [Batch 22/268] [D loss: 0.320221, acc: 12%] [G loss: 0.533281] time: 0:04:21.957415\n[Epoch 1/2] [Batch 23/268] [D loss: 0.355114, acc: 75%] [G loss: 0.439516] time: 0:04:22.545632\n[Epoch 1/2] [Batch 24/268] [D loss: 0.296989, acc: 19%] [G loss: 0.419208] time: 0:04:23.115096\n[Epoch 1/2] [Batch 25/268] [D loss: 0.292461, acc: 16%] [G loss: 1.000349] time: 0:04:23.701891\n[Epoch 1/2] [Batch 26/268] [D loss: 0.290078, acc: 38%] [G loss: 1.030200] time: 0:04:24.286791\n[Epoch 1/2] [Batch 27/268] [D loss: 0.294178, acc: 20%] [G loss: 0.657133] time: 0:04:24.868195\n[Epoch 1/2] [Batch 28/268] [D loss: 0.291541, acc: 52%] [G loss: 0.588505] time: 0:04:25.453110\n[Epoch 1/2] [Batch 29/268] [D loss: 0.281905, acc: 23%] [G loss: 0.513661] time: 0:04:26.030216\n[Epoch 1/2] [Batch 30/268] [D loss: 0.280369, acc: 26%] [G loss: 0.628069] time: 0:04:26.614677\n[Epoch 1/2] [Batch 31/268] [D loss: 0.300223, acc: 47%] [G loss: 0.603308] time: 0:04:27.199060\n[Epoch 1/2] [Batch 32/268] [D loss: 0.305149, acc: 13%] [G loss: 0.434226] time: 0:04:27.775757\n[Epoch 1/2] [Batch 33/268] [D loss: 0.285312, acc: 38%] [G loss: 1.021521] time: 0:04:28.356588\n[Epoch 1/2] [Batch 34/268] [D loss: 0.279579, acc: 16%] [G loss: 0.549452] time: 0:04:28.933021\n[Epoch 1/2] [Batch 35/268] [D loss: 0.278358, acc: 39%] [G loss: 0.625760] time: 0:04:29.514791\n[Epoch 1/2] [Batch 36/268] [D loss: 0.290975, acc: 17%] [G loss: 0.535739] time: 0:04:30.093692\n[Epoch 1/2] [Batch 37/268] [D loss: 0.282757, acc: 37%] [G loss: 0.978034] time: 0:04:30.672047\n[Epoch 1/2] [Batch 38/268] [D loss: 0.281616, acc: 25%] [G loss: 0.452334] time: 0:04:31.240202\n[Epoch 1/2] [Batch 39/268] [D loss: 0.285717, acc: 17%] [G loss: 0.989729] time: 0:04:31.814715\n[Epoch 1/2] [Batch 40/268] [D loss: 0.285346, acc: 28%] [G loss: 1.292802] time: 0:04:32.395879\n[Epoch 1/2] [Batch 41/268] [D loss: 0.295427, acc: 15%] [G loss: 0.564309] time: 0:04:32.960388\n[Epoch 1/2] [Batch 42/268] [D loss: 0.287976, acc: 35%] [G loss: 1.300864] time: 0:04:33.555351\n[Epoch 1/2] [Batch 43/268] [D loss: 0.302475, acc: 37%] [G loss: 0.438812] time: 0:04:34.127260\n[Epoch 1/2] [Batch 44/268] [D loss: 0.306182, acc: 19%] [G loss: 0.590563] time: 0:04:34.709085\n[Epoch 1/2] 
[Batch 45/268] [D loss: 0.279827, acc: 17%] [G loss: 0.611805] time: 0:04:35.284143\n[Epoch 1/2] [Batch 46/268] [D loss: 0.287583, acc: 51%] [G loss: 1.037193] time: 0:04:35.863870\n[Epoch 1/2] [Batch 47/268] [D loss: 0.289660, acc: 16%] [G loss: 1.007941] time: 0:04:36.444944\n[Epoch 1/2] [Batch 48/268] [D loss: 0.284728, acc: 24%] [G loss: 0.979582] time: 0:04:37.013535\n[Epoch 1/2] [Batch 49/268] [D loss: 0.287473, acc: 41%] [G loss: 0.517039] time: 0:04:37.588445\n[Epoch 1/2] [Batch 50/268] [D loss: 0.283780, acc: 39%] [G loss: 0.535422] time: 0:04:38.163077\n[Epoch 1/2] [Batch 51/268] [D loss: 0.286089, acc: 22%] [G loss: 1.006658] time: 0:04:38.741561\n[Epoch 1/2] [Batch 52/268] [D loss: 0.279032, acc: 26%] [G loss: 0.497604] time: 0:04:39.310383\n[Epoch 1/2] [Batch 53/268] [D loss: 0.287674, acc: 61%] [G loss: 0.590585] time: 0:04:39.891535\n[Epoch 1/2] [Batch 54/268] [D loss: 0.294814, acc: 23%] [G loss: 0.508732] time: 0:04:40.458978\n[Epoch 1/2] [Batch 55/268] [D loss: 0.289251, acc: 55%] [G loss: 0.539924] time: 0:04:41.039150\n[Epoch 1/2] [Batch 56/268] [D loss: 0.279033, acc: 21%] [G loss: 0.958781] time: 0:04:41.611160\n[Epoch 1/2] [Batch 57/268] [D loss: 0.276389, acc: 24%] [G loss: 0.520503] time: 0:04:42.185141\n[Epoch 1/2] [Batch 58/268] [D loss: 0.282995, acc: 36%] [G loss: 0.524771] time: 0:04:42.761410\n[Epoch 1/2] [Batch 59/268] [D loss: 0.281352, acc: 26%] [G loss: 0.544289] time: 0:04:43.333033\n" ] ], [ [ "Let's see how our model performed over time.", "_____no_output_____" ] ], [ [ "from PIL import Image as Img", "_____no_output_____" ], [ "Image('/content/drive/My Drive/ToDos/Research/MidcurveNN/code/data/images/pix2pix/0_0.png')", "_____no_output_____" ], [ "Img('/content/drive/My Drive/ToDos/Research/MidcurveNN/code/data/images/pix2pix/0_200.png')", "_____no_output_____" ] ], [ [ "This is the result of 2 iterations. You can train the model for more than 2 iterations and it will produce better results. Also, try this model with different datasets.", "_____no_output_____" ] ], [ [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
4a6a817cd1e8bd686bbd7d80de91add6d0f08b84
95,414
ipynb
Jupyter Notebook
bronze/Q48_Reflections_Solutions.ipynb
dev-aditya/QWorld_Summer_School_2021
1b8711327845617ca8dc32ff2a20f461d0ee01c7
[ "Apache-2.0", "CC-BY-4.0" ]
1
2021-08-15T10:57:16.000Z
2021-08-15T10:57:16.000Z
bronze/Q48_Reflections_Solutions.ipynb
dev-aditya/QWorld_Summer_School_2021
1b8711327845617ca8dc32ff2a20f461d0ee01c7
[ "Apache-2.0", "CC-BY-4.0" ]
null
null
null
bronze/Q48_Reflections_Solutions.ipynb
dev-aditya/QWorld_Summer_School_2021
1b8711327845617ca8dc32ff2a20f461d0ee01c7
[ "Apache-2.0", "CC-BY-4.0" ]
3
2021-08-11T11:12:38.000Z
2021-09-14T09:15:08.000Z
240.944444
21,968
0.912193
[ [ [ "<a href=\"https://qworld.net\" target=\"_blank\" align=\"left\"><img src=\"../qworld/images/header.jpg\" align=\"left\"></a>\n$ \\newcommand{\\bra}[1]{\\langle #1|} $\n$ \\newcommand{\\ket}[1]{|#1\\rangle} $\n$ \\newcommand{\\braket}[2]{\\langle #1|#2\\rangle} $\n$ \\newcommand{\\dot}[2]{ #1 \\cdot #2} $\n$ \\newcommand{\\biginner}[2]{\\left\\langle #1,#2\\right\\rangle} $\n$ \\newcommand{\\mymatrix}[2]{\\left( \\begin{array}{#1} #2\\end{array} \\right)} $\n$ \\newcommand{\\myvector}[1]{\\mymatrix{c}{#1}} $\n$ \\newcommand{\\myrvector}[1]{\\mymatrix{r}{#1}} $\n$ \\newcommand{\\mypar}[1]{\\left( #1 \\right)} $\n$ \\newcommand{\\mybigpar}[1]{ \\Big( #1 \\Big)} $\n$ \\newcommand{\\sqrttwo}{\\frac{1}{\\sqrt{2}}} $\n$ \\newcommand{\\dsqrttwo}{\\dfrac{1}{\\sqrt{2}}} $\n$ \\newcommand{\\onehalf}{\\frac{1}{2}} $\n$ \\newcommand{\\donehalf}{\\dfrac{1}{2}} $\n$ \\newcommand{\\hadamard}{ \\mymatrix{rr}{ \\sqrttwo & \\sqrttwo \\\\ \\sqrttwo & -\\sqrttwo }} $\n$ \\newcommand{\\vzero}{\\myvector{1\\\\0}} $\n$ \\newcommand{\\vone}{\\myvector{0\\\\1}} $\n$ \\newcommand{\\stateplus}{\\myvector{ \\sqrttwo \\\\ \\sqrttwo } } $\n$ \\newcommand{\\stateminus}{ \\myrvector{ \\sqrttwo \\\\ -\\sqrttwo } } $\n$ \\newcommand{\\myarray}[2]{ \\begin{array}{#1}#2\\end{array}} $\n$ \\newcommand{\\X}{ \\mymatrix{cc}{0 & 1 \\\\ 1 & 0} } $\n$ \\newcommand{\\I}{ \\mymatrix{rr}{1 & 0 \\\\ 0 & 1} } $\n$ \\newcommand{\\Z}{ \\mymatrix{rr}{1 & 0 \\\\ 0 & -1} } $\n$ \\newcommand{\\Htwo}{ \\mymatrix{rrrr}{ \\frac{1}{2} & \\frac{1}{2} & \\frac{1}{2} & \\frac{1}{2} \\\\ \\frac{1}{2} & -\\frac{1}{2} & \\frac{1}{2} & -\\frac{1}{2} \\\\ \\frac{1}{2} & \\frac{1}{2} & -\\frac{1}{2} & -\\frac{1}{2} \\\\ \\frac{1}{2} & -\\frac{1}{2} & -\\frac{1}{2} & \\frac{1}{2} } } $\n$ \\newcommand{\\CNOT}{ \\mymatrix{cccc}{1 & 0 & 0 & 0 \\\\ 0 & 1 & 0 & 0 \\\\ 0 & 0 & 0 & 1 \\\\ 0 & 0 & 1 & 0} } $\n$ \\newcommand{\\norm}[1]{ \\left\\lVert #1 \\right\\rVert } $\n$ \\newcommand{\\pstate}[1]{ \\lceil \\mspace{-1mu} #1 \\mspace{-1.5mu} \\rfloor } $\n$ \\newcommand{\\greenbit}[1] {\\mathbf{{\\color{green}#1}}} $\n$ \\newcommand{\\bluebit}[1] {\\mathbf{{\\color{blue}#1}}} $\n$ \\newcommand{\\redbit}[1] {\\mathbf{{\\color{red}#1}}} $\n$ \\newcommand{\\brownbit}[1] {\\mathbf{{\\color{brown}#1}}} $\n$ \\newcommand{\\blackbit}[1] {\\mathbf{{\\color{black}#1}}} $", "_____no_output_____" ], [ "<font style=\"font-size:28px;\" align=\"left\"><b> <font color=\"blue\"> Solutions for </font>Reflections </b></font>\n<br>\n_prepared by Abuzer Yakaryilmaz_\n<br><br>", "_____no_output_____" ], [ "<a id=\"task1\"></a>\n<h3> Task 1</h3>\n\nCreate a quantum ciruit with 5 qubits.\n\nApply h-gate (Hadamard operator) to each qubit.\n\nApply z-gate ($Z$ operator) to randomly picked qubits. 
(i.e., $ mycircuit.z(qreg[i]) $)\n\nApply h-gate to each qubit.\n\nMeasure each qubit.\n\nExecute your program 1000 times.\n\nCompare the outcomes of the qubits affected by z-gates, and the outcomes of the qubits not affected by z-gates.\n\nDoes z-gate change the outcome?\n\nWhy?", "_____no_output_____" ], [ "<h3> Solution </h3>", "_____no_output_____" ] ], [ [ "# import all necessary objects and methods for quantum circuits\nfrom qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer\n# import randrange for random choices\nfrom random import randrange\n\nnumber_of_qubit = 5\n\n# define a quantum register with 5 qubits\nq = QuantumRegister(number_of_qubit)\n\n# define a classical register with 5 bits\nc = ClassicalRegister(number_of_qubit)\n\n# define our quantum circuit\nqc = QuantumCircuit(q,c)\n\n# apply h-gate to all qubits\nfor i in range(number_of_qubit):\n qc.h(q[i])\n\n# apply z-gate to randomly picked qubits\nfor i in range(number_of_qubit):\n if randrange(2) == 0: # the qubit with index i is picked to apply z-gate\n qc.z(q[i])\n \n# apply h-gate to all qubits\nfor i in range(number_of_qubit):\n qc.h(q[i])\n \nqc.barrier()\n \n# measure all qubits\nqc.measure(q,c)\n\n# draw the circuit\ndisplay(qc.draw(output='mpl'))", "_____no_output_____" ], [ "# execute the circuit 1000 times in the local simulator\njob = execute(qc,Aer.get_backend('qasm_simulator'),shots=1000)\ncounts = job.result().get_counts(qc)\nprint(counts)", "{'11010': 1000}\n" ] ], [ [ "<a id=\"task2\"></a>\n<h3> Task 2 </h3>\n\nRandomly create a quantum state and multiply it with Hadamard matrix to find its reflection.\n\nDraw both states.\n\nRepeat the task for a few times.", "_____no_output_____" ], [ "<h3>Solution</h3>", "_____no_output_____" ], [ "A function for randomly creating a 2-dimensional quantum state:", "_____no_output_____" ] ], [ [ "# randomly create a 2-dimensional quantum state\nfrom math import cos, sin, pi\nfrom random import randrange\ndef random_qstate_by_angle():\n angle_degree = randrange(360)\n angle_radian = 2*pi*angle_degree/360\n return [cos(angle_radian),sin(angle_radian)]", "_____no_output_____" ], [ "%run quantum.py\n\ndraw_qubit()\n\n# line of reflection for Hadamard\nfrom matplotlib.pyplot import arrow\narrow(-1.109,-0.459,2.218,0.918,linestyle='dotted',color='red')\n\n[x1,y1] = random_qstate_by_angle()\n\nprint(x1,y1)\n\nsqrttwo=2**0.5\noversqrttwo = 1/sqrttwo\n\n[x2,y2] = [ oversqrttwo*x1 + oversqrttwo*y1 , oversqrttwo*x1 - oversqrttwo*y1 ]\n\nprint(x2,y2)\n\ndraw_quantum_state(x1,y1,\"main\")\n\ndraw_quantum_state(x2,y2,\"ref\")\n\nshow_plt()", "0.8100416404457961 0.5863723567357892\n0.9874138067509114 0.15815806725448367\n" ] ], [ [ "<a id=\"task3\"></a>\n<h3> Task 3 </h3>\n\nFind the matrix representing the reflection over the line $y=x$.\n\n<i>Hint: Think about the reflections of the points $ \\myrvector{0 \\\\ 1} $, $ \\myrvector{-1 \\\\ 0} $, and $ \\myrvector{-\\sqrttwo \\\\ \\sqrttwo} $ over the line $y=x$.</i>\n\nRandomly create a quantum state and multiply it with this matrix to find its reflection over the line $y = x$.\n\nDraw both states.\n\nRepeat the task for a few times.", "_____no_output_____" ], [ "<h3>Solution</h3>", "_____no_output_____" ], [ "The reflection over the line $y=x$ swaps the first and second amplitudes.\n\nThis is the operetor NOT: $ X = \\mymatrix{rr}{0 & 1 \\\\ 1 & 0} $.", "_____no_output_____" ], [ "A function for randomly creating a 2-dimensional quantum state:", "_____no_output_____" ] ], [ [ "# randomly create a 
2-dimensional quantum state\nfrom math import cos, sin, pi\nfrom random import randrange\ndef random_qstate_by_angle():\n angle_degree = randrange(360)\n angle_radian = 2*pi*angle_degree/360\n return [cos(angle_radian),sin(angle_radian)]", "_____no_output_____" ] ], [ [ "Reflecting the randomly picked quantum state over the line $y=x$. ", "_____no_output_____" ] ], [ [ "%run quantum.py\n\ndraw_qubit()\n\n# the line y=x\nfrom matplotlib.pyplot import arrow\narrow(-1,-1,2,2,linestyle='dotted',color='red')\n\n[x1,y1] = random_qstate_by_angle()\n\n[x2,y2] = [y1,x1]\n\ndraw_quantum_state(x1,y1,\"main\")\ndraw_quantum_state(x2,y2,\"ref\")\n\nshow_plt()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a6a96c4bd2ad48b590f3bc13fa04ad514cc770f
31,759
ipynb
Jupyter Notebook
SentimentDetectionRNN.ipynb
lucianaribeiro/filmood
e0600dcbe659744d416a80d24bedda65522be591
[ "Apache-2.0" ]
1
2020-10-05T03:18:53.000Z
2020-10-05T03:18:53.000Z
SentimentDetectionRNN.ipynb
lucianaribeiro/filmood-rnn
e0600dcbe659744d416a80d24bedda65522be591
[ "Apache-2.0" ]
9
2019-11-30T17:17:04.000Z
2019-12-07T08:36:16.000Z
SentimentDetectionRNN.ipynb
lucianaribeiro/filmood
e0600dcbe659744d416a80d24bedda65522be591
[ "Apache-2.0" ]
null
null
null
58.166667
2,773
0.525268
[ [ [ "<a href=\"https://colab.research.google.com/github/lucianaribeiro/filmood/blob/master/SentimentDetectionRNN.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "# Installing Tensorflow\n! pip install --upgrade tensorflow\n\n# Installing Keras\n! pip install --upgrade keras\n\n# Install other packages\n! pip install --upgrade pip nltk numpy", "Collecting tensorflow\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/46/0f/7bd55361168bb32796b360ad15a25de6966c9c1beb58a8e30c01c8279862/tensorflow-2.0.0-cp36-cp36m-manylinux2010_x86_64.whl (86.3MB)\n\u001b[K |████████████████████████████████| 86.3MB 133kB/s \n\u001b[?25hRequirement already satisfied, skipping upgrade: protobuf>=3.6.1 in /usr/local/lib/python3.6/dist-packages (from tensorflow) (3.10.0)\nRequirement already satisfied, skipping upgrade: grpcio>=1.8.6 in /usr/local/lib/python3.6/dist-packages (from tensorflow) (1.15.0)\nRequirement already satisfied, skipping upgrade: gast==0.2.2 in /usr/local/lib/python3.6/dist-packages (from tensorflow) (0.2.2)\nRequirement already satisfied, skipping upgrade: six>=1.10.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow) (1.12.0)\nRequirement already satisfied, skipping upgrade: google-pasta>=0.1.6 in /usr/local/lib/python3.6/dist-packages (from tensorflow) (0.1.8)\nRequirement already satisfied, skipping upgrade: absl-py>=0.7.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow) (0.8.1)\nRequirement already satisfied, skipping upgrade: astor>=0.6.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow) (0.8.0)\nRequirement already satisfied, skipping upgrade: keras-applications>=1.0.8 in /usr/local/lib/python3.6/dist-packages (from tensorflow) (1.0.8)\nRequirement already satisfied, skipping upgrade: keras-preprocessing>=1.0.5 in /usr/local/lib/python3.6/dist-packages (from tensorflow) (1.1.0)\nCollecting tensorboard<2.1.0,>=2.0.0\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/76/54/99b9d5d52d5cb732f099baaaf7740403e83fe6b0cedde940fabd2b13d75a/tensorboard-2.0.2-py3-none-any.whl (3.8MB)\n\u001b[K |████████████████████████████████| 3.8MB 27.4MB/s \n\u001b[?25hRequirement already satisfied, skipping upgrade: termcolor>=1.1.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow) (1.1.0)\nRequirement already satisfied, skipping upgrade: wrapt>=1.11.1 in /usr/local/lib/python3.6/dist-packages (from tensorflow) (1.11.2)\nRequirement already satisfied, skipping upgrade: wheel>=0.26 in /usr/local/lib/python3.6/dist-packages (from tensorflow) (0.33.6)\nRequirement already satisfied, skipping upgrade: opt-einsum>=2.3.2 in /usr/local/lib/python3.6/dist-packages (from tensorflow) (3.1.0)\nRequirement already satisfied, skipping upgrade: numpy<2.0,>=1.16.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow) (1.17.4)\nCollecting tensorflow-estimator<2.1.0,>=2.0.0\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/fc/08/8b927337b7019c374719145d1dceba21a8bb909b93b1ad6f8fb7d22c1ca1/tensorflow_estimator-2.0.1-py2.py3-none-any.whl (449kB)\n\u001b[K |████████████████████████████████| 450kB 52.3MB/s \n\u001b[?25hRequirement already satisfied, skipping upgrade: setuptools in /usr/local/lib/python3.6/dist-packages (from protobuf>=3.6.1->tensorflow) (42.0.1)\nRequirement already satisfied, skipping upgrade: h5py in /usr/local/lib/python3.6/dist-packages (from keras-applications>=1.0.8->tensorflow) (2.8.0)\nRequirement already 
satisfied, skipping upgrade: google-auth-oauthlib<0.5,>=0.4.1 in /usr/local/lib/python3.6/dist-packages (from tensorboard<2.1.0,>=2.0.0->tensorflow) (0.4.1)\nRequirement already satisfied, skipping upgrade: requests<3,>=2.21.0 in /usr/local/lib/python3.6/dist-packages (from tensorboard<2.1.0,>=2.0.0->tensorflow) (2.21.0)\nCollecting google-auth<2,>=1.6.3\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/ec/11/1d90cbfa72a084b08498e8cea1fee199bc965cdac391d241f5ae6257073e/google_auth-1.7.2-py2.py3-none-any.whl (74kB)\n\u001b[K |████████████████████████████████| 81kB 9.2MB/s \n\u001b[?25hRequirement already satisfied, skipping upgrade: werkzeug>=0.11.15 in /usr/local/lib/python3.6/dist-packages (from tensorboard<2.1.0,>=2.0.0->tensorflow) (0.16.0)\nRequirement already satisfied, skipping upgrade: markdown>=2.6.8 in /usr/local/lib/python3.6/dist-packages (from tensorboard<2.1.0,>=2.0.0->tensorflow) (3.1.1)\nRequirement already satisfied, skipping upgrade: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.6/dist-packages (from google-auth-oauthlib<0.5,>=0.4.1->tensorboard<2.1.0,>=2.0.0->tensorflow) (1.3.0)\nRequirement already satisfied, skipping upgrade: idna<2.9,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests<3,>=2.21.0->tensorboard<2.1.0,>=2.0.0->tensorflow) (2.8)\nRequirement already satisfied, skipping upgrade: chardet<3.1.0,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests<3,>=2.21.0->tensorboard<2.1.0,>=2.0.0->tensorflow) (3.0.4)\nRequirement already satisfied, skipping upgrade: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests<3,>=2.21.0->tensorboard<2.1.0,>=2.0.0->tensorflow) (2019.9.11)\nRequirement already satisfied, skipping upgrade: urllib3<1.25,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests<3,>=2.21.0->tensorboard<2.1.0,>=2.0.0->tensorflow) (1.24.3)\nRequirement already satisfied, skipping upgrade: rsa<4.1,>=3.1.4 in /usr/local/lib/python3.6/dist-packages (from google-auth<2,>=1.6.3->tensorboard<2.1.0,>=2.0.0->tensorflow) (4.0)\nRequirement already satisfied, skipping upgrade: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.6/dist-packages (from google-auth<2,>=1.6.3->tensorboard<2.1.0,>=2.0.0->tensorflow) (0.2.7)\nRequirement already satisfied, skipping upgrade: cachetools<3.2,>=2.0.0 in /usr/local/lib/python3.6/dist-packages (from google-auth<2,>=1.6.3->tensorboard<2.1.0,>=2.0.0->tensorflow) (3.1.1)\nRequirement already satisfied, skipping upgrade: oauthlib>=3.0.0 in /usr/local/lib/python3.6/dist-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<0.5,>=0.4.1->tensorboard<2.1.0,>=2.0.0->tensorflow) (3.1.0)\nRequirement already satisfied, skipping upgrade: pyasn1>=0.1.3 in /usr/local/lib/python3.6/dist-packages (from rsa<4.1,>=3.1.4->google-auth<2,>=1.6.3->tensorboard<2.1.0,>=2.0.0->tensorflow) (0.4.8)\n\u001b[31mERROR: tensorboard 2.0.2 has requirement grpcio>=1.24.3, but you'll have grpcio 1.15.0 which is incompatible.\u001b[0m\n\u001b[31mERROR: google-colab 1.0.0 has requirement google-auth~=1.4.0, but you'll have google-auth 1.7.2 which is incompatible.\u001b[0m\nInstalling collected packages: google-auth, tensorboard, tensorflow-estimator, tensorflow\n Found existing installation: google-auth 1.4.2\n Uninstalling google-auth-1.4.2:\n Successfully uninstalled google-auth-1.4.2\n Found existing installation: tensorboard 1.15.0\n Uninstalling tensorboard-1.15.0:\n Successfully uninstalled tensorboard-1.15.0\n Found existing installation: tensorflow-estimator 1.15.1\n Uninstalling 
tensorflow-estimator-1.15.1:\n Successfully uninstalled tensorflow-estimator-1.15.1\n Found existing installation: tensorflow 1.15.0\n Uninstalling tensorflow-1.15.0:\n Successfully uninstalled tensorflow-1.15.0\nSuccessfully installed google-auth-1.7.2 tensorboard-2.0.2 tensorflow-2.0.0 tensorflow-estimator-2.0.1\n" ], [ "# Importing the libraries\nfrom keras.datasets import imdb\nfrom keras.preprocessing import sequence\nfrom keras import Sequential\nfrom keras.layers import Embedding, LSTM, Dense, Dropout\nfrom numpy import array", "Using TensorFlow backend.\n" ], [ "# Disable tensor flow warnings for better view\nfrom tensorflow.python.util import deprecation\ndeprecation._PRINT_DEPRECATION_WARNINGS = False\n \n# Loading dataset from IMDB\nvocabulary_size = 10000\n(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words = vocabulary_size)", "Downloading data from https://s3.amazonaws.com/text-datasets/imdb.npz\n17465344/17464789 [==============================] - 2s 0us/step\n" ], [ "# Inspect a sample review and its label\nprint('Review')\nprint(X_train[6])\nprint('Label')\nprint(y_train[6])\n\n# Review back to the original words\nword2id = imdb.get_word_index()\nid2word = {i: word for word, i in word2id.items()}\nprint('Review with words')\nprint([id2word.get(i, ' ') for i in X_train[6]])\nprint('Label')\nprint(y_train[6])", "Review\n[1, 6740, 365, 1234, 5, 1156, 354, 11, 14, 5327, 6638, 7, 1016, 2, 5940, 356, 44, 4, 1349, 500, 746, 5, 200, 4, 4132, 11, 2, 9363, 1117, 1831, 7485, 5, 4831, 26, 6, 2, 4183, 17, 369, 37, 215, 1345, 143, 2, 5, 1838, 8, 1974, 15, 36, 119, 257, 85, 52, 486, 9, 6, 2, 8564, 63, 271, 6, 196, 96, 949, 4121, 4, 2, 7, 4, 2212, 2436, 819, 63, 47, 77, 7175, 180, 6, 227, 11, 94, 2494, 2, 13, 423, 4, 168, 7, 4, 22, 5, 89, 665, 71, 270, 56, 5, 13, 197, 12, 161, 5390, 99, 76, 23, 2, 7, 419, 665, 40, 91, 85, 108, 7, 4, 2084, 5, 4773, 81, 55, 52, 1901]\nLabel\n1\nDownloading data from https://s3.amazonaws.com/text-datasets/imdb_word_index.json\n1646592/1641221 [==============================] - 1s 1us/step\nReview with words\n['the', 'boiled', 'full', 'involving', 'to', 'impressive', 'boring', 'this', 'as', 'murdering', 'naschy', 'br', 'villain', 'and', 'suggestion', 'need', 'has', 'of', 'costumes', 'b', 'message', 'to', 'may', 'of', 'props', 'this', 'and', 'concentrates', 'concept', 'issue', 'skeptical', 'to', \"god's\", 'he', 'is', 'and', 'unfolds', 'movie', 'women', 'like', \"isn't\", 'surely', \"i'm\", 'and', 'to', 'toward', 'in', \"here's\", 'for', 'from', 'did', 'having', 'because', 'very', 'quality', 'it', 'is', 'and', 'starship', 'really', 'book', 'is', 'both', 'too', 'worked', 'carl', 'of', 'and', 'br', 'of', 'reviewer', 'closer', 'figure', 'really', 'there', 'will', 'originals', 'things', 'is', 'far', 'this', 'make', 'mistakes', 'and', 'was', \"couldn't\", 'of', 'few', 'br', 'of', 'you', 'to', \"don't\", 'female', 'than', 'place', 'she', 'to', 'was', 'between', 'that', 'nothing', 'dose', 'movies', 'get', 'are', 'and', 'br', 'yes', 'female', 'just', 'its', 'because', 'many', 'br', 'of', 'overly', 'to', 'descent', 'people', 'time', 'very', 'bland']\nLabel\n1\n" ], [ "# Ensure that all sequences in a list have the same length\nX_train = sequence.pad_sequences(X_train, maxlen=500)\nX_test = sequence.pad_sequences(X_test, maxlen=500)", "_____no_output_____" ], [ "# Initialising the RNN\nregressor=Sequential()\n\n# Adding a first Embedding layer and some Dropout regularization\nregressor.add(Embedding(vocabulary_size, 32, 
input_length=500))\nregressor.add(Dropout(0.2))\n\n# Adding a second LSTM layer and some Dropout regularization\nregressor.add(LSTM(units = 50, return_sequences = True))\nregressor.add(Dropout(0.2))\n\n# Adding a third LSTM layer and some Dropout regularization\nregressor.add(LSTM(units = 50, return_sequences = True))\nregressor.add(Dropout(0.2))\n\n# Adding a fourth LSTM layer and some Dropout regularization\nregressor.add(LSTM(units = 50))\nregressor.add(Dropout(0.2))\n\n# Adding the output layer\nregressor.add(Dense(1, activation='sigmoid'))\n\n# Compiling the RNN\nregressor.compile(loss='binary_crossentropy', \n optimizer='adam', \n metrics=['accuracy'])\n\n\nX_valid, y_valid = X_train[:64], y_train[:64]\nX_train2, y_train2 = X_train[64:], y_train[64:]\nregressor.fit(X_train2, y_train2, validation_data=(X_valid, y_valid), batch_size=64, epochs=25)\n", "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/indexed_slices.py:424: UserWarning: Converting sparse IndexedSlices to a dense Tensor of unknown shape. This may consume a large amount of memory.\n \"Converting sparse IndexedSlices to a dense Tensor of unknown shape. \"\n" ], [ "! pip install --upgrade nltk", "Requirement already up-to-date: nltk in /usr/local/lib/python3.6/dist-packages (3.4.5)\nRequirement already satisfied, skipping upgrade: six in /usr/local/lib/python3.6/dist-packages (from nltk) (1.12.0)\n" ], [ "import nltk \nnltk.download('punkt')\nfrom nltk import word_tokenize", "[nltk_data] Downloading package punkt to /root/nltk_data...\n[nltk_data] Unzipping tokenizers/punkt.zip.\n" ], [ "# A value close to 0 means the sentiment was negative and a value close to 1 means its a positive review\nword2id = imdb.get_word_index()\ntest=[]\nfor word in word_tokenize(\"this is simply one of the best films ever made\"):\n test.append(word2id[word])\n\ntest=sequence.pad_sequences([test],maxlen=500)\nregressor.predict(test)", "_____no_output_____" ], [ "# A value close to 0 means the sentiment was negative and a value close to 1 means its a positive review\nword2id = imdb.get_word_index()\ntest=[]\nfor word in word_tokenize( \"the script is a real insult to the intelligence of those watching\"):\n test.append(word2id[word])\n\ntest=sequence.pad_sequences([test],maxlen=500)\nregressor.predict(test)", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a6aaafc7b7397d1e27634b8e97d5a7598c1dbc6
12,234
ipynb
Jupyter Notebook
03-wordemb/wordemb_cbow_pytorch.ipynb
sungjunlee/nn4nlp-code
14f22e663179076167fb889c99b9a03607f65618
[ "Apache-2.0" ]
1
2018-06-14T11:12:29.000Z
2018-06-14T11:12:29.000Z
03-wordemb/wordemb_cbow_pytorch.ipynb
sungjunlee/nn4nlp-code
14f22e663179076167fb889c99b9a03607f65618
[ "Apache-2.0" ]
null
null
null
03-wordemb/wordemb_cbow_pytorch.ipynb
sungjunlee/nn4nlp-code
14f22e663179076167fb889c99b9a03607f65618
[ "Apache-2.0" ]
null
null
null
34.954286
357
0.498529
[ [ [ "!pip install torch torchtext\n!git clone https://github.com/neubig/nn4nlp-code.git", "Collecting torch\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/69/43/380514bd9663f1bf708abeb359b8b48d3fabb1c8e95bb3427a980a064c57/torch-0.4.0-cp36-cp36m-manylinux1_x86_64.whl (484.0MB)\n\u001b[K 100% |████████████████████████████████| 484.0MB 24kB/s \ntcmalloc: large alloc 1073750016 bytes == 0x5bf5c000 @ 0x7fc0e503f1c4 0x46d6a4 0x5fcbcc 0x4c494d 0x54f3c4 0x553aaf 0x54e4c8 0x54f4f6 0x553aaf 0x54efc1 0x54f24d 0x553aaf 0x54efc1 0x54f24d 0x553aaf 0x54efc1 0x54f24d 0x551ee0 0x54e4c8 0x54f4f6 0x553aaf 0x54efc1 0x54f24d 0x551ee0 0x54efc1 0x54f24d 0x551ee0 0x54e4c8 0x54f4f6 0x553aaf 0x54e4c8\n\u001b[?25hCollecting torchtext\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/78/90/474d5944d43001a6e72b9aaed5c3e4f77516fbef2317002da2096fd8b5ea/torchtext-0.2.3.tar.gz (42kB)\n\u001b[K 100% |████████████████████████████████| 51kB 12.8MB/s \n\u001b[?25hCollecting tqdm (from torchtext)\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/d8/ca/6524dfba7a0e850d3fda223693779035ddc8bf5c242acd9ee4eb9e52711a/tqdm-4.23.3-py2.py3-none-any.whl (42kB)\n\u001b[K 100% |████████████████████████████████| 51kB 13.1MB/s \n\u001b[?25hRequirement already satisfied: requests in /usr/local/lib/python3.6/dist-packages (from torchtext) (2.18.4)\nRequirement already satisfied: chardet<3.1.0,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests->torchtext) (3.0.4)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests->torchtext) (2018.4.16)\nRequirement already satisfied: idna<2.7,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests->torchtext) (2.6)\nRequirement already satisfied: urllib3<1.23,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests->torchtext) (1.22)\nBuilding wheels for collected packages: torchtext\n Running setup.py bdist_wheel for torchtext ... 
\u001b[?25l-\b \b\\\b \bdone\n\u001b[?25h Stored in directory: /content/.cache/pip/wheels/42/a6/f4/b267328bde6bb680094a0c173e8e5627ccc99543abded97204\nSuccessfully built torchtext\nInstalling collected packages: torch, tqdm, torchtext\nSuccessfully installed torch-0.4.0 torchtext-0.2.3 tqdm-4.23.3\nfatal: destination path 'nn4nlp-code' already exists and is not an empty directory.\n" ], [ "from collections import defaultdict\nimport math\nimport time\nimport random\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F", "_____no_output_____" ], [ "N=2 #length of window on each side (so N=2 gives a total window size of 5, as in t-2 t-1 t t+1 t+2)\nEMB_SIZE = 128 # The size of the embedding\n\nembeddings_location = \"embeddings.txt\" #the file to write the word embeddings to\nlabels_location = \"labels.txt\" #the file to write the labels to\n\n# We reuse the data reading from the language modeling class\nw2i = defaultdict(lambda: len(w2i))\nS = w2i[\"<s>\"]\nUNK = w2i[\"<unk>\"]\ndef read_dataset(filename):\n with open(filename, \"r\") as f:\n for line in f:\n yield [w2i[x] for x in line.strip().split(\" \")]", "_____no_output_____" ], [ "# Read in the data\ntrain = list(read_dataset(\"nn4nlp-code/data/ptb/train.txt\"))\nw2i = defaultdict(lambda: UNK, w2i)\ndev = list(read_dataset(\"nn4nlp-code/data/ptb/valid.txt\"))\ni2w = {v: k for k, v in w2i.items()}\nnwords = len(w2i)\n\nwith open(labels_location, 'w') as labels_file:\n for i in range(nwords):\n labels_file.write(i2w[i] + '\\n')\n", "_____no_output_____" ], [ "class CBOW(nn.Module):\n def __init__(self, vocab_size, embed_dim):\n super(CBOW, self).__init__()\n \n self.embeddings_bag = nn.EmbeddingBag(vocab_size, embed_dim, mode='sum')\n self.fcl = nn.Linear(embed_dim, vocab_size, bias=False)\n \n def forward(self, x):\n x = self.embeddings_bag(x.view(1, -1))\n return self.fcl(x)", "_____no_output_____" ], [ "model = CBOW(nwords, EMB_SIZE)\nloss_fn = nn.CrossEntropyLoss()\nopt = torch.optim.SGD(model.parameters(), lr=0.1)", "_____no_output_____" ], [ "# Calculate the loss value for the entire sentence\ndef calc_sent_loss(sent):\n #add padding to the sentence equal to the size of the window\n #as we need to predict the eos as well, the future window at that point is N past it \n padded_sent = [S] * N + sent + [S] * N\n\n # Step through the sentence\n all_losses = [] \n for i in range(N,len(sent)+N):\n model.zero_grad()\n \n logits = model(torch.LongTensor(padded_sent[i-N:i] + padded_sent[i+1:i+N+1]))\n loss = F.cross_entropy(logits, torch.tensor(padded_sent[i]).view(1))\n loss.backward()\n opt.step()\n all_losses.append(loss.cpu().detach().numpy())\n return sum(all_losses)", "_____no_output_____" ], [ "", "_____no_output_____" ], [ "MAX_LEN = 100\n\nfor ITER in range(100):\n print(\"started iter %r\" % ITER)\n # Perform training\n random.shuffle(train)\n train_words, train_loss = 0, 0.0\n start = time.time()\n for sent_id, sent in enumerate(train):\n my_loss = calc_sent_loss(sent)\n train_loss += my_loss\n train_words += len(sent)\n# my_loss.backward()\n# trainer.update()\n if (sent_id+1) % 5000 == 0:\n print(\"--finished %r sentences\" % (sent_id+1))\n print(\"iter %r: train loss/word=%.4f, ppl=%.4f, time=%.2fs\" % (ITER, train_loss/train_words, math.exp(train_loss/train_words), time.time()-start))\n # Evaluate on dev set\n dev_words, dev_loss = 0, 0.0\n start = time.time()\n for sent_id, sent in enumerate(dev):\n my_loss = calc_sent_loss(sent)\n dev_loss += my_loss\n dev_words += len(sent)\n# 
trainer.update()\n print(\"iter %r: dev loss/word=%.4f, ppl=%.4f, time=%.2fs\" % (ITER, dev_loss/dev_words, math.exp(dev_loss/dev_words), time.time()-start))\n\n print(\"saving embedding files\")\n with open(embeddings_location, 'w') as embeddings_file:\n W_w_np = W_w_p.as_array()\n for i in range(nwords):\n ith_embedding = '\\t'.join(map(str, W_w_np[i]))\n embeddings_file.write(ith_embedding + '\\n')", "started iter 0\n--finished 5000 sentences\n--finished 10000 sentences\n--finished 15000 sentences\n--finished 20000 sentences\n--finished 25000 sentences\n--finished 30000 sentences\n--finished 35000 sentences\n" ], [ "", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a6abf24e69a284ba2375c797f502df8856f334a
836,823
ipynb
Jupyter Notebook
Modele Deep Leaning 4 Accuracy75p Recall 34p.ipynb
piedacoulisse2/fcdspneumonie
fb053d2f3b19013c5804dd33be0cf198a3d135e1
[ "BSD-2-Clause" ]
null
null
null
Modele Deep Leaning 4 Accuracy75p Recall 34p.ipynb
piedacoulisse2/fcdspneumonie
fb053d2f3b19013c5804dd33be0cf198a3d135e1
[ "BSD-2-Clause" ]
null
null
null
Modele Deep Leaning 4 Accuracy75p Recall 34p.ipynb
piedacoulisse2/fcdspneumonie
fb053d2f3b19013c5804dd33be0cf198a3d135e1
[ "BSD-2-Clause" ]
null
null
null
1,044.722846
415,348
0.951261
[ [ [ "import os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import roc_curve, roc_auc_score, precision_score, recall_score, f1_score, accuracy_score, confusion_matrix\n\nimport glob\nimport cv2\nimport random\n\nimport tensorflow as tf\n\n#print versions\nprint('tensorflow version',tf.__version__)", "tensorflow version 2.4.1\n" ], [ "labels = ['PNEUMONIA', 'NORMAL']\nimg_size = 180\ndef get_training_data(data_dir):\n data = [] \n for label in labels: \n path = os.path.join(data_dir, label)\n class_num = labels.index(label)\n for img in os.listdir(path):\n try:\n img_arr = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)\n resized_arr = cv2.resize(img_arr, (img_size, img_size)) # Reshaping images to preferred size\n data.append([resized_arr, class_num])\n except Exception as e:\n print(e)\n return np.array(data)", "_____no_output_____" ], [ "train = get_training_data('C:/Users/Administrateur/OneDrive/Formation DATASCIENCE/Projet DATA SCIENCE/chest_xray/train')\ntest = get_training_data('C:/Users/Administrateur/OneDrive/Formation DATASCIENCE/Projet DATA SCIENCE/chest_xray/test')\nval = get_training_data('C:/Users/Administrateur/OneDrive/Formation DATASCIENCE/Projet DATA SCIENCE/chest_xray/val')", "<ipython-input-2-a0442f890243>:15: VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray\n return np.array(data)\n" ], [ "train_path = 'C:/Users/Administrateur/OneDrive/Formation DATASCIENCE/Projet DATA SCIENCE/chest_xray/train'\ntest_path = 'C:/Users/Administrateur/OneDrive/Formation DATASCIENCE/Projet DATA SCIENCE/chest_xray/test'\nval_path = 'C:/Users/Administrateur/OneDrive/Formation DATASCIENCE/Projet DATA SCIENCE/chest_xray/val'", "_____no_output_____" ], [ "train_files_original = glob.glob(train_path+'/*/*')\nval_files_original = glob.glob(val_path+'/*/*')\ntest_files = glob.glob(test_path+'/*/*')\n\nprint('number of train samples across classes:', len(train_files_original))\nprint('number of val samples across classes:', len(val_files_original))\nprint('number of test samples across classes:', len(test_files))\n\nfiles = np.unique(train_files_original + val_files_original)\ntrain_files, val_files = train_test_split(files, test_size=0.3, shuffle=True)\n\nprint('number of train samples:', len(train_files))\nprint('number of val samples:', len(val_files))\ncount_normal = len([x for x in train_files if 'NORMAL' in x])\ncount_pneumonia = len([x for x in train_files if 'PNEUMONIA' in x])\n\nprint('Count of NORMAL images in train:', count_normal)\nprint('Count of PNEUMONIA images in train:', count_pneumonia)", "number of train samples across classes: 5216\nnumber of val samples across classes: 16\nnumber of test samples across classes: 624\nnumber of train samples: 3662\nnumber of val samples: 1570\nCount of NORMAL images in train: 966\nCount of PNEUMONIA images in train: 2696\n" ], [ "IMG_SIZE = 180\nx_train = []\ny_train = []\n\nx_val = []\ny_val = []\n\nx_test = []\ny_test = []\n\nfor feature, label in train:\n x_train.append(feature)\n y_train.append(label)\n\nfor feature, label in val:\n x_val.append(feature)\n y_val.append(label)\n\nfor feature, label in test:\n x_test.append(feature)\n y_test.append(label)", "_____no_output_____" ], [ "x_train = np.array(x_train) / 
255\nx_val = np.array(x_val) / 255\nx_test = np.array(x_test) / 255", "_____no_output_____" ], [ "x_train = x_train.reshape(-1, IMG_SIZE, IMG_SIZE, 1)\ny_train = np.array(y_train)\n\nx_val = x_val.reshape(-1, IMG_SIZE, IMG_SIZE, 1)\ny_val = np.array(y_val)\n\nx_test = x_test.reshape(-1, IMG_SIZE, IMG_SIZE, 1)\ny_test = np.array(y_test)", "_____no_output_____" ], [ "plt.imshow(x_train[0].reshape(180,180), cmap='gray')\nprint('label = ', y_train[0])", "label = 0\n" ], [ "print(len(x_train))\nprint(len(x_val))\nprint(len(x_test))", "5216\n16\n624\n" ], [ "fig, ax = plt.subplots(3, 3, figsize=(10, 7))\nax = ax.ravel()\nplt.tight_layout()\n\nfor i in range(3):\n random_index = random.randint(0, min(len(x_train), len(x_val), len(x_test)))\n ax[i].imshow(x_train[random_index].reshape(180,180), cmap='gray')\n ax[i].set_title('Set: train, label (Pneumonia =) {}'.format(y_train[random_index]))\n\n ax[i+3].imshow(x_val[random_index].reshape(180,180), cmap='gray')\n ax[i+3].set_title('Set: val, label (Pneumonia =) {}'.format(y_val[random_index]))\n\n ax[i+6].imshow(x_test[random_index].reshape(180,180), cmap='gray')\n ax[i+6].set_title('Set: test, label (Pneumonia =) {}'.format(y_test[random_index]))", "_____no_output_____" ], [ "def conv_block(filters):\n block = tf.keras.Sequential([\n tf.keras.layers.SeparableConv2D(filters, (3,3), activation='relu', padding='same'),\n tf.keras.layers.SeparableConv2D(filters, (3,3), activation='relu', padding='same'),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.MaxPool2D(), \n ])\n return block", "_____no_output_____" ], [ "def dense_block(units, dropout_rate):\n block = tf.keras.Sequential([\n tf.keras.layers.Dense(units, activation='relu'),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Dropout(dropout_rate),\n ])\n return block", "_____no_output_____" ], [ "def build_model():\n model = tf.keras.Sequential([\n tf.keras.Input(shape=(IMG_SIZE, IMG_SIZE, 1)),\n \n tf.keras.layers.Conv2D(16, (3,3), activation='relu', padding='same'),\n tf.keras.layers.Conv2D(16, (3,3), activation='relu', padding='same'),\n tf.keras.layers.MaxPool2D(),\n\n conv_block(32),\n conv_block(64),\n \n conv_block(128),\n tf.keras.layers.Dropout(0.2),\n\n conv_block(256),\n tf.keras.layers.Dropout(0.2),\n\n tf.keras.layers.Flatten(),\n dense_block(256, 0.7),\n dense_block(128, 0.5),\n dense_block(64, 0.3),\n \n tf.keras.layers.Dense(1, activation='sigmoid')\n ])\n return model", "_____no_output_____" ], [ "weight_for_normal = len(x_train) / (2 * count_normal)\nweight_for_pneumonia = len(x_train) / (2 * count_pneumonia)\n\nclass_weight = {0:weight_for_normal, 1:weight_for_pneumonia}\n\nprint('weight for class 0 (normal): {:.3f}'.format(weight_for_normal))\nprint('weight for class 1 (pneumonia): {:.3f}'.format(weight_for_pneumonia))", "weight for class 0 (normal): 2.700\nweight for class 1 (pneumonia): 0.967\n" ], [ "\nmodel_vanilla = build_model()\n\nmetrics = [\n 'accuracy', \n tf.keras.metrics.Precision(name='precision'),\n tf.keras.metrics.Recall(name='recall'),\n]\n\nmodel_vanilla.compile(optimizer='adam', loss='binary_crossentropy', metrics=metrics)", "_____no_output_____" ], [ "epochs = 10\nbatch_size = 100", "_____no_output_____" ], [ "history_vanilla = model_vanilla.fit(\n x=x_train, \n y=y_train, \n epochs=epochs, \n batch_size=batch_size, \n validation_data=(x_val,y_val), \n class_weight=class_weight\n)", "Epoch 1/10\n53/53 [==============================] - 337s 6s/step - loss: 1.5263 - accuracy: 0.6923 - precision: 0.4472 - recall: 0.7309 - val_loss: 0.8697 
- val_accuracy: 0.5000 - val_precision: 0.0000e+00 - val_recall: 0.0000e+00\nEpoch 2/10\n53/53 [==============================] - 322s 6s/step - loss: 0.5288 - accuracy: 0.9243 - precision: 0.8366 - recall: 0.8822 - val_loss: 1.5448 - val_accuracy: 0.5000 - val_precision: 0.0000e+00 - val_recall: 0.0000e+00\nEpoch 3/10\n53/53 [==============================] - 310s 6s/step - loss: 0.3884 - accuracy: 0.9401 - precision: 0.9051 - recall: 0.8580 - val_loss: 1.9080 - val_accuracy: 0.5000 - val_precision: 0.0000e+00 - val_recall: 0.0000e+00\nEpoch 4/10\n53/53 [==============================] - 301s 6s/step - loss: 0.2429 - accuracy: 0.9590 - precision: 0.9510 - recall: 0.8838 - val_loss: 2.1000 - val_accuracy: 0.5000 - val_precision: 0.0000e+00 - val_recall: 0.0000e+00\nEpoch 5/10\n53/53 [==============================] - 303s 6s/step - loss: 0.2129 - accuracy: 0.9610 - precision: 0.9501 - recall: 0.8896 - val_loss: 2.2317 - val_accuracy: 0.5000 - val_precision: 0.0000e+00 - val_recall: 0.0000e+00\nEpoch 6/10\n53/53 [==============================] - 293s 6s/step - loss: 0.2243 - accuracy: 0.9557 - precision: 0.9432 - recall: 0.8779 - val_loss: 2.2834 - val_accuracy: 0.5000 - val_precision: 0.0000e+00 - val_recall: 0.0000e+00\nEpoch 7/10\n53/53 [==============================] - 286s 5s/step - loss: 0.1497 - accuracy: 0.9694 - precision: 0.9698 - recall: 0.9090 - val_loss: 2.4166 - val_accuracy: 0.5000 - val_precision: 0.0000e+00 - val_recall: 0.0000e+00\nEpoch 8/10\n53/53 [==============================] - 293s 6s/step - loss: 0.1677 - accuracy: 0.9595 - precision: 0.9719 - recall: 0.8716 - val_loss: 2.5326 - val_accuracy: 0.5000 - val_precision: 0.0000e+00 - val_recall: 0.0000e+00\nEpoch 9/10\n53/53 [==============================] - 295s 6s/step - loss: 0.1482 - accuracy: 0.9635 - precision: 0.9715 - recall: 0.8894 - val_loss: 2.6691 - val_accuracy: 0.5000 - val_precision: 0.0000e+00 - val_recall: 0.0000e+00\nEpoch 10/10\n53/53 [==============================] - 284s 5s/step - loss: 0.1092 - accuracy: 0.9701 - precision: 0.9841 - recall: 0.8983 - val_loss: 2.9848 - val_accuracy: 0.5000 - val_precision: 0.0000e+00 - val_recall: 0.0000e+00\n" ], [ "epochs_array = [i for i in range(epochs)]\nfig, ax = plt.subplots(1,3)\ntrain_precision = history_vanilla.history['precision']\ntrain_recall = history_vanilla.history['recall']\ntrain_loss = history_vanilla.history['loss']\n\nval_precision = history_vanilla.history['val_precision']\nval_recall = history_vanilla.history['val_recall']\nval_loss = history_vanilla.history['val_loss']\nfig.set_size_inches(20,5)\n\nax[0].plot(epochs_array, train_loss, 'g-o', label='Training Loss')\nax[0].plot(epochs_array, val_loss, 'r-o', label='Validation Loss')\nax[0].set_title('Training & Validation Loss')\nax[0].legend()\nax[0].set_xlabel('Epochs')\nax[0].set_ylabel('Loss')\nax[0].grid(True)\n\nax[1].plot(epochs_array, train_precision, 'go-', label='Training Precision')\nax[1].plot(epochs_array, val_precision, 'ro-', label='Validation Precision')\nax[1].set_title('Training & Validation Precision')\nax[1].legend()\nax[1].set_xlabel('Epochs')\nax[1].set_ylabel('Precision')\nax[1].grid(True)\n\nax[2].plot(epochs_array, train_recall, 'go-', label='Training Recall')\nax[2].plot(epochs_array, val_recall, 'ro-', label='Validation Recall')\nax[2].set_title('Training & Validation Recall')\nax[2].legend()\nax[2].set_xlabel('Epochs')\nax[2].set_ylabel('Recall')\nax[2].grid(True)\n\nplt.show()", "_____no_output_____" ], [ "predictions = model_vanilla.predict(x=x_test)\ny_pred = 
np.round(predictions).reshape(1,-1)[0]", "_____no_output_____" ], [ "def print_results(y_test, y_pred):\n print('Accuracy : {:.5f}'.format(accuracy_score(y_pred , y_test))) \n print('AUC : {:.5f}'.format(roc_auc_score(y_test , y_pred)))\n print('Precision : {:.5f}'.format(precision_score(y_test , y_pred)))\n print('Recall : {:.5f}'.format(recall_score(y_test , y_pred)))\n print('F1 : {:.5f}'.format(f1_score(y_test , y_pred)))\n print('Confusion Matrix : \\n', confusion_matrix(y_test, y_pred))", "_____no_output_____" ], [ "print_results(y_test, y_pred)", "Accuracy : 0.62500\nAUC : 0.50000\nPrecision : 0.00000\nRecall : 0.00000\nF1 : 0.00000\nConfusion Matrix : \n [[390 0]\n [234 0]]\n" ], [ "# compile fine tuned model\n\nmodel_ft = build_model()\n\nmetrics = [\n 'accuracy', \n tf.keras.metrics.Precision(name='precision'),\n tf.keras.metrics.Recall(name='recall'),\n]\n\nmodel_ft.compile(optimizer='adam', loss='binary_crossentropy', metrics=metrics)\n", "_____no_output_____" ], [ "checkpoint_cb = tf.keras.callbacks.ModelCheckpoint('xray_model.h5', save_best_only=True)\nearly_stopping_cb = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=15, mode='min', verbose=1, restore_best_weights=True)", "_____no_output_____" ], [ "def exponential_decay(lr0, s):\n def exponential_decay_fn(epoch):\n return lr0 * 0.1 **(epoch / s)\n return exponential_decay_fn\n\nexponential_decay_fn = exponential_decay(0.01, 20)\n\nlr_scheduler = tf.keras.callbacks.LearningRateScheduler(exponential_decay_fn)", "_____no_output_____" ], [ "epochs = 10\nbatch_size = 64", "_____no_output_____" ], [ "history_ft = model_ft.fit(\n x=x_train, \n y=y_train, \n epochs=epochs, \n batch_size=batch_size, \n validation_data=(x_val,y_val), \n class_weight=class_weight,\n callbacks = [checkpoint_cb, early_stopping_cb, lr_scheduler]\n)", "Epoch 1/10\n82/82 [==============================] - 219s 3s/step - loss: 1.1462 - accuracy: 0.6881 - precision: 0.4074 - recall: 0.3038 - val_loss: 1.8938 - val_accuracy: 0.5000 - val_precision: 0.0000e+00 - val_recall: 0.0000e+00\nEpoch 2/10\n82/82 [==============================] - 218s 3s/step - loss: 0.4367 - accuracy: 0.8538 - precision: 0.8588 - recall: 0.5041 - val_loss: 2.4904 - val_accuracy: 0.5000 - val_precision: 0.0000e+00 - val_recall: 0.0000e+00\nEpoch 3/10\n82/82 [==============================] - 228s 3s/step - loss: 0.3194 - accuracy: 0.9037 - precision: 0.9029 - recall: 0.7020 - val_loss: 2.3297 - val_accuracy: 0.5625 - val_precision: 1.0000 - val_recall: 0.1250\nEpoch 4/10\n82/82 [==============================] - 229s 3s/step - loss: 0.2358 - accuracy: 0.9242 - precision: 0.9417 - recall: 0.7591 - val_loss: 2.7604 - val_accuracy: 0.5625 - val_precision: 1.0000 - val_recall: 0.1250\nEpoch 5/10\n82/82 [==============================] - 219s 3s/step - loss: 0.2258 - accuracy: 0.9407 - precision: 0.9510 - recall: 0.8081 - val_loss: 1.5811 - val_accuracy: 0.5625 - val_precision: 1.0000 - val_recall: 0.1250\nEpoch 6/10\n82/82 [==============================] - 225s 3s/step - loss: 0.1602 - accuracy: 0.9535 - precision: 0.9674 - recall: 0.8491 - val_loss: 2.7645 - val_accuracy: 0.5000 - val_precision: 0.0000e+00 - val_recall: 0.0000e+00\nEpoch 7/10\n82/82 [==============================] - 218s 3s/step - loss: 0.1304 - accuracy: 0.9662 - precision: 0.9661 - recall: 0.8998 - val_loss: 1.5057 - val_accuracy: 0.5000 - val_precision: 0.0000e+00 - val_recall: 0.0000e+00\nEpoch 8/10\n82/82 [==============================] - 207s 3s/step - loss: 0.1334 - accuracy: 0.9644 - precision: 
0.9704 - recall: 0.8866 - val_loss: 1.1271 - val_accuracy: 0.5625 - val_precision: 1.0000 - val_recall: 0.1250\nEpoch 9/10\n82/82 [==============================] - 209s 3s/step - loss: 0.1103 - accuracy: 0.9748 - precision: 0.9749 - recall: 0.9287 - val_loss: 0.8373 - val_accuracy: 0.6875 - val_precision: 1.0000 - val_recall: 0.3750\nEpoch 10/10\n82/82 [==============================] - 225s 3s/step - loss: 0.1100 - accuracy: 0.9728 - precision: 0.9816 - recall: 0.9127 - val_loss: 1.3891 - val_accuracy: 0.5625 - val_precision: 1.0000 - val_recall: 0.1250\n" ], [ "epochs_array = [i for i in range(len(history_ft.history['accuracy']))]\nfig, ax = plt.subplots(1,3)\ntrain_precision = history_ft.history['precision']\ntrain_recall = history_ft.history['recall']\ntrain_loss = history_ft.history['loss']\n\nval_precision = history_ft.history['val_precision']\nval_recall = history_ft.history['val_recall']\nval_loss = history_ft.history['val_loss']\nfig.set_size_inches(20,5)\n\nax[0].plot(epochs_array, train_loss, 'g-o', label='Training Loss')\nax[0].plot(epochs_array, val_loss, 'r-o', label='Validation Loss')\nax[0].set_title('Training & Validation Loss')\nax[0].legend()\nax[0].set_xlabel('Epochs')\nax[0].set_ylabel('Loss')\nax[0].grid(True)\n\nax[1].plot(epochs_array, train_precision, 'go-', label='Training Precision')\nax[1].plot(epochs_array, val_precision, 'ro-', label='Validation Precision')\nax[1].set_title('Training & Validation Precision')\nax[1].legend()\nax[1].set_xlabel('Epochs')\nax[1].set_ylabel('Precision')\nax[1].grid(True)\n\nax[2].plot(epochs_array, train_recall, 'go-', label='Training Recall')\nax[2].plot(epochs_array, val_recall, 'ro-', label='Validation Recall')\nax[2].set_title('Training & Validation Recall')\nax[2].legend()\nax[2].set_xlabel('Epochs')\nax[2].set_ylabel('Recall')\nax[2].grid(True)\nplt.show()", "_____no_output_____" ], [ "predictions = model_ft.predict(x=x_test)\ny_pred = np.round(predictions).reshape(1,-1)[0]", "_____no_output_____" ], [ "print_results(y_test, y_pred)", "Accuracy : 0.75160\nAUC : 0.66966\nPrecision : 0.98765\nRecall : 0.34188\nF1 : 0.50794\nConfusion Matrix : \n [[389 1]\n [154 80]]\n" ], [ "incorrect = np.nonzero(y_test != y_pred)[0]", "_____no_output_____" ], [ "fig, ax = plt.subplots(3, 2, figsize=(15,15))\nax = ax.ravel()\nplt.subplots_adjust(wspace=0.25, hspace=0.75)\nplt.tight_layout()\ni = 0\nfor c in incorrect[:6]:\n ax[i].set_xticks([])\n ax[i].set_yticks([])\n ax[i].imshow(x_test[c].reshape(IMG_SIZE,IMG_SIZE), cmap='gray', interpolation='none')\n ax[i].set_title('Predicted Class: {}, Actual Class: {}'.format(y_pred[c], y_test[c]))\n i += 1 \n", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a6ac89af285325d9eda36448e6cd7fc19439b59
395,087
ipynb
Jupyter Notebook
src/figure3.ipynb
ivazquez/population-dynamics
0baf8baf4c1e383577e67690861d723c43cd08aa
[ "MIT" ]
1
2017-02-01T21:01:48.000Z
2017-02-01T21:01:48.000Z
src/figure3.ipynb
ivazquez/genetic-variation
0baf8baf4c1e383577e67690861d723c43cd08aa
[ "MIT" ]
null
null
null
src/figure3.ipynb
ivazquez/genetic-variation
0baf8baf4c1e383577e67690861d723c43cd08aa
[ "MIT" ]
null
null
null
207.394751
98,890
0.847894
[ [ [ "# Supplemental Information:\n\n> **\"Clonal heterogeneity influences the fate of new adaptive mutations\"**\n\n> Ignacio Vázquez-García, Francisco Salinas, Jing Li, Andrej Fischer, Benjamin Barré, Johan Hallin, Anders Bergström, Elisa Alonso-Pérez, Jonas Warringer, Ville Mustonen, Gianni Liti\n\n## Figure 3 (+ Supp. Figs.)\n\nThis IPython notebook is provided for reproduction of Figures 2, S3, S4 and S7 of the paper. It can be viewed by copying its URL to nbviewer and it can be run by opening it in binder.", "_____no_output_____" ] ], [ [ "# Load external dependencies\nfrom setup import *\n# Load internal dependencies\nimport config,gmm,plot,utils\n\n%load_ext autoreload\n%autoreload 2\n\n%matplotlib inline", "The autoreload extension is already loaded. To reload it, use:\n %reload_ext autoreload\n" ], [ "ids = pd.read_csv(dir_data+'seq/sample_ids_merged_dup.csv')\n\nids.loc[ids.clone.isnull(),'type'] = 'population'\nids.loc[(ids.clone.notnull()) & (ids.time==0),'type'] = 'ancestral clone'\nids.loc[(ids.clone.notnull()) & (ids.time==32),'type'] = 'evolved clone'\n\nfor seq_type, seq_id in ids.groupby('type'):\n print('{0} sequencing coverage\\nBottom quartile: {1:.2f}x, Top quartile: {2:.2f}x, Min: {3:.2f}x, Max: {4:.2f}x, Median: {5:.2f}x\\n'\\\n .format(seq_type.capitalize(),\n seq_id['coverage'].quantile(.25), \\\n seq_id['coverage'].quantile(.75), \\\n seq_id['coverage'].min(), \\\n seq_id['coverage'].max(), \\\n seq_id['coverage'].median()))", "Ancestral clone sequencing coverage\nBottom quartile: 15.45x, Top quartile: 26.13x, Min: 9.06x, Max: 36.60x, Median: 23.37x\n\nEvolved clone sequencing coverage\nBottom quartile: 28.48x, Top quartile: 31.96x, Min: 26.06x, Max: 36.01x, Median: 30.38x\n\nPopulation sequencing coverage\nBottom quartile: 84.10x, Top quartile: 103.98x, Min: 24.66x, Max: 150.10x, Median: 94.81x\n\n" ] ], [ [ "## Data import", "_____no_output_____" ], [ "Top panels - Import subclonal frequency", "_____no_output_____" ] ], [ [ "# Load data\nseq_st_df = pd.read_csv(dir_data+'seq/subclonality/seq_subclonality.csv', encoding='utf-8')\n\n# Compute cumulative haplotype frequencies for major subclones\nseq_st_df['clonal'] = seq_st_df.apply(\n lambda x: \n x[['subclone A','subclone B','subclone C','subclone D']].fillna(0).sum(), \n axis=1\n)\n# Calculate the remaining bulk fraction\nseq_st_df['bulk'] = 1.0 - seq_st_df['clonal']\n\nseq_st_df.head()", "_____no_output_____" ] ], [ [ "Middle panels - Import mutation counts", "_____no_output_____" ] ], [ [ "# Load data\nseq_dn_df = pd.read_csv(dir_data+'seq/de-novo/seq_de_novo_snv_indel.csv', encoding='utf-8', keep_default_na=False)\n\nprint(seq_dn_df.shape)\nseq_dn_df.head()", "(572, 83)\n" ] ], [ [ "The tally of SNVs and indels across whole-population genome sequences is:", "_____no_output_____" ] ], [ [ "seq_dn_df[(seq_dn_df.clone!='')].groupby(['selection','population','time','variant_type']).size()", "_____no_output_____" ], [ "seq_dn_df[(seq_dn_df.time==0) & (seq_dn_df.clone!='') & (seq_dn_df.ploidy=='haploid')].groupby(['selection','mutation_type','variant_type']).size()", "_____no_output_____" ], [ "seq_dn_df[(seq_dn_df.time==32) & (seq_dn_df.clone!='')].groupby(['selection','mutation_type','variant_type']).size()", "_____no_output_____" ] ], [ [ "Bottom panels - Import phenotype evolution", "_____no_output_____" ] ], [ [ "# Load data\npheno_df = pd.read_csv(dir_data+'pheno/populations/pheno_populations.csv.gz', encoding='utf-8', keep_default_na=False, na_values='NaN')\n\n# Filter out strains used for spatial 
control\npheno_df = pheno_df[(pheno_df.group == 'ancestral')|\\\n (pheno_df.group == 'evolved')]\n\ngroups_ph = pheno_df.groupby(['group','cross','cross_rep','selection','selection_rep'])\npheno_df = pheno_df[pheno_df.selection_rep != '']\n\nfor (ii,((group,cross,cross_rep,selection,selection_rep),g1)) in enumerate(groups_ph):\n \n if group=='evolved':\n \n df = groups_ph.get_group(('ancestral',cross,cross_rep,selection,'')) \n df.loc[:,'selection_rep'] = df.selection_rep.replace([''],[selection_rep])\n df.loc[:,'population'] = df['background']+'_'+df['cross']+'_'+df['cross_rep'].apply(str)+'_'+df['selection']+'_'+df['selection_rep'].apply(str)\n\n pheno_df = pheno_df.append(df)\n \npheno_df = pheno_df.reset_index(drop=True)\n\n# Set reference as mean phenotype of the ancestral hybrid\ndef normalize_phenotype(df, param_abs='norm_growth_rate', param_rel='rel_growth_rate'):\n df[param_rel] = df[param_abs] - df[df.group=='ancestral'][param_abs].mean()\n return df\n\npheno_df = pheno_df.groupby(['selection','environment','population'], as_index=False).apply(normalize_phenotype, param_abs='norm_growth_rate', param_rel='rel_growth_rate')\npheno_df = pheno_df.groupby(['selection','environment','population'], as_index=False).apply(normalize_phenotype, param_abs='norm_doubling_time', param_rel='rel_doubling_time')\n\n# # Filter out measurement replicates with >5% measurement error\n# pheno_df['pct'] = pheno_df.groupby(['selection','environment','population','group','isolate','gene','genotype_long'])['rel_growth_rate']\\\n# .apply(lambda x: (x-x.mean())/float(x.mean()))\n# pheno_df = pheno_df[abs(pheno_df['pct'])<10]\n\npheno_df.head() # show dataframe header to stdout", "_____no_output_____" ] ], [ [ "## Figure 3 - Subclonal heterogeneity", "_____no_output_____" ] ], [ [ "param = 'rel_growth_rate'\n\npanels = {\n 'HU': {\n 'WAxNA_F12_1_HU_2':0,\n 'WAxNA_F12_1_HU_3':1,\n 'WAxNA_F12_2_HU_3':2\n },\n 'RM': {\n 'WAxNA_F12_1_RM_3':0,\n 'WAxNA_F12_1_RM_4':1,\n 'WAxNA_F12_2_RM_2':2\n }\n}\n\npopulations = panels['HU'].keys()+panels['RM'].keys()\ngroups_st = seq_st_df[seq_st_df.population.isin(populations)]\ngroups_dn = seq_dn_df[(seq_dn_df.population.isin(populations))& \\\n (seq_dn_df.clone=='')& \\\n (seq_dn_df.gene!='non-coding')]\ngroups_ph = pheno_df[pheno_df.population.isin(populations)& \\\n np.isfinite(pheno_df[param])] # Take rows where param is finite\n \ngroups_st = groups_st.groupby('selection')\ngroups_dn = groups_dn.groupby('selection')\ngroups_ph = groups_ph.groupby(['selection','environment'])\n\nfor (ii, environment) in enumerate(['HU','RM']):\n \n fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(6, 4), sharey='row')\n \n fig.subplots_adjust(left=0.07,bottom=0.07,right=0.85,top=0.95,hspace=0.3,wspace=0.1)\n \n # Set scales\n for ax in axes[0]:\n ax.set_xlim(0, 32)\n ax.set_ylim(0, 1)\n for ax in axes[1]:\n if environment=='HU':\n ax.set_xlim(-0.3, 0.5)\n ax.set_ylim(0, 0.15)\n elif environment=='RM':\n ax.set_xlim(-0.5, 1.9)\n ax.set_ylim(0, 0.12)\n\n ### Top panels ###\n # De novo mutations #\n for (jj, (population, gdn)) in enumerate(groups_dn.get_group(environment).groupby('population')):\n \n # Retrieve axes\n ax1 = axes[0][panels[environment][population]]\n \n for (gene, cds_pos, sub, protein_pos, amino_acids, consequence), gdx in \\\n gdn.groupby(['gene','cds_position','substitution','protein_position','amino_acids','consequence_short']):\n \n assignment = gdx.assignment.unique()[0]\n mutation_type = gdx.mutation_type.unique()[0]\n \n gdx.time = gdx.time.apply(int)\n gdx = 
gdx.sort_values('time').reset_index(drop=True)\n gdx = gdx.sort_index()\n \n ax1.plot(\n gdx.index.values, gdx.frequency.values,\n color=config.lineages[assignment]['fill'], \n **utils.merge_two_dicts(config.mutation_type[mutation_type],\n config.consequence_short[consequence])\n )\n \n if mutation_type=='driver':\n index = np.argmax(gdx.frequency)\n ax1.annotate(gene, xy=(index,gdx.frequency[index]), style='italic', fontsize=6,\n textcoords='offset points', xytext=(0, 13), ha = 'center', va = 'top',\n path_effects=[path_effects.withStroke(linewidth=0.5, foreground=\"w\")], zorder=3)\n ax1.annotate(amino_acids.split('/')[0]+protein_pos+amino_acids.split('/')[1], \n xy=(index,gdx.frequency[index]), fontsize=5,\n textcoords='offset points', xytext=(0, 7), ha = 'center', va = 'top',\n path_effects=[path_effects.withStroke(linewidth=0.4, foreground=\"w\")], zorder=3)\n \n # Subclonal frequency #\n for (jj, (population,gst)) in enumerate(groups_st.get_group(environment).groupby('population')):\n\n # Retrieve axes\n ax2 = axes[0][panels[environment][population]]\n\n # Set title\n ax2.set_title(population.replace('_',' '), fontsize=7, weight='bold')\n # \n gst.set_index('time', inplace=True)\n colors=[config.lineages[x]['fill'] for x in ['subclone A','subclone B','bulk']]\n gst[['subclone A','subclone B','bulk']].plot(\n ax=ax2, kind='bar', legend=False, \n stacked=True, rot=0, width=0.75, position=0.5, color=colors\n )\n \n # Rotate the x-axis ticks\n ax2.set_xlabel('', rotation=0)\n\n ### Bottom panels ###\n for (jj, (population, gph)) in enumerate(groups_ph.get_group((environment,environment)).groupby('population')):\n\n # Retrieve axes\n ax3 = axes[1][panels[environment][population]]\n utils.simple_axes(ax3)\n \n for (kk, (time, gt)) in enumerate(gph.groupby('group')):\n \n print(environment, population, time)\n \n x, y = plot.histogram_binned_data(ax, gt[param], bins=34)\n\n ax3.plot(x, y, color=config.population['color'][time], linewidth=0.75)\n \n ax3.fill_between(x, 0, y, label=config.population['long_label'][time], \n alpha=0.45, facecolor=config.population['color'][time])\n \n # Mean of all isolates\n gt_all = gt.groupby(['isolate','gene','genotype_long','assignment'])\n gt_all = gt_all[param].agg(np.mean)#.mean()\n # Mean of random isolates\n gt_random = gt[(gt['assignment']=='')].groupby(['isolate','gene','genotype_long','assignment'])\n gt_random = gt_random[param].agg(np.mean)#.mean()\n # Mean of targeted isolates\n gt_target = gt[(gt['assignment']!='')].groupby(['isolate','gene','genotype_long','assignment'])\n gt_target = gt_target[param].agg(np.mean)#.mean()\n \n # Gaussian mixture model\n X = gt_random[:, np.newaxis]\n N = np.arange(1, 4)\n \n models = gmm.gmm_fit(X, N)\n \n # Compute the AIC and the BIC\n AIC = [m.aic(X) for m in models]\n BIC = [m.bic(X) for m in models]\n M_best = models[np.argmin(BIC)]\n print BIC\n # Mean of the distribution\n for m, v in zip(abs(M_best.means_.ravel()), M_best.covariances_.ravel()):\n print('Mean: %.6f, Variance: %.6f' % (m, v,))\n ax3.plot([m,m], ax3.get_ylim(), \n color=config.population['color'][time], \n linestyle='--', dashes=(4,3), linewidth=1)\n pos = ax3.get_ylim()[0] * 0.75 + ax3.get_ylim()[1] * 0.25\n trans = ax3.get_xaxis_transform() # x in data units, y in axes fraction \n ax3.annotate(\n np.around(m, 2), xy=(m, 0.85), xycoords=trans, fontsize=6,\n color='k', va='center', ha=('right' if time=='ancestral' else 'left'),\n xytext=((-5 if time=='ancestral' else 5),0), textcoords='offset points',\n 
path_effects=[path_effects.withStroke(linewidth=0.5, foreground=\"w\")]\n )\n \n x_data = np.array(gt_all)\n y_data = np.repeat([0.03*(ax3.get_ylim()[1]-ax3.get_ylim()[0])], len(x_data))\n \n markerline, stemlines, baseline = ax3.stem(x_data, y_data)\n \n plt.setp(markerline, 'markerfacecolor', config.population['color'][time], markersize = 0)\n plt.setp(stemlines, linewidth=1, color=config.population['color'][time], \n path_effects=[path_effects.withStroke(linewidth=0.75, foreground=\"w\")])\n plt.setp(baseline, 'color', 'none')\n \n if len(gt_target)>0:\n \n x_data = np.array(gt_target)\n y_data = np.repeat([0.2*(ax3.get_ylim()[1]-ax3.get_ylim()[0])], len(x_data))\n \n markerline, stemlines, baseline = ax3.stem(x_data, y_data)\n \n plt.setp(markerline, 'color', config.population['color'][time], \n markersize = 2.75, markeredgewidth=.75, markeredgecolor='k', zorder=3)\n plt.setp(stemlines, linewidth=.75, color=config.population['color'][time],\n path_effects=[path_effects.withStroke(linewidth=1.25, foreground='k')], zorder=2) \n plt.setp(baseline, 'color', 'none', zorder=1)\n \n for (isolate, gene, genotype, assignment), mean in gt_target.iteritems():\n ax3.annotate(\n gene, xy = (mean, 0.2), xycoords=('data','axes fraction'), \n xytext = (0, 8), textcoords = 'offset points', \n ha = 'center', va = 'top', fontsize = 6, style = 'italic',\n path_effects=[path_effects.withStroke(linewidth=0.5, foreground=\"w\")]\n )\n \n # Set axes labels\n axes[0, 1].set_xlabel(r'Time, $t$ (days)')\n axes[0, 0].set_ylabel('Cumulative subclone\\n frequency, $f_j$ (bars)')\n axes[0, 2].twinx().set_ylabel('Allele frequency (lines)', rotation=270, va='baseline')\n axes[1, 1].set_xlabel(r'Rel. growth rate, $\\lambda_{k}(t)$')\n axes[1, 0].set_ylabel('Density')\n\n # Set legends\n leg1 = axes[0, 2].legend(bbox_to_anchor=(1.3, 0.75), frameon=False,\n loc='center left', borderaxespad=0.,\n handlelength=0.75, title='Lineage', prop={'size':6})\n \n driver_artist = lines.Line2D((0,1),(0,0), color=config.lineages['bulk']['fill'], \n **config.mutation_type['driver'])\n passenger_artist = lines.Line2D((0,1),(0,0), color=config.lineages['bulk']['fill'], \n **config.mutation_type['passenger'])\n nonsyn_artist = lines.Line2D((0,1),(0,0), mfc=config.lineages['bulk']['fill'],\n linestyle='', linewidth=1.5,\n path_effects=[path_effects.withStroke(linewidth=2, foreground=\"k\")],\n **config.consequence_short['non-synonymous'])\n syn_artist = lines.Line2D((0,1),(0,0), mfc=config.lineages['bulk']['fill'],\n linestyle='', linewidth=1.5,\n path_effects=[path_effects.withStroke(linewidth=2, foreground=\"k\")], \n **config.consequence_short['synonymous'])\n leg2 = axes[0, 2].legend([driver_artist,passenger_artist,nonsyn_artist,syn_artist], \n ['driver','passenger','non-synonymous','synonymous'], \n bbox_to_anchor=(1.3, 0.25), ncol=1,\n frameon=False, loc='lower left',\n borderaxespad=0, handlelength=1.75, \n title='Mutation', prop={'size':6})\n axes[0, 2].add_artist(leg1)\n \n axes[0, 2].get_legend().get_title().set_fontsize('7')\n \n leg3 = axes[1, 2].legend(bbox_to_anchor=(1.3, 0.5), frameon=False, \n loc='center left', borderaxespad=0., framealpha=1,\n handlelength=0.75, title='Time', prop={'size':6})\n \n axes[1, 2].get_legend().get_title().set_fontsize('7')\n \n for leg in [leg1,leg2]:\n plt.setp(leg.get_title(), fontsize=7)\n \n # Set panel labels\n axes[0,0].text(-0.24, 1.1, chr(2*ii + ord('A')), transform=axes[0,0].transAxes,\n fontsize=9, fontweight='bold', va='top', ha='right')\n axes[0,1].text(0.5, 1.2, 'Selection: %s' % 
config.selection['long_label'][environment], \n transform=axes[0,1].transAxes, fontsize=8, va='center', ha='center')\n axes[1,0].text(-0.24, 1.1, chr(2*ii + ord('B')), transform=axes[1,0].transAxes,\n fontsize=9, fontweight='bold', va='top', ha='right')\n\n # Axes limits\n for ax in fig.get_axes():\n \n ax.xaxis.label.set_size(6)\n ax.yaxis.label.set_size(6)\n ax.tick_params(axis='both', which='major', size=2, labelsize=6)\n ax.tick_params(axis='both', which='minor', size=0, labelsize=0)\n \n plt.setp(ax.get_xticklabels(), fontsize=6)\n plt.setp(ax.get_yticklabels(), fontsize=6)\n \n for loc in ['top','bottom','left','right']:\n ax.spines[loc].set_linewidth(0.75)\n \n if ax.is_last_row():\n if environment=='HU':\n ax.xaxis.set_major_locator(ticker.MaxNLocator(nbins=5))\n ax.yaxis.set_major_locator(ticker.MaxNLocator(nbins=5))\n elif environment=='RM':\n ax.xaxis.set_major_locator(ticker.MaxNLocator(nbins=5))\n ax.yaxis.set_major_locator(ticker.MaxNLocator(nbins=4))\n \n plot.save_figure(dir_paper+'figures/figure3/figure3_%s' % environment)\n plt.show()", "('HU', u'WAxNA_F12_1_HU_2', u'ancestral')\n[-284.23225508271184, -272.21007639285102, -258.19741532980646]\nMean: 0.000036, Variance: 0.002758\n('HU', u'WAxNA_F12_1_HU_2', u'evolved')\n[-186.94223493879264, -185.11558777103389, -173.39666344572879]\nMean: 0.054732, Variance: 0.007596\n('HU', u'WAxNA_F12_1_HU_3', u'ancestral')\n[-284.23225508271184, -272.21007639285102, -258.1974153298064]\nMean: 0.000036, Variance: 0.002758\n('HU', u'WAxNA_F12_1_HU_3', u'evolved')\n[-131.31986323703262, -149.25143821279616, -137.15645452825072]\nMean: 0.028203, Variance: 0.003428\nMean: 0.254325, Variance: 0.004882\n('HU', u'WAxNA_F12_2_HU_3', u'ancestral')\n[-263.64658590072952, -249.61261745149946, -235.20187429120534]\nMean: 0.000059, Variance: 0.003417\n('HU', u'WAxNA_F12_2_HU_3', u'evolved')\n[-196.21637022666482, -183.80029162667867, -175.41868584307633]\nMean: 0.137476, Variance: 0.006896\n" ] ], [ [ "**Fig. 3:** Reconstruction of subclonal dynamics. (**A** and **C**), Competing subclones evolved in (*A*) hydroxyurea and (*C*) rapamycin experienced a variety of fates. Time is on the $x$-axis, starting after crossing when the population has no competing subclones. Cumulative haplotype frequency of subclones (bars) and allele frequency of *de novo* mutants (lines) are on the $y$-axis. Most commonly, selective sweeps were observed where a spontaneous mutation arose and increased in frequency. Driver mutations are solid lines and passenger mutations are dashed lines, colored by subclone assignment; circles and squares denote non-synonymous and synonymous mutations, respectively. (**B** and **D**) Variability in intra-population growth rate , estimated by random sampling of 96 individuals at initial ($t = 0$ days, blue) and final ($t = 32$ days, red) time points ($n = 32$ technical replicates per individual). Mean growth rates by individual are shown at the foot of the histogram (Fig. S7). The posterior means of the distribution modes fitted by a Gaussian mixture model are indicated as dashed lines. 
The fitter individuals (pins) carry driver mutations, measured by targeted sampling and sequencing.", "_____no_output_____" ], [ "## Figure S3 - Sequence evolution of WAxNA founders", "_____no_output_____" ] ], [ [ "panels = {\n 'HU': {\n 'WAxNA_F12_1_HU_1':(0,1), \n 'WAxNA_F12_1_HU_2':(0,2), \n 'WAxNA_F12_1_HU_3':(0,3), \n 'WAxNA_F12_2_HU_1':(1,1), \n 'WAxNA_F12_2_HU_2':(1,2), \n 'WAxNA_F12_2_HU_3':(1,3)\n },\n 'RM': {\n 'WAxNA_F2_1_RM_1':(0,0),\n 'WAxNA_F12_1_RM_1':(0,1), \n 'WAxNA_F12_1_RM_2':(0,2), \n 'WAxNA_F12_1_RM_3':(0,3), \n 'WAxNA_F12_1_RM_4':(0,4),\n 'WAxNA_F2_1_RM_2':(1,0),\n 'WAxNA_F12_2_RM_1':(1,1), \n 'WAxNA_F12_2_RM_2':(1,2), \n 'WAxNA_F12_2_RM_3':(1,3), \n 'WAxNA_F12_2_RM_4':(1,4)\n }\n}\n\npopulations = panels['HU'].keys()+panels['RM'].keys()\ngroups_st = seq_st_df[seq_st_df.population.isin(populations)].groupby(['selection','population'])\ngroups_dn = seq_dn_df[(seq_dn_df.population.isin(populations))&\\\n (seq_dn_df.clone=='')&\\\n (seq_dn_df.gene!='non-coding')].groupby(['selection','population'])\n\n# Create a figure with subplots\nfig = plt.figure(figsize=(10, 10))\n\ngrid = gridspec.GridSpec(2, 1)\n\ngs = {}\n\nfor (ii, e) in enumerate(['HU','RM']):\n \n nrows = 2\n ncols = 5\n gs[e] = gridspec.GridSpecFromSubplotSpec(nrows, ncols,\n subplot_spec=grid[ii],\n hspace=0.3, wspace=0.15)\n \n for (jj, p) in enumerate(panels[e]):\n \n # Retrieve axes\n ax1 = plt.subplot(gs[e][panels[e][p]])\n ax2 = ax1.twinx()\n \n ### Subclone frequency ###\n gst = groups_st.get_group((e,p))\n \n # Set title\n ax1.set_title(p.replace('_',' '), fontsize=7, weight='bold')\n\n # Bar plot\n gst = gst.set_index('time')\n gst = gst[['subclone A','subclone B','subclone C','subclone D','bulk']]\n gst.plot(ax=ax1, kind='bar',\n legend=False, stacked=True, width=0.75, position=0.5,\n color=[config.lineages[c]['fill'] for c in gst.columns])\n \n ### De novo mutations ###\n if (e,p) in groups_dn.groups.keys():\n\n gdn = groups_dn.get_group((e,p))\n \n for (gene, pos, cds, sub, protein_pos, amino_acids, consequence), gdx \\\n in gdn.groupby(['gene','pos','cds_position','substitution',\\\n 'protein_position','amino_acids','consequence_short']):\n \n assignment = gdx.assignment.unique()[0]\n mutation_type = gdx.mutation_type.unique()[0]\n \n gdx = gdx.sort_values('time').reset_index(drop=True)\n gdx = gdx.sort_index()\n\n ax2.plot(gdx.index.values, gdx.frequency.values,\n color=config.lineages[assignment]['line'],\n **utils.merge_two_dicts(config.mutation_type[mutation_type],\n config.consequence_short[consequence]))\n\n if mutation_type=='driver':\n index = np.argmax(gdx.frequency)\n ax2.annotate(\n gene, xy=(index,gdx.frequency[index]), style='italic', fontsize=6,\n textcoords='offset points', xytext=(0, 13), ha = 'center', va = 'top',\n path_effects=[path_effects.withStroke(linewidth=0.5, foreground=\"w\")], zorder=3\n )\n ax2.annotate(\n amino_acids.split('/')[0]+protein_pos+amino_acids.split('/')[1], \n xy=(index,gdx.frequency[index]), fontsize=5,\n textcoords='offset points', xytext=(0, 7), ha = 'center', va = 'top',\n path_effects=[path_effects.withStroke(linewidth=0.4, foreground=\"w\")], zorder=3\n )\n \n # Set legends\n if (e,p) in [('HU','WAxNA_F12_1_HU_3'),('RM','WAxNA_F12_1_RM_4')]:\n \n leg1 = ax1.legend(bbox_to_anchor=(1.3, -0.125), ncol=1,\n frameon=False, loc='lower left', \n borderaxespad=0., handlelength=0.7, \n title='Lineage', prop={'size':6})\n \n if (e,p) in [('HU','WAxNA_F12_2_HU_3'),('RM','WAxNA_F12_2_RM_4')]:\n \n driver_artist = lines.Line2D((0,1),(0,0), 
color=config.lineages['bulk']['fill'], \n **config.mutation_type['driver'])\n passenger_artist = lines.Line2D((0,1),(0,0), color=config.lineages['bulk']['fill'], \n **config.mutation_type['passenger'])\n nonsyn_artist = lines.Line2D((0,1),(0,0), mfc=config.lineages['bulk']['fill'], linestyle='',\n path_effects=[path_effects.withStroke(linewidth=2, foreground=\"k\")],\n **config.consequence_short['non-synonymous'])\n syn_artist = lines.Line2D((0,1),(0,0), mfc=config.lineages['bulk']['fill'], linestyle='', \n path_effects=[path_effects.withStroke(linewidth=2, foreground=\"k\")],\n **config.consequence_short['synonymous'])\n leg2 = ax1.legend([driver_artist,passenger_artist,nonsyn_artist,syn_artist], \n ['driver','passenger','non-synonymous','synonymous'], \n bbox_to_anchor=(1.3, 1.125), ncol=1,\n frameon=False, loc='upper left',\n borderaxespad=0, handlelength=1.75, \n title='Mutation', prop={'size':6})\n \n for leg in [leg1,leg2]:\n plt.setp(leg.get_title(),fontsize=6)\n\n # Set axes labels\n if (e,p) in [('HU','WAxNA_F12_2_HU_2'),('RM','WAxNA_F12_2_RM_2')]:\n ax1.set_xlabel(r'Time, $t$ (days)')\n else:\n ax1.set_xlabel('')\n if (e,p) in [('HU','WAxNA_F12_1_HU_1'),('RM','WAxNA_F2_1_RM_1'),\n ('HU','WAxNA_F12_2_HU_1'),('RM','WAxNA_F2_1_RM_2')]:\n ax1.set_ylabel('Cumulative subclone\\n frequency, $f_j$ (bars)')\n else:\n ax1.set_yticklabels([])\n if (e,p) in [('HU','WAxNA_F12_1_HU_3'),('RM','WAxNA_F12_1_RM_4'),\n ('HU','WAxNA_F12_2_HU_3'),('RM','WAxNA_F12_2_RM_4')]:\n ax2.set_ylabel('Allele frequency (lines)', rotation=270, va='baseline')\n else:\n ax2.set_yticklabels([])\n \n plt.setp(ax1.xaxis.get_majorticklabels(), rotation=0) # rotate the x-axis ticks\n\n # Set panel labels\n if (e,p) in [('HU','WAxNA_F12_1_HU_1'),('RM','WAxNA_F2_1_RM_1')]:\n ax1.text(-0.25, 1.2, chr(ii + ord('A')), transform=ax1.transAxes,\n fontsize=9, fontweight='bold', va='center', ha='right')\n if (e,p) in [('HU','WAxNA_F12_1_HU_2'),('RM','WAxNA_F12_1_RM_2')]:\n ax1.text(0.5, 1.2, 'Selection: %s' % config.selection['long_label'][e],\n transform=ax1.transAxes, fontsize=8, va='center', ha='center')\n\nfor ax in fig.get_axes():\n \n ax.set_ylim(0, 1) # axes limits\n \n ax.xaxis.label.set_size(6)\n ax.yaxis.label.set_size(6)\n ax.tick_params(axis='both', which='major', size=2, labelsize=6)\n ax.tick_params(axis='both', which='minor', size=0, labelsize=0)\n \n plt.setp(ax.get_xticklabels(), fontsize=6)\n plt.setp(ax.get_yticklabels(), fontsize=6)\n\n for tick in ax.get_xticklabels():\n tick.set_visible(True)\n \n for loc in ['top','bottom','left','right']:\n ax.spines[loc].set_linewidth(.75)\n\nplot.save_figure(dir_supp+'figures/supp_figure_seq_subclonal_dynamics/supp_figure_seq_subclonal_dynamics_cross')\nplt.show()", "_____no_output_____" ] ], [ [ "**Fig. S3:** Subclonal dynamics in time for WAxNA founders evolved in (**A**) hydroxyurea and (**B**) rapamycin, measured by whole-population sequencing. Time is on the $x$-axis, starting after crossing when the population has no competing subclones. Cumulative haplotype frequency of subclones (bars) and allele frequency of *de novo* mutants (lines) are on the $y$-axis. Driver mutations are solid lines and passenger mutations are dashed lines, colored by subclone assignment; circles and squares denote non-synonymous and synonymous mutations, respectively. 
No macroscopic subclones or *de novo* mutations were detected in any of the control replicates in YPD.", "_____no_output_____" ], [ "## Figure S4 - Sequence evolution of WA, NA founders", "_____no_output_____" ] ], [ [ "panels = {\n 'HU': {\n 'WA_HU_1':(0,0),\n 'WA_HU_2':(0,1),\n 'NA_HU_1':(0,2),\n 'NA_HU_2':(0,3),\n },\n 'RM': {\n 'WA_RM_1':(0,0),\n 'WA_RM_2':(0,1),\n 'NA_RM_1':(0,2),\n 'NA_RM_2':(0,3),\n }\n}\n\npopulations = panels['HU'].keys()+panels['RM'].keys()\ngroups_dn = seq_dn_df[(seq_dn_df.population.isin(populations)) & \\\n (seq_dn_df.clone=='') & \\\n (seq_dn_df.gene!='non-coding')].groupby(['selection','population'])\n\n# Get a figure with a lot of subplots\nfig = plt.figure(figsize=(8, 5))\n\ngrid = gridspec.GridSpec(2, 1, hspace=0.5)\n\ngs = {}\n\nfor (ii, e) in enumerate(['HU','RM']):\n \n nrows = 1\n ncols = 4\n gs[e] = gridspec.GridSpecFromSubplotSpec(nrows, ncols,\n subplot_spec=grid[ii],\n wspace=0.15)\n\n ### De novo mutations ###\n for (jj, p) in enumerate(panels[e].keys()):\n \n # Retrieve axes\n ax = plt.subplot(gs[e][panels[e][p]])\n \n # Set title\n ax.set_title(p.replace('_',' '), fontsize=7, weight='bold')\n \n # Set axes labels\n if (e,p) in [('HU','WA_HU_1'),('RM','WA_RM_1')]:\n ax.set_ylabel('Allele frequency')\n ax.text(-0.15, 1.2, chr(ii + ord('A')), transform=ax.transAxes,\n fontsize=9, fontweight='bold', va='center', ha='right')\n ax.text(0., 1.2, 'Selection: %s' % config.selection['long_label'][e], \n transform=ax.transAxes, fontsize=8, va='center', ha='left')\n ax.set_yticklabels([0.0,0.2,0.4,0.6,0.8,1.0])\n else:\n ax.set_yticklabels([])\n\n ax.set_xlabel(r'Time, $t$ (days)')\n \n # Set legend\n if (e,p) in [('HU','NA_HU_2')]:\n driver_artist = lines.Line2D((0,1),(0,0), color=config.lineages['bulk']['fill'], \n **config.mutation_type['driver'])\n passenger_artist = lines.Line2D((0,1),(0,0), color=config.lineages['bulk']['fill'], \n **config.mutation_type['passenger'])\n nonsyn_artist = lines.Line2D((0,1),(0,0), mfc=config.lineages['bulk']['fill'], linestyle='',\n path_effects=[path_effects.withStroke(linewidth=2, foreground=\"k\")],\n **config.consequence_short['non-synonymous'])\n syn_artist = lines.Line2D((0,1),(0,0), mfc=config.lineages['bulk']['fill'], linestyle='', \n path_effects=[path_effects.withStroke(linewidth=2, foreground=\"k\")],\n **config.consequence_short['synonymous'])\n leg1 = ax.legend([driver_artist,passenger_artist,nonsyn_artist,syn_artist], \n ['driver','passenger','non-synonymous','synonymous'], \n bbox_to_anchor=(1.1, -0.25), ncol=1,\n frameon=False, loc='center left',\n borderaxespad=0, handlelength=1.75, \n title='Mutation', prop={'size':6})\n plt.setp(leg1.get_title(),fontsize=6)\n \n # Set empty panels\n if (e,p) in groups_dn.groups.keys():\n gdn = groups_dn.get_group((e,p))\n else:\n ax.axvspan(8, 32, facecolor='w', edgecolor='0.5', alpha=0.5, hatch='//')\n ax.annotate('Extinct', xy=(16,0.5), fontsize=6, ha='center',\n path_effects=[path_effects.withStroke(linewidth=0.5, foreground=\"w\")])\n continue\n\n for (gene, cds_pos, sub, protein_pos, amino_acids, consequence), gdx in \\\n gdn.groupby(['gene','cds_position','substitution','protein_position','amino_acids','consequence_short']):\n \n assignment = gdx.assignment.unique()[0]\n mutation_type = gdx.mutation_type.unique()[0]\n\n gdx.time = gdx.time.apply(int)\n gdx = gdx.sort_values('time').reset_index(drop=True)\n gdx = gdx.sort_index()\n gdx = gdx.set_index('time')\n \n ax.plot(gdx.index, gdx.frequency,\n color=config.lineages['bulk']['line'], \n 
**utils.merge_two_dicts(config.mutation_type[mutation_type],\n config.consequence_short[consequence]))\n \n if mutation_type=='driver':\n index = np.argmax(gdx.frequency)\n ax.annotate(\n gene, xy=(index,gdx.frequency[index]), style='italic', fontsize=6,\n textcoords='offset points', xytext=(0, 13), ha = 'center', va = 'top',\n path_effects=[path_effects.withStroke(linewidth=0.5, foreground=\"w\")], zorder=3\n )\n ax.annotate(\n amino_acids.split('/')[0]+protein_pos+amino_acids.split('/')[1], \n xy=(index,gdx.frequency[index]), fontsize=5,\n textcoords='offset points', xytext=(0, 7), ha = 'center', va = 'top',\n path_effects=[path_effects.withStroke(linewidth=0.4, foreground=\"w\")], zorder=3\n )\n\nfor ax in fig.get_axes():\n \n ax.set_xlim(2, 32) # axes limits\n ax.set_ylim(0, 1)\n \n ax.xaxis.label.set_size(6)\n ax.yaxis.label.set_size(6)\n ax.tick_params(axis='both', which='major', size=2, labelsize=6)\n ax.tick_params(axis='both', which='minor', size=0, labelsize=0)\n \n plt.setp(ax.get_xticklabels(), fontsize=6)\n plt.setp(ax.get_yticklabels(), fontsize=6)\n \n ax.set_xscale('log', base=2)\n ax.set_xticks([2, 4, 8, 16, 32])\n ax.xaxis.set_major_formatter(ticker.ScalarFormatter())\n ax.yaxis.set_major_locator(ticker.MaxNLocator(nbins=5))\n \n for loc in ['top','bottom','left','right']:\n ax.spines[loc].set_linewidth(0.75)\n\nplot.save_figure(dir_supp+'figures/supp_figure_seq_subclonal_dynamics/supp_figure_seq_subclonal_dynamics_parents')\nplt.show()", "_____no_output_____" ] ], [ [ "**Fig. S4:** Subclonal dynamics in time for WA and NA founders evolved in (**A**) hydroxyurea and (**B**) rapamycin, measured by whole-population sequencing. WA founders evolved in hydroxyurea did not survive after 4 days. Driver mutations are solid lines and passenger mutations are dashed lines, colored by subclone assignment; circles and squares denote non-synonymous and synonymous mutations, respectively. 
No *de novo* mutations were detected in any of the control replicates in YPD.", "_____no_output_____" ], [ "## Figure S9 - Phenotype evolution\n\nWe are inferring the model's components ($F, \\lambda_1, \\sigma_{\\lambda_1}, \\lambda_2, \\sigma_{\\lambda_2}$) using a Gaussian mixture model.", "_____no_output_____" ] ], [ [ "param='rel_growth_rate'\n\nscatter_panels = {\n 'WAxNA_F12_1_HU_2':0,\n 'WAxNA_F12_1_HU_3':1,\n 'WAxNA_F12_2_HU_3':2,\n 'WAxNA_F12_1_RM_3':3,\n 'WAxNA_F12_1_RM_4':4,\n 'WAxNA_F12_2_RM_2':5,\n}\n\ndata = pheno_df[pheno_df.population.isin(scatter_panels.keys())& \\\n np.isfinite(pheno_df[param])] # Take rows where param is finite\ndata = pd.pivot_table(\n data, \n index=['selection','population','group','isolate','gene','genotype_long','assignment'], \n columns='environment', \n values=param,\n aggfunc=np.mean\n)\n\ncorr = pheno_df[pheno_df.population.isin(scatter_panels.keys())& \\\n np.isfinite(pheno_df[param])] # Take rows where param is finite\ncorr = pd.pivot_table(\n corr, \n index=['isolate','gene','genotype_long','assignment'], \n columns=['selection','population','group','environment'], \n values=param,\n aggfunc=np.mean\n)\ncorr = corr.groupby(level=['selection','population','group'], axis=1, group_keys=False)\ncorr = corr.apply(lambda x: x.corr(method='spearman'))\ncorr = corr.query('environment==\\'SC\\'')\ncorr = pd.melt(corr).dropna()\ncorr = corr.pivot_table(columns=['group'], index=['selection','population','environment'], values='value')\n \nfig = plt.figure(figsize=(7.5,5.25))\nfig.subplots_adjust(left=0.02, right=0.98, bottom=0.02, top=0.98)\n\n# Make outer gridspec\ngrid = gridspec.GridSpec(nrows=2, ncols=3, width_ratios=[2, 2, 2], hspace=.5, wspace=.25) \n\ngs = {}\n\nfor ii, ((s, p), gp) in enumerate(data.groupby(level=['selection','population'])):\n print(s, p)\n # Use gridspec to assign different formats to panels in one plot\n gs[(s,p)] = gridspec.GridSpecFromSubplotSpec(nrows=2, ncols=2, hspace=.05, wspace=.05, \n width_ratios=[4,1], height_ratios=[1,4], \n subplot_spec=grid[scatter_panels[p]])\n \n ax = plt.subplot(gs[(s,p)][:])\n ax_scatter = plt.subplot(gs[(s,p)][1,0])\n ax_x = plt.subplot(gs[(s,p)][0,0])\n ax_y = plt.subplot(gs[(s,p)][1,1])\n\n # Define plot ranges at beginning, since used often later\n x = gp['SC'].values\n y = gp[s].values\n \n if s=='HU':\n x_range = [-0.2, 0.45]\n y_range = [-0.175, 0.225]\n x_count_range = [0, 0.4]\n y_count_range = [0, 0.3]\n elif s=='RM':\n x_range = [-0.4, 1.6]\n y_range = [-0.2, 0.19]\n x_count_range = [0, 0.4]\n y_count_range = [0, 0.2]\n\n # Set title\n ax_x.set_title(p.replace('_',' '), fontsize=7, weight='bold')\n \n ax_scatter.annotate(\n 'Ancestral (t = 0d)\\n' r'$\\rho$ = {:.2f}'.format(corr.ix[s, p, s]['ancestral']),\n xy=(1.25, 1.15), xycoords='axes fraction', fontsize=6,\n color=config.population['color']['ancestral'], ha='center', va='bottom'\n )\n ax_scatter.annotate(\n 'Evolved (t = 32d)\\n' r'$\\rho$ = {:.2f}'.format(corr.ix[s, p, s]['evolved']),\n xy=(1.25, 1.025), xycoords='axes fraction', fontsize=6,\n color=config.population['color']['evolved'], ha='center', va='bottom'\n )\n ax_scatter.axvline(x=0, ls='--', lw=1.5, color='lightgray', zorder=0)\n ax_scatter.axhline(y=0, ls='--', lw=1.5, color='lightgray', zorder=0)\n\n for jj, (t, gt) in enumerate(gp.groupby(level='group')):\n \n gt_all = gt.groupby(level=['isolate','gene','genotype_long','assignment']).agg([np.mean])\n gt_random = 
gt.query('assignment==\\'\\'').groupby(level=['isolate','gene','genotype_long','assignment']).agg([np.mean])\n gt_target = gt.query('assignment!=\\'\\'').groupby(level=['isolate','gene','genotype_long','assignment']).agg([np.mean])\n print gt_target\n x_a = gt_all[s]\n y_a = gt_all['SC']\n \n x_r = gt_random[s]\n y_r = gt_random['SC']\n color = config.population['color'][t]\n \n # Scatter plot\n plot.scatter_plot(x_r, y_r, ax=ax_scatter, marker='.', color=color, ms=3)\n ax_scatter.set_xlim(x_range)\n ax_scatter.set_ylim(y_range)\n\n# ax_scatter.annotate(corr.ix[s, p, 'SC'][t],\n# xy=(0.95, 0.05), xycoords='axes fraction', fontsize=8,\n# color=color, ha='right', va='bottom')\n \n for (isolate, gene, genotype, assignment), data in gt_target.iterrows():\n x_t = gt_target[s]\n y_t = gt_target['SC']\n plot.scatter_plot(x_t, y_t, ax=ax_scatter, marker='o', ms=3, mec='k', mfc=color)\n ax_scatter.annotate(\n gene, xy = (data[s], data['SC']), xycoords='data', \n xytext = (0, 8), textcoords = 'offset points', \n ha = 'center', va = 'top', \n fontsize = 6, style = 'italic', \n path_effects=[path_effects.withStroke(linewidth=0.5, foreground=\"w\")]\n )\n \n # x-axis\n plot.histogram_x(x_r, ax=ax_x, time=t)\n ax_x.set_xlim(x_range)\n ax_x.set_ylim(y_count_range)\n \n # Mean of sequenced isolates \n# lollipops(x_s, ax_x)\n \n # y-axis\n plot.histogram_y(y_r, ax=ax_y, time=t)\n ax_y.set_xlim(x_count_range)\n ax_y.set_ylim(y_range)\n\n# Set axes labels\nax = plt.subplot(gs[('HU','WAxNA_F12_1_HU_3')][1,0])\nax.set_xlabel('%s\\nRel. growth rate, $\\lambda_k(t)$' % config.environment['long_label']['HU'])\nax = plt.subplot(gs[('HU','WAxNA_F12_1_HU_2')][1,0])\nax.set_ylabel('Rel. growth rate, $\\lambda_k(t)$\\n%s' % config.environment['long_label']['SC'])\n\nax = plt.subplot(gs[('RM','WAxNA_F12_1_RM_4')][1,0])\nax.set_xlabel('%s\\nRel. growth rate, $\\lambda_k(t)$' % config.environment['long_label']['RM'])\nax = plt.subplot(gs[('RM','WAxNA_F12_1_RM_3')][1,0])\nax.set_ylabel('Rel. 
growth rate, $\\lambda_k(t)$\\n%s' % config.environment['long_label']['SC'])\n\n# Set panel labels\nax = plt.subplot(gs[('HU','WAxNA_F12_1_HU_2')][0,0])\nax.text(-.2, 1.75, chr(ord('A')), transform=ax.transAxes,\n fontsize=9, fontweight='bold', va='center', ha='right')\nax = plt.subplot(gs[('HU','WAxNA_F12_1_HU_3')][0,0])\nax.text(0.5, 1.75, 'Selection: %s' % config.selection['long_label']['HU'], transform=ax.transAxes,\n fontsize=8, va='center', ha='center')\nax = plt.subplot(gs[('RM','WAxNA_F12_1_RM_3')][0,0])\nax.text(-.2, 1.75, chr(ord('B')), transform=ax.transAxes,\n fontsize=9, fontweight='bold', va='center', ha='right')\nax = plt.subplot(gs[('RM','WAxNA_F12_1_RM_4')][0,0])\nax.text(0.5, 1.75, 'Selection: %s' % config.selection['long_label']['RM'], transform=ax.transAxes,\n fontsize=8, va='center', ha='center')\n \n# Axes limits\nfor ax in fig.get_axes():\n ax.xaxis.label.set_size(6)\n ax.yaxis.label.set_size(6)\n ax.tick_params(axis='both', which='major', size=2, labelsize=6)\n ax.tick_params(axis='both', which='minor', size=0, labelsize=6)\n \n for loc in ['top','bottom','left','right']:\n ax.spines[loc].set_linewidth(0.75)\n\nplot.save_figure(dir_supp+'figures/supp_figure_pheno_evolution/supp_figure_pheno_evolution')\nplt.show()", "(u'HU', u'WAxNA_F12_1_HU_2')\nEmpty DataFrame\nColumns: [(HU, mean), (RM, mean), (SC, mean)]\nIndex: []\n HU RM SC\n mean mean mean\nisolate gene genotype_long assignment \n1 RNR4 RNR4*/RNR4* bulk -0.031631 NaN -0.050832\n2 RNR4 RNR4*/RNR4* bulk 0.058815 NaN -0.094872\n(u'HU', u'WAxNA_F12_1_HU_3')\nEmpty DataFrame\nColumns: [(HU, mean), (RM, mean), (SC, mean)]\nIndex: []\n HU RM SC\n mean mean mean\nisolate gene genotype_long assignment \n1 RNR2 RNR2*/RNR2 subclone A 0.176067 NaN -0.030428\n2 RNR2 RNR2*/RNR2 subclone A 0.209420 NaN -0.037551\n3 RNR2 RNR2*/RNR2 subclone A 0.192393 NaN -0.056146\n4 RNR2 RNR2*/RNR2 subclone A 0.203274 NaN -0.046559\n5 RNR2 RNR2*/RNR2 subclone A 0.349191 NaN 0.032273\n6 RNR2 RNR2*/RNR2 subclone A 0.384629 NaN 0.070329\n(u'HU', u'WAxNA_F12_2_HU_3')\nEmpty DataFrame\nColumns: [(HU, mean), (RM, mean), (SC, mean)]\nIndex: []\n HU RM SC\n mean mean mean\nisolate gene genotype_long assignment \n1 RNR4 RNR4*/RNR4* subclone A 0.186776 NaN -0.025375\n2 RNR2 RNR2*/RNR2 bulk 0.167723 NaN 0.058833\n3 RNR2 RNR2*/RNR2 bulk 0.173507 NaN -0.018978\n4 RNR4 RNR4*/RNR4* subclone A 0.174113 NaN -0.056191\n5 RNR4 RNR4*/RNR4* subclone A 0.153926 NaN -0.089085\n6 RNR4 RNR4*/RNR4* subclone A 0.158836 NaN -0.056436\n(u'RM', u'WAxNA_F12_1_RM_3')\nEmpty DataFrame\nColumns: [(HU, mean), (RM, mean), (SC, mean)]\nIndex: []\n HU RM SC\n mean mean mean\nisolate gene genotype_long assignment \n1 TOR1 TOR1*/TOR1 bulk NaN 1.300394 0.057327\n2 FPR1 FPR1*/FPR1* subclone A NaN 0.868465 -0.136658\n3 FPR1 FPR1*/FPR1* subclone A NaN 0.842563 -0.123537\n(u'RM', u'WAxNA_F12_1_RM_4')\nEmpty DataFrame\nColumns: [(HU, mean), (RM, mean), (SC, mean)]\nIndex: []\nEmpty DataFrame\nColumns: [(HU, mean), (RM, mean), (SC, mean)]\nIndex: []\n(u'RM', u'WAxNA_F12_2_RM_2')\nEmpty DataFrame\nColumns: [(HU, mean), (RM, mean), (SC, mean)]\nIndex: []\n HU RM SC\n mean mean mean\nisolate gene genotype_long assignment \n1 bulk NaN -0.010488 -0.006145\n2 bulk NaN -0.018509 -0.055439\n3 bulk NaN 0.017906 -0.028011\n4 bulk NaN -0.045135 -0.069053\n5 bulk NaN 0.118939 0.011753\n6 TOR1 TOR1*/TOR1 bulk NaN 0.740531 -0.033746\n" ] ], [ [ "**Fig. S9:** Variability in intra-population growth rate and fitness correlations. 
Fitness correlations of ancestral and evolved populations across environments, estimated by random sampling of individuals at initial (0 days, green) and final time points (32 days, purple), before and after selection in (**A**) hydroxyurea and (**B**) rapamycin. The relative growth rate $\\lambda_k(t)$ per individual $k$ is shown, calculated by averaging over ${n_r\\,{=}\\,32}$ technical replicates per individual. Relative growth rates are normalized with respect to the mean population growth rate $\\langle\\lambda_k\\rangle_{t=0}$ at $t=0$ days (see Figures 3B and 3D). The relative growth rates $\\lambda_k(t)$ in the stress environment ($x$-axis) are compared to the control environment ($y$-axis). Using a Gaussian mixture model, we found the posterior probability of the mixture modes of the the best-fit mixture (solid lines). The posterior means of the distribution modes are indicated as dashed lines. The fitter individuals carry driver mutations, as determined by targeted sampling and sequencing. Spearman's rank correlation, $\\rho$, is shown on the top right of each panel, to assess the association between the growth rate of isolates in the stress and control environments at 0 and 32 days.\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
4a6acfe6194dd4c2ccd79dd8e82a4f9b2eee3017
22,190
ipynb
Jupyter Notebook
StkAutomation/Python/Problem_Specific/ConstellationWizard/ConstellationWizardUI.ipynb
jeremy-fields/STKCodeExamples
e46c44c693aef55bd23522e0b2d527ec677bf784
[ "Adobe-2006" ]
1
2021-07-31T14:51:53.000Z
2021-07-31T14:51:53.000Z
StkAutomation/Python/Problem_Specific/ConstellationWizard/ConstellationWizardUI.ipynb
jeremy-fields/STKCodeExamples
e46c44c693aef55bd23522e0b2d527ec677bf784
[ "Adobe-2006" ]
1
2020-08-11T21:14:46.000Z
2021-07-28T19:42:10.000Z
StkAutomation/Python/Problem_Specific/ConstellationWizard/ConstellationWizardUI.ipynb
jeremy-fields/STKCodeExamples
e46c44c693aef55bd23522e0b2d527ec677bf784
[ "Adobe-2006" ]
null
null
null
43.087379
242
0.583776
[ [ [ "# The Constellation Wizard requires a STK Scenario to be open", "_____no_output_____" ], [ "Simply run the cell below and the constelation wizard will appear", "_____no_output_____" ] ], [ [ "from tkinter import Tk\nfrom tkinter.ttk import *\nfrom tkinter import W\nfrom tkinter import E\nfrom tkinter import scrolledtext\nfrom tkinter import INSERT\nfrom tkinter import END\nfrom tkinter import IntVar\nfrom tkinter import messagebox\nfrom DeckAccessReaderGUI import *\nimport numpy as np\nimport pandas as pd\nimport os\nfrom os import listdir\nfrom os.path import isfile, join\nfrom shutil import copyfile\nfrom comtypes.client import CreateObject\nfrom comtypes.client import GetActiveObject\nfrom comtypes.gen import STKObjects\n\n\n\n# Define window layout\nwindow = Tk()\nwindow.title('Constellation Wizard')\nwindow.geometry('587x510')\ncwd = os.getcwd()\ncwdFiles = cwd+'\\\\Files'\nwindow.iconbitmap(cwdFiles+'\\\\Misc\\\\'+'ConstellationWizardIcon.ico')\n\n\n# # Configure Style\nStyle().theme_use('vista')\n\n# # fonts for all widgets\n# window.option_add(\"*Font\", \"calabri 9\")\n\n\n######################################### Col0 ########################################################\nwidth = 35\npadx = 3\npady = 1\ncolumn=0\nrow = 1\n# Connect to STK\ntry:\n root = ConnectToSTK(version=12)\n startTime = root.CurrentScenario.QueryInterface(STKObjects.IAgScenario).StartTime\n stopTime = root.CurrentScenario.QueryInterface(STKObjects.IAgScenario).StartTime+3600\nexcept:\n res = messagebox.askyesno('Constellation Wizard','Failed to connect to a scenario.\\nIs a scenario in STK open?')\n if res == True:\n try:\n root = ConnectToSTK(version=12)\n startTime = root.CurrentScenario.QueryInterface(STKObjects.IAgScenario).StartTime\n stopTime = root.CurrentScenario.QueryInterface(STKObjects.IAgScenario).StartTime+3600\n except:\n window.quit()\n window.destroy()\n else:\n window.quit()\n window.destroy()\n \n\n\ndef createConMsgBox():\n res = messagebox.askyesno('Constellation Wizard',txt.get().replace(' ','-')+'.tce will be created and overwrite any existing file.\\nThis may take a while if there are many satellites in the scenario.\\nContinue?')\n if res == True:\n CreateConstellation(root,txt,txtBox,ssc=00000)\n\nbtnCreateCon = Button(window,width=width,text='Create Constellation From STK',command=lambda: createConMsgBox())\nbtnCreateCon.grid(column=column,row=row,padx=padx,pady=pady,columnspan = 2)\nrow += 1\n\n# Load MTO\nbtnLoadMTO = Button(window,width=width,text='Load Constellation as MTO',command=lambda: LoadMTO(root,txtBox,MTOName = comboCon.get(),timestep=60,color=comboColor.get().lower(),orbitsOnOrOff=onOffStr(),orbitFrame=frameValue()))\nbtnLoadMTO.grid(column=column,row=row,padx=padx,pady=pady,columnspan = 2)\nrow += 1\n\n# Orbit options\nlblFrame = Label(window,text = 'Show Orbits:')\nlblFrame.grid(column=column,row=row,padx=padx,pady=pady,sticky=E)\n\n# Checkbox\ndef onOffStr():\n onOff = showOrbits.get()\n if onOff == 0: \n onOff = 'off'\n elif onOff == 1:\n onOff = 'on' \n return onOff\n\nshowOrbits = IntVar()\nshowOrbits.set(0)\ncheckButton = Checkbutton(window, variable=showOrbits,offvalue=0,onvalue=1)\ncheckButton.grid(column=column+1,row=row,padx=padx,pady=pady,sticky=W)\n\nrow += 1\nrow += 1\n\n# Run Deck Access\nbtnDeckAccess = Button(window,width=width,text='Run Deck Access',command=lambda: runDeckAccess(root,txtStart.get(),txtStop.get(),comboCon,comboDA,txtBox,constraintSatName = 
comboSat.get()))\nbtnDeckAccess.grid(column=column,row=row,padx=padx,pady=pady,columnspan = 2)\nrow += 1\n\n# Save Deck Access\ndef saveDA():\n newName = txt.get().replace(' ','-')\n res = messagebox.askyesno('Constellation Wizard',newName+'.tce will be created and overwrite any existing file.\\nContinue?')\n if res == True:\n copyfile(cwdFiles+'\\\\Constellations\\\\deckAccessTLE.tce', cwdFiles+'\\\\Constellations\\\\'+newName+'.tce')\n txtBox.insert(END,'Created: '+txt.get().replace(' ','-')+'.tce\\n')\n \nbtnSave = Button(window,text='Save Deck Access',command=saveDA)\nbtnSave.grid(column=column,row=row,padx=padx,pady=pady,columnspan = 1,sticky=W+E)\nrow += 2\n\n# # Load Subset\nbtnLoadSubset = Button(window,width=width,text='Load Satellites Using Template',command= lambda: LoadSatsFromFileUsingTemplate(root,txtStart.get(),txtStop.get(),comboCon,selected,txtBox,comboSat.get(),color=comboColor.get().lower()))\nbtnLoadSubset.grid(column=column,row=row,padx=padx,pady=pady,columnspan = 2)\nrow += 2\n\n# Do Analysis\ndef AddToChain():\n addObj = comboChainCov.get()\n chainName = comboChain.get()\n try:\n chain = root.GetObjectFromPath('*/Chain/'+chainName)\n chain2 = chain.QueryInterface(STKObjects.IAgChain)\n chain2.Objects.Add(addObj)\n txtBox.insert(END,'Added: '+addObj.split('/')[-1]+'\\n')\n except:\n txtBox.insert(END,'Failed to Add: '+addObj.split('/')[-1]+'\\n')\n \nbtnAddChain = Button(window,width=width,text='Add To Chain',command=AddToChain)\nbtnAddChain.grid(column=column,row=row,padx=padx,pady=pady,columnspan = 2)\nrow += 1\n\n# Do Analysis\ndef computeChain():\n chainName = comboChain.get()\n if root.CurrentScenario.Children.Contains(STKObjects.eChain,chainName):\n chain = root.GetObjectFromPath('*/Chain/'+chainName)\n chain2 = chain.QueryInterface(STKObjects.IAgChain)\n chain2.ClearAccess()\n chain2.ComputeAccess()\n txtBox.insert(END,'Computed: '+chainName+'\\n')\n else:\n txtBox.insert(END,'Failed to Compute: '+chainName+'\\n')\nbtnComputeChain = Button(window,text='Compute Chain',command=computeChain)\nbtnComputeChain.grid(column=column,row=row,padx=padx,pady=pady,columnspan = 1,sticky=W+E)\n\n\ndef removeAssets():\n chainName = comboChain.get()\n if root.CurrentScenario.Children.Contains(STKObjects.eChain,chainName):\n chain = root.GetObjectFromPath('*/Chain/'+chainName)\n chain2 = chain.QueryInterface(STKObjects.IAgChain)\n chain2.Objects.RemoveAll()\n txtBox.insert(END,'Removed Objects: '+chainName+'\\n')\n else:\n txtBox.insert(END,'Failed to Removed Objects: '+chainName+'\\n') \nbtnRemoveChain = Button(window,text='Remove Objects',command=removeAssets)\nbtnRemoveChain.grid(column=column+1,row=row,padx=padx,pady=pady,columnspan = 1,sticky=W+E)\nrow += 1\n\n # Do Analysis\ndef AddToCoverage():\n addObj = comboChainCov.get()\n covName = comboCov.get()\n if root.CurrentScenario.Children.Contains(STKObjects.eCoverageDefinition,covName):\n cov = root.GetObjectFromPath('*/CoverageDefinition/'+covName)\n cov2 = cov.QueryInterface(STKObjects.IAgCoverageDefinition)\n if cov2.AssetList.CanAssignAsset(addObj):\n cov2.AssetList.Add(addObj)\n txtBox.insert(END,'Added: '+addObj.split('/')[-1]+'\\n')\n else:\n txtBox.insert(END,'Already Assigned: '+addObj.split('/')[-1]+'\\n')\n else:\n txtBox.insert(END,'Failed to Add: '+addObj.split('/')[-1]+'\\n')\n \nbtnAddCoverage = Button(window,width=width,text='Add To Coverage',command=AddToCoverage)\nbtnAddCoverage.grid(column=column,row=row,padx=padx,pady=pady,columnspan = 2)\nrow += 1\n\n# Do Analysis\ndef computeCov():\n covName = 
comboCov.get()\n if root.CurrentScenario.Children.Contains(STKObjects.eCoverageDefinition,covName):\n cov = root.GetObjectFromPath('*/CoverageDefinition/'+covName)\n cov2 = cov.QueryInterface(STKObjects.IAgCoverageDefinition)\n cov2.ClearAccesses()\n cov2.ComputeAccesses()\n txtBox.insert(END,'Computed: '+covName+'\\n')\n else:\n txtBox.insert(END,'Failed to Compute: '+covName+'\\n')\nbtnComputeCoverage = Button(window,text='Compute Coverage',command=computeCov)\nbtnComputeCoverage.grid(column=column,row=row,padx=padx,pady=pady,columnspan = 1,sticky=W+E)\n\ndef removeAssestsCov():\n covName = comboCov.get()\n if root.CurrentScenario.Children.Contains(STKObjects.eCoverageDefinition,covName):\n cov = root.GetObjectFromPath('*/CoverageDefinition/'+covName)\n cov2 = cov.QueryInterface(STKObjects.IAgCoverageDefinition)\n cov2.AssetList.RemoveAll()\n txtBox.insert(END,'Removed Assets: '+covName+'\\n')\n else:\n txtBox.insert(END,'Failed to Removed Assets: '+covName+'\\n')\nbtnRemoveCov = Button(window,text='Remove Assets',command=removeAssestsCov)\nbtnRemoveCov.grid(column=column+1,row=row,padx=padx,pady=pady,columnspan = 1,sticky=W+E)\nrow += 1\nrow += 3\n\n\ntxtBox = scrolledtext.ScrolledText(window,width=35,height=10)\ntxtBox.insert(INSERT,'Connected: '+root.CurrentScenario.InstanceName+'\\n')\ntxtBox.grid(column=column,row=row,padx=padx+0,pady=pady,rowspan=4,columnspan = 3,sticky=W+E)\nrowTxt = row\n\n\n######################################### Col2 ########################################################\n# Labels\nwidth2 = 30\ncolumn = 2\nrow = 1\nlblCreateCon = Label(window,text = 'Create/Save Constellation:')\nlblCreateCon.grid(column=column,row=row,padx=padx,pady=pady,sticky=E)\nrow += 1\nlblCon = Label(window,text = 'Constellation:')\nlblCon.grid(column=column,row=row,padx=padx,pady=pady,sticky=E)\nrow += 1\n# MTO Options\nrow += 1\nlblColor = Label(window,text = 'MTO/Satellite Color:')\nlblColor.grid(column=column,row=row,padx=padx,pady=pady,sticky=E)\nrow +=1\nlblDA = Label(window,text = 'Access From:')\nlblDA.grid(column=column,row=row,padx=padx,pady=pady,sticky=E)\nrow += 1\nlblStart = Label(window,text = 'Start Time:')\nlblStart.grid(column=column,row=row,padx=padx,pady=pady,sticky=E)\nrow += 1\nlblStop = Label(window,text = 'Stop Time:')\nlblStop.grid(column=column,row=row,padx=padx,pady=pady,sticky=E)\nrow += 1\nlblSatTemp = Label(window,text = 'Satellite Template:')\nlblSatTemp.grid(column=column,row=row,padx=padx,pady=pady,sticky=E)\nrow += 2\nlblSatTemp = Label(window,text = 'Chain/Coverage Object:')\nlblSatTemp.grid(column=column,row=row,padx=padx,pady=pady,sticky=E)\nrow += 1\nlblSatTemp = Label(window,text = 'Chain:')\nlblSatTemp.grid(column=column,row=row,padx=padx,pady=pady,sticky=E)\nrow += 1\nlblSatTemp = Label(window,text = 'Coverage:')\nlblSatTemp.grid(column=column,row=row,padx=padx,pady=pady,sticky=E)\nrow += 2\n\n######################################### Col3 ########################################################\ncolumn = 3\nrow=1\n# Entry box for Create Constellation\ntxt = Entry(window,width=width2+3)\ntxt.delete(0, END)\ntxt.insert(0, 'NewConstellationName')\ntxt.grid(column=column, row=row,padx=padx,pady=pady,columnspan=2,sticky=W)\nrow += 1\n\n# Constellation Options\ndef updateTCEList():\n tceList = [f.split('.')[0] for f in listdir(cwdFiles+'\\\\Constellations') if (isfile(join(cwdFiles+'\\\\Constellations', f))) & (f.split('.')[-1]=='tce' )& (f !='deckAccessTLE.tce')]\n comboCon['values'] = tceList\n \ncomboCon = 
Combobox(window,width=width2,state='readonly',postcommand = updateTCEList)\nupdateTCEList()\ncomboCon.current(0) # set the selected item\ncomboCon.grid(column=column,row=row,padx=padx,pady=pady,columnspan = 2,sticky=W)\nrow += 1\n\n\n# Radio Buttons\ndef frameValue():\n frame = selectedFrame.get()\n if frame == 0:\n frame = 'Inertial'\n elif frame == 1:\n frame = 'Fixed'\n return frame\n \nselectedFrame = IntVar()\nselectedFrame.set(0)\nradFrame1 = Radiobutton(window,text='Inertial', value=0, variable=selectedFrame)\nradFrame2 = Radiobutton(window,text='Fixed', value=1, variable=selectedFrame)\nradFrame1.grid(column=column-1,row=row,padx=padx,pady=pady)\nradFrame2.grid(column=column,row=row,padx=padx,pady=pady,columnspan = 2,sticky=W)\nrow += 1\n\n# Colors\ncolorsList = ['Green','Cyan','Blue','Magenta','Red','Yellow','White','Black']\ncomboColor = Combobox(window,width=width2,state='readonly')\ncomboColor['values'] = colorsList\ncomboColor.current(0) # set the selected item\ncomboColor.grid(column=column,row=row,padx=padx,pady=pady,columnspan = 2,sticky=W)\nrow +=1\n\n\n# Deck Access Available Objects\ndef updateAccessList(root):\n objs = deckAccessAvailableObjs(root)\n for ii in range(len(objs)):\n objType = objs[ii].split('/')[-2]\n if objType == 'Sensor':\n objs[ii] = '/'.join(objs[ii].split('/')[-4:])\n else:\n objs[ii] = '/'.join(objs[ii].split('/')[-2:])\n comboDA['values'] = objs\n \ncomboDA = Combobox(window,width=width2,state='readonly',postcommand = lambda: updateAccessList(root))\nupdateAccessList(root)\ntry:\n comboDA.current(0) # set the selected item\nexcept:\n pass\ncomboDA.grid(column=column,row=row,padx=padx,pady=pady,columnspan = 2,sticky=W)\nrow += 1\n\n# Entry box Times\nstartTimeUTCG = root.ConversionUtility.ConvertDate('EpSec','UTCG',str(startTime))\ntxtStart = Entry(window,width=width2+3)\ntxtStart.delete(0, END)\ntxtStart.insert(0, startTimeUTCG)\ntxtStart.grid(column=column,row=row,padx=padx,pady=pady,columnspan=2,sticky=W)\nstartTime = root.ConversionUtility.ConvertDate('UTCG','EpSec',str(txtStart.get()))\nrow += 1\n\nstopTimeUTCG = root.ConversionUtility.ConvertDate('EpSec','UTCG',str(stopTime))\ntxtStop = Entry(window,width=width2+3)\ntxtStop.delete(0, END)\ntxtStop.insert(0, stopTimeUTCG)\ntxtStop.grid(column=column,row=row,padx=padx,pady=pady,columnspan=2,sticky=W)\nstopTime = root.ConversionUtility.ConvertDate('UTCG','EpSec',str(txtStop.get()))\nrow += 1\n\n# Satellite Template\ndef updateSatList(root): \n sats = FilterObjectsByType(root,'Satellite',name = '')\n for ii in range(len(sats)):\n sats[ii] = sats[ii].split('/')[-1]\n sats.insert(0,'')\n comboSat['values'] = sats\ncomboSat = Combobox(window,width=width2,state='readonly',postcommand = lambda: updateSatList(root))\nupdateSatList(root)\ntry:\n comboSat.current(0) # set the selected item\nexcept:\n pass\ncomboSat.grid(column=column,row=row,padx=padx,pady=pady,columnspan = 2,sticky=W)\nrow += 1\n\n\n# Radio Buttons\nselected = IntVar()\nselected.set(1)\nrad1 = Radiobutton(window,text='Deck Access Only', value=1, variable=selected)\nrad2 = Radiobutton(window,text='Entire Constellation', value=2, variable=selected)\nrad1.grid(column=column-1,row=row,padx=padx,pady=pady)\nrad2.grid(column=column,row=row,padx=padx,pady=pady,columnspan = 2,sticky=W)\n\nrow += 1\n\n# Deck Access Available Objects\n\ndef updateChainCovList(root):\n objs = chainCovAvailableObjs(root)\n for ii in range(len(objs)):\n objSplit = objs[ii].split('/')\n if objSplit[-4] =='Scenario':\n objs[ii] = '/'.join(objSplit[-2:])\n elif 
objSplit[-4]=='Sensor':\n objs[ii] = '/'.join(objSplit[-6:])\n else:\n objs[ii] = '/'.join(objSplit[-4:])\n comboChainCov['values'] = objs\ncomboChainCov = Combobox(window,width=width2,state='readonly',postcommand = lambda: updateChainCovList(root))\nupdateChainCovList(root)\ntry:\n comboChainCov.current(0) # set the selected item\nexcept:\n pass\ncomboChainCov.grid(column=column,row=row,padx=padx,pady=pady,columnspan = 2,sticky=W)\nrow += 1\n\n# Chain Template\ndef updateChainList(root): \n chains = FilterObjectsByType(root,'Chain',name = '')\n for ii in range(len(chains)):\n chains[ii] = chains[ii].split('/')[-1]\n# chains.insert(0,'')\n comboChain['values'] = chains\ncomboChain = Combobox(window,width=width2,state='readonly',postcommand = lambda: updateChainList(root))\nupdateChainList(root)\ntry:\n comboChain.current(0) # set the selected item\nexcept:\n pass\ncomboChain.grid(column=column,row=row,padx=padx,pady=pady,columnspan = 2,sticky=W)\nrow += 1\n\n# Chain Coverage\ndef updateCovList(root): \n covs = FilterObjectsByType(root,'CoverageDefinition',name = '')\n for ii in range(len(covs)):\n covs[ii] = covs[ii].split('/')[-1]\n# covs.insert(0,'')\n comboCov['values'] = covs\ncomboCov = Combobox(window,width=width2,state='readonly',postcommand = updateCovList)\nupdateCovList(root)\ntry:\n comboCov.current(0) # set the selected item\nexcept:\n pass\ncomboCov.grid(column=column,row=row,padx=padx,pady=pady,columnspan = 2,sticky=W)\nrow += 2\n\n# row += 4\n \n\n# Unload Satellites\nbtnUnload = Button(window,width=15,text='Unload Satellites',command=lambda: UnloadObjs(root,'Satellite',pattern=txtUnload.get()))\nbtnUnload.grid(column=3,row=rowTxt+0,padx=padx,pady=pady,sticky=W+E)\n\ntxtUnload = Entry(window,width=15)\ntxtUnload.delete(0, END)\ntxtUnload.insert(0, 'tle-*')\ntxtUnload.grid(column=4,row=rowTxt+0,padx=padx,pady=pady,columnspan = 1,sticky=W)\n\nbtnUnloadMTO = Button(window,width=15,text='Unload MTOs',command=lambda: UnloadObjs(root,'MTO',pattern=txtUnloadMTO.get()))\nbtnUnloadMTO.grid(column=3,row=rowTxt+1,padx=padx,pady=pady,sticky=W)\n\ntxtUnloadMTO = Entry(window,width=15)\ntxtUnloadMTO.delete(0, END)\ntxtUnloadMTO.insert(0, '*')\ntxtUnloadMTO.grid(column=4,row=rowTxt+1,padx=padx,pady=pady,columnspan = 1,sticky=W)\n\nbtnUnloadCon = Button(window,width=15,text='Unload Con.',command=lambda: UnloadObjs(root,'Constellation',pattern=txtUnloadCon.get()))\nbtnUnloadCon.grid(column=3,row=rowTxt+2,padx=padx,pady=pady,sticky=W)\n\ntxtUnloadCon = Entry(window,width=15)\ntxtUnloadCon.delete(0, END)\ntxtUnloadCon.insert(0, '*')\ntxtUnloadCon.grid(column=4,row=rowTxt+2,padx=padx,pady=pady,columnspan = 1,sticky=W)\n\ndef clear():\n txtBox.delete(1.0,END)\nbtnClear = Button(window,width=15,text='Clear TextBox',command=clear)\nbtnClear.grid(column=3,row=rowTxt+3,padx=padx,pady=pady,sticky=W)\n\n\n# Keep window open\nwindow.mainloop()", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ] ]
4a6aec879b466c0966bd652396ba9b44970c725e
852
ipynb
Jupyter Notebook
scikit-learn/api/neighbors.ipynb
kingreatwill/penter
2d027fd2ae639ac45149659a410042fe76b9dab0
[ "MIT" ]
13
2020-01-04T07:37:38.000Z
2021-08-31T05:19:58.000Z
scikit-learn/api/neighbors.ipynb
kingreatwill/penter
2d027fd2ae639ac45149659a410042fe76b9dab0
[ "MIT" ]
3
2020-06-05T22:42:53.000Z
2020-08-24T07:18:54.000Z
scikit-learn/api/neighbors.ipynb
kingreatwill/penter
2d027fd2ae639ac45149659a410042fe76b9dab0
[ "MIT" ]
9
2020-10-19T04:53:06.000Z
2021-08-31T05:20:01.000Z
20.285714
86
0.551643
[ [ [ "## Kernel Density Estimation.\nneighbors.KernelDensity\n\n[机器学习——概率密度估计随笔](https://blog.csdn.net/qq_35692819/article/details/105982057)\n\n[非参数估计:核密度估计KDE](https://blog.csdn.net/pipisorry/article/details/53635895)\n", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown" ] ]
4a6aee9cd8a5c37ee033bb46cec7be923e7fc138
76,163
ipynb
Jupyter Notebook
04_user_guide/44_cookbook.ipynb
olippuner/Pandas_Docu_as_Notebooks
2a496d0684ba9d6502dd065b9125719f6b9fc3e7
[ "BSD-3-Clause" ]
null
null
null
04_user_guide/44_cookbook.ipynb
olippuner/Pandas_Docu_as_Notebooks
2a496d0684ba9d6502dd065b9125719f6b9fc3e7
[ "BSD-3-Clause" ]
null
null
null
04_user_guide/44_cookbook.ipynb
olippuner/Pandas_Docu_as_Notebooks
2a496d0684ba9d6502dd065b9125719f6b9fc3e7
[ "BSD-3-Clause" ]
null
null
null
26.556137
246
0.534472
[ [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nprint(\"pandas\", pd.__version__)\nprint(\"numpy\",np.__version__)", "_____no_output_____" ] ], [ [ "# Cookbook\n\nThis is a repository for *short and sweet* examples and links for useful pandas recipes.\nWe encourage users to add to this documentation.\n\nAdding interesting links and/or inline examples to this section is a great *First Pull Request*.\n\nSimplified, condensed, new-user friendly, in-line examples have been inserted where possible to\naugment the Stack-Overflow and GitHub links. Many of the links contain expanded information,\nabove what the in-line examples offer.\n\npandas (pd) and NumPy (np) are the only two abbreviated imported modules. The rest are kept\nexplicitly imported for newer users.", "_____no_output_____" ], [ "## Idioms\n\n\n<a id='cookbook-idioms'></a>\nThese are some neat pandas `idioms`\n\n[if-then/if-then-else on one column, and assignment to another one or more columns:](https://stackoverflow.com/questions/17128302/python-pandas-idiom-for-if-then-else)", "_____no_output_____" ] ], [ [ "df = pd.DataFrame(\n {\"AAA\": [4, 5, 6, 7], \"BBB\": [10, 20, 30, 40], \"CCC\": [100, 50, -30, -50]}\n)\ndf", "_____no_output_____" ] ], [ [ "### if-then…\n\nAn if-then on one column", "_____no_output_____" ] ], [ [ "df.loc[df.AAA >= 5, \"BBB\"] = -1\ndf", "_____no_output_____" ] ], [ [ "An if-then with assignment to 2 columns:", "_____no_output_____" ] ], [ [ "df.loc[df.AAA >= 5, [\"BBB\", \"CCC\"]] = 555\ndf", "_____no_output_____" ] ], [ [ "Add another line with different logic, to do the -else", "_____no_output_____" ] ], [ [ "df.loc[df.AAA < 5, [\"BBB\", \"CCC\"]] = 2000\ndf", "_____no_output_____" ] ], [ [ "Or use pandas where after you’ve set up a mask", "_____no_output_____" ] ], [ [ "df_mask = pd.DataFrame(\n {\"AAA\": [True] * 4, \"BBB\": [False] * 4, \"CCC\": [True, False] * 2}\n)\ndf.where(df_mask, -1000)", "_____no_output_____" ] ], [ [ "[if-then-else using NumPy’s where()](https://stackoverflow.com/questions/19913659/pandas-conditional-creation-of-a-series-dataframe-column)", "_____no_output_____" ] ], [ [ "df = pd.DataFrame(\n {\"AAA\": [4, 5, 6, 7], \"BBB\": [10, 20, 30, 40], \"CCC\": [100, 50, -30, -50]}\n)\ndf", "_____no_output_____" ], [ "df[\"logic\"] = np.where(df[\"AAA\"] > 5, \"high\", \"low\")\ndf", "_____no_output_____" ] ], [ [ "### Splitting\n\n[Split a frame with a boolean criterion](https://stackoverflow.com/questions/14957116/how-to-split-a-dataframe-according-to-a-boolean-criterion)", "_____no_output_____" ] ], [ [ "df = pd.DataFrame(\n {\"AAA\": [4, 5, 6, 7], \"BBB\": [10, 20, 30, 40], \"CCC\": [100, 50, -30, -50]}\n)\ndf", "_____no_output_____" ], [ "df[df.AAA <= 5]\ndf[df.AAA > 5]", "_____no_output_____" ] ], [ [ "### Building criteria\n\n[Select with multi-column criteria](https://stackoverflow.com/questions/15315452/selecting-with-complex-criteria-from-pandas-dataframe)", "_____no_output_____" ] ], [ [ "df = pd.DataFrame(\n {\"AAA\": [4, 5, 6, 7], \"BBB\": [10, 20, 30, 40], \"CCC\": [100, 50, -30, -50]}\n)\ndf", "_____no_output_____" ] ], [ [ "…and (without assignment returns a Series)", "_____no_output_____" ] ], [ [ "df.loc[(df[\"BBB\"] < 25) & (df[\"CCC\"] >= -40), \"AAA\"]", "_____no_output_____" ] ], [ [ "…or (without assignment returns a Series)", "_____no_output_____" ] ], [ [ "df.loc[(df[\"BBB\"] > 25) | (df[\"CCC\"] >= -40), \"AAA\"]", "_____no_output_____" ] ], [ [ "…or (with assignment modifies the DataFrame.)", "_____no_output_____" ] ], [ [ 
"df.loc[(df[\"BBB\"] > 25) | (df[\"CCC\"] >= 75), \"AAA\"] = 0.1\ndf", "_____no_output_____" ] ], [ [ "[Select rows with data closest to certain value using argsort](https://stackoverflow.com/questions/17758023/return-rows-in-a-dataframe-closest-to-a-user-defined-number)", "_____no_output_____" ] ], [ [ "df = pd.DataFrame(\n {\"AAA\": [4, 5, 6, 7], \"BBB\": [10, 20, 30, 40], \"CCC\": [100, 50, -30, -50]}\n)\ndf\naValue = 43.0\ndf.loc[(df.CCC - aValue).abs().argsort()]", "_____no_output_____" ] ], [ [ "[Dynamically reduce a list of criteria using a binary operators](https://stackoverflow.com/questions/21058254/pandas-boolean-operation-in-a-python-list/21058331)", "_____no_output_____" ] ], [ [ "df = pd.DataFrame(\n {\"AAA\": [4, 5, 6, 7], \"BBB\": [10, 20, 30, 40], \"CCC\": [100, 50, -30, -50]}\n)\ndf", "_____no_output_____" ], [ "Crit1 = df.AAA <= 5.5\nCrit2 = df.BBB == 10.0\nCrit3 = df.CCC > -40.0", "_____no_output_____" ] ], [ [ "One could hard code:", "_____no_output_____" ] ], [ [ "AllCrit = Crit1 & Crit2 & Crit3", "_____no_output_____" ] ], [ [ "…Or it can be done with a list of dynamically built criteria", "_____no_output_____" ] ], [ [ "import functools\n\nCritList = [Crit1, Crit2, Crit3]\nAllCrit = functools.reduce(lambda x, y: x & y, CritList)\n\ndf[AllCrit]", "_____no_output_____" ] ], [ [ "\n<a id='cookbook-selection'></a>", "_____no_output_____" ], [ "## Selection", "_____no_output_____" ], [ "### Dataframes\n\nThe indexing docs.\n\n[Using both row labels and value conditionals](https://stackoverflow.com/questions/14725068/pandas-using-row-labels-in-boolean-indexing)", "_____no_output_____" ] ], [ [ "df = pd.DataFrame(\n {\"AAA\": [4, 5, 6, 7], \"BBB\": [10, 20, 30, 40], \"CCC\": [100, 50, -30, -50]}\n)\ndf", "_____no_output_____" ], [ "df[(df.AAA <= 6) & (df.index.isin([0, 2, 4]))]", "_____no_output_____" ] ], [ [ "[Use loc for label-oriented slicing and iloc positional slicing](https://github.com/pandas-dev/pandas/issues/2904)", "_____no_output_____" ] ], [ [ "df = pd.DataFrame(\n {\"AAA\": [4, 5, 6, 7], \"BBB\": [10, 20, 30, 40], \"CCC\": [100, 50, -30, -50]},\n index=[\"foo\", \"bar\", \"boo\", \"kar\"],\n)", "_____no_output_____" ] ], [ [ "There are 2 explicit slicing methods, with a third general case\n\n1. Positional-oriented (Python slicing style : exclusive of end) \n1. Label-oriented (Non-Python slicing style : inclusive of end) \n1. 
General (Either slicing style : depends on if the slice contains labels or positions) \n\n\n\n<dl style='margin: 20px 0;'>\n<dt>::</dt>\n<dd>\ndf.iloc[0:3] # Positional\n\ndf.loc[“bar”:”kar”] # Label\n\n# Generic\ndf[0:3]\ndf[“bar”:”kar”]\n\n</dd>\n\n</dl>\n\nAmbiguity arises when an index consists of integers with a non-zero start or non-unit increment.", "_____no_output_____" ] ], [ [ "data = {\"AAA\": [4, 5, 6, 7], \"BBB\": [10, 20, 30, 40], \"CCC\": [100, 50, -30, -50]}\ndf2 = pd.DataFrame(data=data, index=[1, 2, 3, 4]) # Note index starts at 1.\ndf2.iloc[1:3] # Position-oriented\ndf2.loc[1:3] # Label-oriented", "_____no_output_____" ] ], [ [ "[Using inverse operator (~) to take the complement of a mask](https://stackoverflow.com/questions/14986510/picking-out-elements-based-on-complement-of-indices-in-python-pandas)", "_____no_output_____" ] ], [ [ "df = pd.DataFrame(\n {\"AAA\": [4, 5, 6, 7], \"BBB\": [10, 20, 30, 40], \"CCC\": [100, 50, -30, -50]}\n)\ndf", "_____no_output_____" ], [ "df[~((df.AAA <= 6) & (df.index.isin([0, 2, 4])))]", "_____no_output_____" ] ], [ [ "### New columns\n\n[Efficiently and dynamically creating new columns using applymap](https://stackoverflow.com/questions/16575868/efficiently-creating-additional-columns-in-a-pandas-dataframe-using-map)", "_____no_output_____" ] ], [ [ "df = pd.DataFrame({\"AAA\": [1, 2, 1, 3], \"BBB\": [1, 1, 2, 2], \"CCC\": [2, 1, 3, 1]})\ndf", "_____no_output_____" ], [ "source_cols = df.columns # Or some subset would work too\nnew_cols = [str(x) + \"_cat\" for x in source_cols]\ncategories = {1: \"Alpha\", 2: \"Beta\", 3: \"Charlie\"}\n\ndf[new_cols] = df[source_cols].applymap(categories.get)\ndf", "_____no_output_____" ] ], [ [ "[Keep other columns when using min() with groupby](https://stackoverflow.com/questions/23394476/keep-other-columns-when-using-min-with-groupby)", "_____no_output_____" ] ], [ [ "df = pd.DataFrame(\n {\"AAA\": [1, 1, 1, 2, 2, 2, 3, 3], \"BBB\": [2, 1, 3, 4, 5, 1, 2, 3]}\n)\ndf", "_____no_output_____" ] ], [ [ "Method 1 : idxmin() to get the index of the minimums", "_____no_output_____" ] ], [ [ "df.loc[df.groupby(\"AAA\")[\"BBB\"].idxmin()]", "_____no_output_____" ] ], [ [ "Method 2 : sort then take first of each", "_____no_output_____" ] ], [ [ "df.sort_values(by=\"BBB\").groupby(\"AAA\", as_index=False).first()", "_____no_output_____" ] ], [ [ "Notice the same results, with the exception of the index.\n\n\n<a id='cookbook-multi-index'></a>", "_____no_output_____" ], [ "## Multiindexing\n\nThe multindexing docs.\n\n[Creating a MultiIndex from a labeled frame](https://stackoverflow.com/questions/14916358/reshaping-dataframes-in-pandas-based-on-column-labels)", "_____no_output_____" ] ], [ [ "df = pd.DataFrame(\n {\n \"row\": [0, 1, 2],\n \"One_X\": [1.1, 1.1, 1.1],\n \"One_Y\": [1.2, 1.2, 1.2],\n \"Two_X\": [1.11, 1.11, 1.11],\n \"Two_Y\": [1.22, 1.22, 1.22],\n }\n)\ndf", "_____no_output_____" ], [ "# As Labelled Index\ndf = df.set_index(\"row\")\ndf", "_____no_output_____" ], [ "# With Hierarchical Columns\ndf.columns = pd.MultiIndex.from_tuples([tuple(c.split(\"_\")) for c in df.columns])\ndf", "_____no_output_____" ], [ "# Now stack & Reset\ndf = df.stack(0).reset_index(1)\ndf", "_____no_output_____" ], [ "# And fix the labels (Notice the label 'level_1' got added automatically)\ndf.columns = [\"Sample\", \"All_X\", \"All_Y\"]\ndf", "_____no_output_____" ] ], [ [ "### Arithmetic\n\n[Performing arithmetic with a MultiIndex that needs 
broadcasting](https://stackoverflow.com/questions/19501510/divide-entire-pandas-multiindex-dataframe-by-dataframe-variable/19502176#19502176)", "_____no_output_____" ] ], [ [ "cols = pd.MultiIndex.from_tuples(\n [(x, y) for x in [\"A\", \"B\", \"C\"] for y in [\"O\", \"I\"]]\n)\ndf = pd.DataFrame(np.random.randn(2, 6), index=[\"n\", \"m\"], columns=cols)\ndf", "_____no_output_____" ], [ "df = df.div(df[\"C\"], level=1)\ndf", "_____no_output_____" ] ], [ [ "### Slicing\n\n[Slicing a MultiIndex with xs](https://stackoverflow.com/questions/12590131/how-to-slice-multindex-columns-in-pandas-dataframes)", "_____no_output_____" ] ], [ [ "coords = [(\"AA\", \"one\"), (\"AA\", \"six\"), (\"BB\", \"one\"), (\"BB\", \"two\"), (\"BB\", \"six\")]\nindex = pd.MultiIndex.from_tuples(coords)\ndf = pd.DataFrame([11, 22, 33, 44, 55], index, [\"MyData\"])\ndf", "_____no_output_____" ] ], [ [ "To take the cross section of the 1st level and 1st axis the index:", "_____no_output_____" ] ], [ [ "# Note : level and axis are optional, and default to zero\ndf.xs(\"BB\", level=0, axis=0)", "_____no_output_____" ] ], [ [ "…and now the 2nd level of the 1st axis.", "_____no_output_____" ] ], [ [ "df.xs(\"six\", level=1, axis=0)", "_____no_output_____" ] ], [ [ "[Slicing a MultiIndex with xs, method #2](https://stackoverflow.com/questions/14964493/multiindex-based-indexing-in-pandas)", "_____no_output_____" ] ], [ [ "import itertools\n\nindex = list(itertools.product([\"Ada\", \"Quinn\", \"Violet\"], [\"Comp\", \"Math\", \"Sci\"]))\nheadr = list(itertools.product([\"Exams\", \"Labs\"], [\"I\", \"II\"]))\nindx = pd.MultiIndex.from_tuples(index, names=[\"Student\", \"Course\"])\ncols = pd.MultiIndex.from_tuples(headr) # Notice these are un-named\ndata = [[70 + x + y + (x * y) % 3 for x in range(4)] for y in range(9)]\ndf = pd.DataFrame(data, indx, cols)\ndf", "_____no_output_____" ], [ "All = slice(None)\ndf.loc[\"Violet\"]\ndf.loc[(All, \"Math\"), All]\ndf.loc[(slice(\"Ada\", \"Quinn\"), \"Math\"), All]\ndf.loc[(All, \"Math\"), (\"Exams\")]\ndf.loc[(All, \"Math\"), (All, \"II\")]", "_____no_output_____" ] ], [ [ "[Setting portions of a MultiIndex with xs](https://stackoverflow.com/questions/19319432/pandas-selecting-a-lower-level-in-a-dataframe-to-do-a-ffill)", "_____no_output_____" ], [ "### Sorting\n\n[Sort by specific column or an ordered list of columns, with a MultiIndex](https://stackoverflow.com/questions/14733871/mutli-index-sorting-in-pandas)", "_____no_output_____" ] ], [ [ "df.sort_values(by=(\"Labs\", \"II\"), ascending=False)", "_____no_output_____" ] ], [ [ "[Partial selection, the need for sortedness;](https://github.com/pandas-dev/pandas/issues/2995)", "_____no_output_____" ], [ "### Levels\n\n[Prepending a level to a multiindex](https://stackoverflow.com/questions/14744068/prepend-a-level-to-a-pandas-multiindex)\n\n[Flatten Hierarchical columns](https://stackoverflow.com/questions/14507794/python-pandas-how-to-flatten-a-hierarchical-index-in-columns)\n\n\n<a id='cookbook-missing-data'></a>", "_____no_output_____" ], [ "## Missing data\n\nThe missing data docs.\n\nFill forward a reversed timeseries", "_____no_output_____" ] ], [ [ "df = pd.DataFrame(\n np.random.randn(6, 1),\n index=pd.date_range(\"2013-08-01\", periods=6, freq=\"B\"),\n columns=list(\"A\"),\n)\ndf.loc[df.index[3], \"A\"] = np.nan\ndf", "_____no_output_____" ], [ "df.reindex(df.index[::-1]).ffill()", "_____no_output_____" ] ], [ [ "[cumsum reset at NaN values](https://stackoverflow.com/questions/18196811/cumsum-reset-at-nan)", 
"_____no_output_____" ], [ "### Replace\n\n[Using replace with backrefs](https://stackoverflow.com/questions/16818871/extracting-value-and-creating-new-column-out-of-it)\n\n\n<a id='cookbook-grouping'></a>", "_____no_output_____" ], [ "## Grouping\n\nThe grouping docs.\n\n[Basic grouping with apply](https://stackoverflow.com/questions/15322632/python-pandas-df-groupy-agg-column-reference-in-agg)\n\nUnlike agg, apply’s callable is passed a sub-DataFrame which gives you access to all the columns", "_____no_output_____" ] ], [ [ "df = pd.DataFrame(\n {\n \"animal\": \"cat dog cat fish dog cat cat\".split(),\n \"size\": list(\"SSMMMLL\"),\n \"weight\": [8, 10, 11, 1, 20, 12, 12],\n \"adult\": [False] * 5 + [True] * 2,\n }\n)\ndf", "_____no_output_____" ], [ "# List the size of the animals with the highest weight.\ndf.groupby(\"animal\").apply(lambda subf: subf[\"size\"][subf[\"weight\"].idxmax()])", "_____no_output_____" ] ], [ [ "[Using get_group](https://stackoverflow.com/questions/14734533/how-to-access-pandas-groupby-dataframe-by-key)", "_____no_output_____" ] ], [ [ "gb = df.groupby([\"animal\"])\ngb.get_group(\"cat\")", "_____no_output_____" ] ], [ [ "[Apply to different items in a group](https://stackoverflow.com/questions/15262134/apply-different-functions-to-different-items-in-group-object-python-pandas)", "_____no_output_____" ] ], [ [ "def GrowUp(x):\n avg_weight = sum(x[x[\"size\"] == \"S\"].weight * 1.5)\n avg_weight += sum(x[x[\"size\"] == \"M\"].weight * 1.25)\n avg_weight += sum(x[x[\"size\"] == \"L\"].weight)\n avg_weight /= len(x)\n return pd.Series([\"L\", avg_weight, True], index=[\"size\", \"weight\", \"adult\"])\n\n\nexpected_df = gb.apply(GrowUp)\nexpected_df", "_____no_output_____" ] ], [ [ "[Expanding apply](https://stackoverflow.com/questions/14542145/reductions-down-a-column-in-pandas)", "_____no_output_____" ] ], [ [ "S = pd.Series([i / 100.0 for i in range(1, 11)])\n\ndef cum_ret(x, y):\n return x * (1 + y)\n\ndef red(x):\n return functools.reduce(cum_ret, x, 1.0)\n\nS.expanding().apply(red, raw=True)", "_____no_output_____" ] ], [ [ "[Replacing some values with mean of the rest of a group](https://stackoverflow.com/questions/14760757/replacing-values-with-groupby-means)", "_____no_output_____" ] ], [ [ "df = pd.DataFrame({\"A\": [1, 1, 2, 2], \"B\": [1, -1, 1, 2]})\ngb = df.groupby(\"A\")\n\ndef replace(g):\n mask = g < 0\n return g.where(mask, g[~mask].mean())\n\ngb.transform(replace)", "_____no_output_____" ] ], [ [ "[Sort groups by aggregated data](https://stackoverflow.com/questions/14941366/pandas-sort-by-group-aggregate-and-column)", "_____no_output_____" ] ], [ [ "df = pd.DataFrame(\n {\n \"code\": [\"foo\", \"bar\", \"baz\"] * 2,\n \"data\": [0.16, -0.21, 0.33, 0.45, -0.59, 0.62],\n \"flag\": [False, True] * 3,\n }\n)\ncode_groups = df.groupby(\"code\")\nagg_n_sort_order = code_groups[[\"data\"]].transform(sum).sort_values(by=\"data\")\nsorted_df = df.loc[agg_n_sort_order.index]\nsorted_df", "_____no_output_____" ] ], [ [ "[Create multiple aggregated columns](https://stackoverflow.com/questions/14897100/create-multiple-columns-in-pandas-aggregation-function)", "_____no_output_____" ] ], [ [ "rng = pd.date_range(start=\"2014-10-07\", periods=10, freq=\"2min\")\nts = pd.Series(data=list(range(10)), index=rng)\n\ndef MyCust(x):\n if len(x) > 2:\n return x[1] * 1.234\n return pd.NaT\n\nmhc = {\"Mean\": np.mean, \"Max\": np.max, \"Custom\": MyCust}\nts.resample(\"5min\").apply(mhc)\nts", "_____no_output_____" ] ], [ [ "[Create a value counts column and reassign 
back to the DataFrame](https://stackoverflow.com/questions/17709270/i-want-to-create-a-column-of-value-counts-in-my-pandas-dataframe)", "_____no_output_____" ] ], [ [ "df = pd.DataFrame(\n {\"Color\": \"Red Red Red Blue\".split(), \"Value\": [100, 150, 50, 50]}\n)\ndf", "_____no_output_____" ], [ "df[\"Counts\"] = df.groupby([\"Color\"]).transform(len)\ndf", "_____no_output_____" ] ], [ [ "[Shift groups of the values in a column based on the index](https://stackoverflow.com/q/23198053/190597)", "_____no_output_____" ] ], [ [ "df = pd.DataFrame(\n {\"line_race\": [10, 10, 8, 10, 10, 8], \"beyer\": [99, 102, 103, 103, 88, 100]},\n index=[\n \"Last Gunfighter\",\n \"Last Gunfighter\",\n \"Last Gunfighter\",\n \"Paynter\",\n \"Paynter\",\n \"Paynter\",\n ],\n)\ndf", "_____no_output_____" ], [ "df[\"beyer_shifted\"] = df.groupby(level=0)[\"beyer\"].shift(1)\ndf", "_____no_output_____" ] ], [ [ "[Select row with maximum value from each group](https://stackoverflow.com/q/26701849/190597)", "_____no_output_____" ] ], [ [ "df = pd.DataFrame(\n {\n \"host\": [\"other\", \"other\", \"that\", \"this\", \"this\"],\n \"service\": [\"mail\", \"web\", \"mail\", \"mail\", \"web\"],\n \"no\": [1, 2, 1, 2, 1],\n }\n).set_index([\"host\", \"service\"])\nmask = df.groupby(level=0).agg(\"idxmax\")\ndf_count = df.loc[mask[\"no\"]].reset_index()\ndf_count", "_____no_output_____" ] ], [ [ "[Grouping like Python’s itertools.groupby](https://stackoverflow.com/q/29142487/846892)", "_____no_output_____" ] ], [ [ "df = pd.DataFrame([0, 1, 0, 1, 1, 1, 0, 1, 1], columns=[\"A\"])\ndf[\"A\"].groupby((df[\"A\"] != df[\"A\"].shift()).cumsum()).groups", "_____no_output_____" ], [ "df[\"A\"].groupby((df[\"A\"] != df[\"A\"].shift()).cumsum()).cumsum()", "_____no_output_____" ] ], [ [ "### Expanding data\n\n[Alignment and to-date](https://stackoverflow.com/questions/15489011/python-time-series-alignment-and-to-date-functions)\n\n[Rolling Computation window based on values instead of counts](https://stackoverflow.com/questions/14300768/pandas-rolling-computation-with-window-based-on-values-instead-of-counts)\n\n[Rolling Mean by Time Interval](https://stackoverflow.com/questions/15771472/pandas-rolling-mean-by-time-interval)", "_____no_output_____" ], [ "### Splitting\n\n[Splitting a frame](https://stackoverflow.com/questions/13353233/best-way-to-split-a-dataframe-given-an-edge/15449992#15449992)\n\nCreate a list of dataframes, split using a delineation based on logic included in rows.", "_____no_output_____" ] ], [ [ "df = pd.DataFrame(\n data={\n \"Case\": [\"A\", \"A\", \"A\", \"B\", \"A\", \"A\", \"B\", \"A\", \"A\"],\n \"Data\": np.random.randn(9),\n }\n)\n\ndfs = list(\n zip(\n *df.groupby(\n (1 * (df[\"Case\"] == \"B\"))\n .cumsum()\n .rolling(window=3, min_periods=1)\n .median()\n )\n )\n)[-1]\n\ndfs[0]", "_____no_output_____" ], [ "dfs[1]", "_____no_output_____" ], [ "dfs[2]", "_____no_output_____" ] ], [ [ "\n<a id='cookbook-pivot'></a>", "_____no_output_____" ], [ "### Pivot\n\nThe Pivot docs.\n\n[Partial sums and subtotals](https://stackoverflow.com/questions/15570099/pandas-pivot-tables-row-subtotals/15574875#15574875)", "_____no_output_____" ] ], [ [ "df = pd.DataFrame(\n data={\n \"Province\": [\"ON\", \"QC\", \"BC\", \"AL\", \"AL\", \"MN\", \"ON\"],\n \"City\": [\n \"Toronto\",\n \"Montreal\",\n \"Vancouver\",\n \"Calgary\",\n \"Edmonton\",\n \"Winnipeg\",\n \"Windsor\",\n ],\n \"Sales\": [13, 6, 16, 8, 4, 3, 1],\n }\n)\ntable = pd.pivot_table(\n df,\n values=[\"Sales\"],\n index=[\"Province\"],\n 
columns=[\"City\"],\n aggfunc=np.sum,\n margins=True,\n)\ntable.stack(\"City\")", "_____no_output_____" ] ], [ [ "[Frequency table like plyr in R](https://stackoverflow.com/questions/15589354/frequency-tables-in-pandas-like-plyr-in-r)", "_____no_output_____" ] ], [ [ "grades = [48, 99, 75, 80, 42, 80, 72, 68, 36, 78]\ndf = pd.DataFrame(\n {\n \"ID\": [\"x%d\" % r for r in range(10)],\n \"Gender\": [\"F\", \"M\", \"F\", \"M\", \"F\", \"M\", \"F\", \"M\", \"M\", \"M\"],\n \"ExamYear\": [\n \"2007\",\n\"2007\",\"2007\",\"2008\",\"2008\",\"2008\",\"2008\",\"2009\",\"2009\",\"2009\", ],\n \"Class\": [\n \"algebra\",\"stats\",\"bio\",\"algebra\",\"algebra\",\"stats\",\"stats\",\"algebra\",\"bio\",\"bio\",\n ],\n \"Participated\": [\n \"yes\", \"yes\", \"yes\", \"yes\",\"no\", \"yes\", \"yes\",\"yes\",\"yes\",\"yes\",\n ],\n \"Passed\": [\"yes\" if x > 50 else \"no\" for x in grades],\n \"Employed\": [\n True,True,True,False,False,False,False,True,True,False,\n ],\n \"Grade\": grades,\n }\n)\n\ndf.groupby(\"ExamYear\").agg(\n {\n \"Participated\": lambda x: x.value_counts()[\"yes\"],\n \"Passed\": lambda x: sum(x == \"yes\"),\n \"Employed\": lambda x: sum(x),\n \"Grade\": lambda x: sum(x) / len(x),\n }\n)", "_____no_output_____" ] ], [ [ "[Plot pandas DataFrame with year over year data](https://stackoverflow.com/questions/30379789/plot-pandas-data-frame-with-year-over-year-data)\n\nTo create year and month cross tabulation:", "_____no_output_____" ] ], [ [ "df = pd.DataFrame(\n {\"value\": np.random.randn(36)},\n index=pd.date_range(\"2011-01-01\", freq=\"M\", periods=36),\n)\n\npd.pivot_table(\n df, index=df.index.month, columns=df.index.year, values=\"value\", aggfunc=\"sum\"\n)", "_____no_output_____" ] ], [ [ "### Apply\n\n[Rolling apply to organize - Turning embedded lists into a MultiIndex frame](https://stackoverflow.com/questions/17349981/converting-pandas-dataframe-with-categorical-values-into-binary-values)", "_____no_output_____" ] ], [ [ "df = pd.DataFrame(\n data={\n \"A\": [[2, 4, 8, 16], [100, 200], [10, 20, 30]],\n \"B\": [[\"a\", \"b\", \"c\"], [\"jj\", \"kk\"], [\"ccc\"]],\n },\n index=[\"I\", \"II\", \"III\"],\n)\n\ndef SeriesFromSubList(aList):\n return pd.Series(aList)\n\ndf_orgz = pd.concat(\n {ind: row.apply(SeriesFromSubList) for ind, row in df.iterrows()}\n)\ndf_orgz", "_____no_output_____" ] ], [ [ "[Rolling apply with a DataFrame returning a Series](https://stackoverflow.com/questions/19121854/using-rolling-apply-on-a-dataframe-object)\n\nRolling Apply to multiple columns where function calculates a Series before a Scalar from the Series is returned", "_____no_output_____" ] ], [ [ "df = pd.DataFrame(\n data=np.random.randn(2000, 2) / 10000,\n index=pd.date_range(\"2001-01-01\", periods=2000),\n columns=[\"A\", \"B\"],\n)\ndf", "_____no_output_____" ], [ "def gm(df, const):\n v = ((((df[\"A\"] + df[\"B\"]) + 1).cumprod()) - 1) * const\n return v.iloc[-1]\n\ns = pd.Series(\n {\n df.index[i]: gm(df.iloc[i: min(i + 51, len(df) - 1)], 5)\n for i in range(len(df) - 50)\n }\n)\ns", "_____no_output_____" ] ], [ [ "[Rolling apply with a DataFrame returning a Scalar](https://stackoverflow.com/questions/21040766/python-pandas-rolling-apply-two-column-input-into-function/21045831#21045831)\n\nRolling Apply to multiple columns where function returns a Scalar (Volume Weighted Average Price)", "_____no_output_____" ] ], [ [ "rng = pd.date_range(start=\"2014-01-01\", periods=100)\ndf = pd.DataFrame(\n {\n \"Open\": np.random.randn(len(rng)),\n \"Close\": np.random.randn(len(rng)),\n 
\"Volume\": np.random.randint(100, 2000, len(rng)),\n },\n index=rng,\n)\ndf", "_____no_output_____" ], [ "def vwap(bars):\n return (bars.Close * bars.Volume).sum() / bars.Volume.sum()\n\nwindow = 5\ns = pd.concat(\n [\n (pd.Series(vwap(df.iloc[i: i + window]), index=[df.index[i + window]]))\n for i in range(len(df) - window)\n ]\n)\ns.round(2)", "_____no_output_____" ] ], [ [ "## Timeseries\n\n[Between times](https://stackoverflow.com/questions/14539992/pandas-drop-rows-outside-of-time-range)\n\n[Using indexer between time](https://stackoverflow.com/questions/17559885/pandas-dataframe-mask-based-on-index)\n\n[Constructing a datetime range that excludes weekends and includes only certain times](https://stackoverflow.com/questions/24010830/pandas-generate-sequential-timestamp-with-jump/24014440#24014440?)\n\n[Vectorized Lookup](https://stackoverflow.com/questions/13893227/vectorized-look-up-of-values-in-pandas-dataframe)\n\n[Aggregation and plotting time series](https://nipunbatra.github.io/blog/visualisation/2013/05/01/aggregation-timeseries.html)\n\nTurn a matrix with hours in columns and days in rows into a continuous row sequence in the form of a time series.\n[How to rearrange a Python pandas DataFrame?](https://stackoverflow.com/questions/15432659/how-to-rearrange-a-python-pandas-dataframe)\n\n[Dealing with duplicates when reindexing a timeseries to a specified frequency](https://stackoverflow.com/questions/22244383/pandas-df-refill-adding-two-columns-of-different-shape)\n\nCalculate the first day of the month for each entry in a DatetimeIndex", "_____no_output_____" ] ], [ [ "dates = pd.date_range(\"2000-01-01\", periods=5)\ndates.to_period(freq=\"M\").to_timestamp()", "_____no_output_____" ] ], [ [ "\n<a id='cookbook-resample'></a>", "_____no_output_____" ], [ "### Resampling\n\nThe [Resample](38_timeseries.ipynb#timeseries-resampling) docs.\n\n[Using Grouper instead of TimeGrouper for time grouping of values](https://stackoverflow.com/questions/15297053/how-can-i-divide-single-values-of-a-dataframe-by-monthly-averages)\n\n[Time grouping with some missing values](https://stackoverflow.com/questions/33637312/pandas-grouper-by-frequency-with-completeness-requirement)\n\nValid frequency arguments to Grouper [Timeseries](38_timeseries.ipynb#timeseries-offset-aliases)\n\n[Grouping using a MultiIndex](https://stackoverflow.com/questions/41483763/pandas-timegrouper-on-multiindex)\n\n[Using TimeGrouper and another grouping to create subgroups, then apply a custom function](https://github.com/pandas-dev/pandas/issues/3791)\n\n[Resampling with custom periods](https://stackoverflow.com/questions/15408156/resampling-with-custom-periods)\n\n[Resample intraday frame without adding new days](https://stackoverflow.com/questions/14898574/resample-intrday-pandas-dataframe-without-add-new-days)\n\n[Resample minute data](https://stackoverflow.com/questions/14861023/resampling-minute-data)\n\n[Resample with groupby](https://stackoverflow.com/q/18677271/564538)\n\n\n<a id='cookbook-merge'></a>", "_____no_output_____" ], [ "## Merge\n\nThe [Concat](25_merging_join_concat.ipynb#merging-concatenation) docs. 
The [Join](25_merging_join_concat.ipynb#merging-join) docs.\n\n[Append two dataframes with overlapping index (emulate R rbind)](https://stackoverflow.com/questions/14988480/pandas-version-of-rbind)", "_____no_output_____" ] ], [ [ "rng = pd.date_range(\"2000-01-01\", periods=6)\ndf1 = pd.DataFrame(np.random.randn(6, 3), index=rng, columns=[\"A\", \"B\", \"C\"])\ndf2 = df1.copy()", "_____no_output_____" ] ], [ [ "Depending on df construction, `ignore_index` may be needed", "_____no_output_____" ] ], [ [ "df = df1.append(df2, ignore_index=True)\ndf", "_____no_output_____" ] ], [ [ "[Self Join of a DataFrame](https://github.com/pandas-dev/pandas/issues/2996)", "_____no_output_____" ] ], [ [ "df = pd.DataFrame(\n data={\n \"Area\": [\"A\"] * 5 + [\"C\"] * 2,\n \"Bins\": [110] * 2 + [160] * 3 + [40] * 2,\n \"Test_0\": [0, 1, 0, 1, 2, 0, 1],\n \"Data\": np.random.randn(7),\n }\n)\ndf", "_____no_output_____" ], [ "df[\"Test_1\"] = df[\"Test_0\"] - 1\n\npd.merge(\n df,\n df,\n left_on=[\"Bins\", \"Area\", \"Test_0\"],\n right_on=[\"Bins\", \"Area\", \"Test_1\"],\n suffixes=(\"_L\", \"_R\"),\n)", "_____no_output_____" ] ], [ [ "[How to set the index and join](https://stackoverflow.com/questions/14341805/pandas-merge-pd-merge-how-to-set-the-index-and-join)\n\n[KDB like asof join](https://stackoverflow.com/questions/12322289/kdb-like-asof-join-for-timeseries-data-in-pandas/12336039#12336039)\n\n[Join with a criteria based on the values](https://stackoverflow.com/questions/15581829/how-to-perform-an-inner-or-outer-join-of-dataframes-with-pandas-on-non-simplisti)\n\n[Using searchsorted to merge based on values inside a range](https://stackoverflow.com/questions/25125626/pandas-merge-with-logic/2512764)\n\n\n<a id='cookbook-plotting'></a>", "_____no_output_____" ], [ "## Plotting\n\nThe Plotting docs.\n\n[Make Matplotlib look like R](https://stackoverflow.com/questions/14349055/making-matplotlib-graphs-look-like-r-by-default)\n\n[Setting x-axis major and minor labels](https://stackoverflow.com/questions/12945971/pandas-timeseries-plot-setting-x-axis-major-and-minor-ticks-and-labels)\n\n[Plotting multiple charts in an IPython Jupyter notebook](https://stackoverflow.com/questions/16392921/make-more-than-one-chart-in-same-ipython-notebook-cell)\n\n[Creating a multi-line plot](https://stackoverflow.com/questions/16568964/make-a-multiline-plot-from-csv-file-in-matplotlib)\n\n[Plotting a heatmap](https://stackoverflow.com/questions/17050202/plot-timeseries-of-histograms-in-python)\n\n[Annotate a time-series plot](https://stackoverflow.com/questions/11067368/annotate-time-series-plot-in-matplotlib)\n\n[Annotate a time-series plot #2](https://stackoverflow.com/questions/17891493/annotating-points-from-a-pandas-dataframe-in-matplotlib-plot)\n\n[Generate Embedded plots in excel files using Pandas, Vincent and xlsxwriter](https://pandas-xlsxwriter-charts.readthedocs.io/)\n\n[Boxplot for each quartile of a stratifying variable](https://stackoverflow.com/questions/23232989/boxplot-stratified-by-column-in-python-pandas)", "_____no_output_____" ] ], [ [ "df = pd.DataFrame(\n {\n \"stratifying_var\": np.random.uniform(0, 100, 20),\n \"price\": np.random.normal(100, 5, 20),\n }\n)\n\ndf[\"quartiles\"] = pd.qcut(\n df[\"stratifying_var\"], 4, labels=[\"0-25%\", \"25-50%\", \"50-75%\", \"75-100%\"]\n)\n\ndf.boxplot(column=\"price\", by=\"quartiles\")", "_____no_output_____" ] ], [ [ "## Data in/out\n\n[Performance comparison of SQL vs 
HDF5](https://stackoverflow.com/questions/16628329/hdf5-and-sqlite-concurrency-compression-i-o-performance)\n\n\n<a id='cookbook-csv'></a>", "_____no_output_____" ], [ "### CSV\n\nThe CSV docs\n\n[read_csv in action](https://wesmckinney.com/blog/update-on-upcoming-pandas-v0-10-new-file-parser-other-performance-wins/)\n\n[appending to a csv](https://stackoverflow.com/questions/17134942/pandas-dataframe-output-end-of-csv)\n\n[Reading a csv chunk-by-chunk](https://stackoverflow.com/questions/11622652/large-persistent-dataframe-in-pandas/12193309#12193309)\n\n[Reading only certain rows of a csv chunk-by-chunk](https://stackoverflow.com/questions/19674212/pandas-data-frame-select-rows-and-clear-memory)\n\n[Reading the first few lines of a frame](https://stackoverflow.com/questions/15008970/way-to-read-first-few-lines-for-pandas-dataframe)\n\nReading a file that is compressed but not by `gzip/bz2` (the native compressed formats which `read_csv` understands).\nThis example shows a `WinZipped` file, but is a general application of opening the file within a context manager and\nusing that handle to read.\n[See here](https://stackoverflow.com/questions/17789907/pandas-convert-winzipped-csv-file-to-data-frame)\n\n[Inferring dtypes from a file](https://stackoverflow.com/questions/15555005/get-inferred-dataframe-types-iteratively-using-chunksize)\n\n[Dealing with bad lines](https://github.com/pandas-dev/pandas/issues/2886)\n\n[Dealing with bad lines II](http://nipunbatra.github.io/2013/06/reading-unclean-data-csv-using-pandas/)\n\n[Reading CSV with Unix timestamps and converting to local timezone](http://nipunbatra.github.io/2013/06/pandas-reading-csv-with-unix-timestamps-and-converting-to-local-timezone/)\n\n[Write a multi-row index CSV without writing duplicates](https://stackoverflow.com/questions/17349574/pandas-write-multiindex-rows-with-to-csv)\n\n\n<a id='cookbook-csv-multiple-files'></a>", "_____no_output_____" ], [ "#### Reading multiple files to create a single DataFrame\n\nThe best way to combine multiple files into a single DataFrame is to read the individual frames one by one, put all\nof the individual frames into a list, and then combine the frames in the list using `pd.concat()`:", "_____no_output_____" ] ], [ [ "for i in range(3):\n data = pd.DataFrame(np.random.randn(10, 4))\n data.to_csv(\"file_{}.csv\".format(i))\n\nfiles = [\"file_0.csv\", \"file_1.csv\", \"file_2.csv\"]\nresult = pd.concat([pd.read_csv(f) for f in files], ignore_index=True)", "_____no_output_____" ] ], [ [ "You can use the same approach to read all files matching a pattern. 
Here is an example using `glob`:", "_____no_output_____" ] ], [ [ "import glob\nimport os\n\nfiles = glob.glob(\"file_*.csv\")\nresult = pd.concat([pd.read_csv(f) for f in files], ignore_index=True)", "_____no_output_____" ] ], [ [ "Finally, this strategy will work with the other `pd.read_*(...)` functions described in the io docs.\n\n\n<dl style='margin: 20px 0;'>\n<dt>::</dt>\n<dd>\n\n<dl style='margin: 20px 0;'>\n<dt>suppress</dt>\n<dd>\n</dd>\n\n</dl>\n\n\n<dl style='margin: 20px 0;'>\n<dt>for i in range(3):</dt>\n<dd>\nos.remove(“file_{}.csv”.format(i))\n\n</dd>\n\n</dl>\n\n</dd>\n\n</dl>", "_____no_output_____" ], [ "#### Parsing date components in multi-columns\n\nParsing date components in multi-columns is faster with a format", "_____no_output_____" ] ], [ [ "i = pd.date_range(\"20000101\", periods=10000)\ndf = pd.DataFrame({\"year\": i.year, \"month\": i.month, \"day\": i.day})\ndf.head()", "_____no_output_____" ], [ "%timeit pd.to_datetime(df.year * 10000 + df.month * 100 + df.day, format='%Y%m%d')", "_____no_output_____" ], [ "ds = df.apply(lambda x: \"%04d%02d%02d\" % (x[\"year\"], x[\"month\"], x[\"day\"]), axis=1)\nds.head()", "_____no_output_____" ], [ "%timeit pd.to_datetime(ds)", "_____no_output_____" ] ], [ [ "#### Skip row between header and data", "_____no_output_____" ] ], [ [ "data = \"\"\";;;;\n ;;;;\n ;;;;\n ;;;;\n ;;;;\n ;;;;\n;;;;\n ;;;;\n ;;;;\n;;;;\ndate;Param1;Param2;Param4;Param5\n ;m²;°C;m²;m\n;;;;\n01.01.1990 00:00;1;1;2;3\n01.01.1990 01:00;5;3;4;5\n01.01.1990 02:00;9;5;6;7\n01.01.1990 03:00;13;7;8;9\n01.01.1990 04:00;17;9;10;11\n01.01.1990 05:00;21;11;12;13\n\"\"\"", "_____no_output_____" ] ], [ [ "##### Option 1: pass rows explicitly to skip rows", "_____no_output_____" ] ], [ [ "from io import StringIO\n\npd.read_csv(\n StringIO(data),\n sep=\";\",\n skiprows=[11, 12],\n index_col=0,\n parse_dates=True,\n header=10,\n)", "_____no_output_____" ] ], [ [ "##### Option 2: read column names and then data", "_____no_output_____" ] ], [ [ "pd.read_csv(StringIO(data), sep=\";\", header=10, nrows=10).columns\ncolumns = pd.read_csv(StringIO(data), sep=\";\", header=10, nrows=10).columns\npd.read_csv(\n StringIO(data), sep=\";\", index_col=0, header=12, parse_dates=True, names=columns\n)", "_____no_output_____" ] ], [ [ "\n<a id='cookbook-sql'></a>", "_____no_output_____" ], [ "### SQL\n\nThe SQL docs\n\n[Reading from databases with SQL](https://stackoverflow.com/questions/10065051/python-pandas-and-databases-like-mysql)\n\n\n<a id='cookbook-excel'></a>", "_____no_output_____" ], [ "### Excel\n\nThe Excel docs\n\n[Reading from a filelike handle](https://stackoverflow.com/questions/15588713/sheets-of-excel-workbook-from-a-url-into-a-pandas-dataframe)\n\n[Modifying formatting in XlsxWriter output](https://pbpython.com/improve-pandas-excel-output.html)\n\n\n<a id='cookbook-html'></a>", "_____no_output_____" ], [ "### HTML\n\n[Reading HTML tables from a server that cannot handle the default request\nheader](https://stackoverflow.com/a/18939272/564538)\n\n\n<a id='cookbook-hdf'></a>", "_____no_output_____" ], [ "### HDFStore\n\nThe HDFStores docs\n\n[Simple queries with a Timestamp Index](https://stackoverflow.com/questions/13926089/selecting-columns-from-pandas-hdfstore-table)\n\n[Managing heterogeneous data using a linked multiple table hierarchy](https://github.com/pandas-dev/pandas/issues/3032)\n\n[Merging on-disk tables with millions of 
rows](https://stackoverflow.com/questions/14614512/merging-two-tables-with-millions-of-rows-in-python/14617925#14617925)\n\n[Avoiding inconsistencies when writing to a store from multiple processes/threads](https://stackoverflow.com/a/29014295/2858145)\n\nDe-duplicating a large store by chunks, essentially a recursive reduction operation. Shows a function for taking in data from\ncsv file and creating a store by chunks, with date parsing as well.\n[See here](https://stackoverflow.com/questions/16110252/need-to-compare-very-large-files-around-1-5gb-in-python/16110391#16110391)\n\n[Creating a store chunk-by-chunk from a csv file](https://stackoverflow.com/questions/20428355/appending-column-to-frame-of-hdf-file-in-pandas/20428786#20428786)\n\n[Appending to a store, while creating a unique index](https://stackoverflow.com/questions/16997048/how-does-one-append-large-amounts-of-data-to-a-pandas-hdfstore-and-get-a-natural/16999397#16999397)\n\n[Large Data work flows](https://stackoverflow.com/questions/14262433/large-data-work-flows-using-pandas)\n\n[Reading in a sequence of files, then providing a global unique index to a store while appending](https://stackoverflow.com/questions/16997048/how-does-one-append-large-amounts-of-data-to-a-pandas-hdfstore-and-get-a-natural)\n\n[Groupby on a HDFStore with low group density](https://stackoverflow.com/questions/15798209/pandas-group-by-query-on-large-data-in-hdfstore)\n\n[Groupby on a HDFStore with high group density](https://stackoverflow.com/questions/25459982/trouble-with-grouby-on-millions-of-keys-on-a-chunked-file-in-python-pandas/25471765#25471765)\n\n[Hierarchical queries on a HDFStore](https://stackoverflow.com/questions/22777284/improve-query-performance-from-a-large-hdfstore-table-with-pandas/22820780#22820780)\n\n[Counting with a HDFStore](https://stackoverflow.com/questions/20497897/converting-dict-of-dicts-into-pandas-dataframe-memory-issues)\n\n[Troubleshoot HDFStore exceptions](https://stackoverflow.com/questions/15488809/how-to-trouble-shoot-hdfstore-exception-cannot-find-the-correct-atom-type)\n\n[Setting min_itemsize with strings](https://stackoverflow.com/questions/15988871/hdfstore-appendstring-dataframe-fails-when-string-column-contents-are-longer)\n\n[Using ptrepack to create a completely-sorted-index on a store](https://stackoverflow.com/questions/17893370/ptrepack-sortby-needs-full-index)\n\nStoring Attributes to a group node", "_____no_output_____" ] ], [ [ "df = pd.DataFrame(np.random.randn(8, 3))\nstore = pd.HDFStore(\"test.h5\")\nstore.put(\"df\", df)\n\n# you can store an arbitrary Python object via pickle\nstore.get_storer(\"df\").attrs.my_attribute = {\"A\": 10}\nstore.get_storer(\"df\").attrs.my_attribute", "_____no_output_____" ], [ "store.close()\nos.remove(\"test.h5\")", "_____no_output_____" ] ], [ [ "You can create or load a HDFStore in-memory by passing the `driver`\nparameter to PyTables. Changes are only written to disk when the HDFStore\nis closed.", "_____no_output_____" ] ], [ [ "store = pd.HDFStore(\"test.h5\", \"w\", diver=\"H5FD_CORE\")\n\ndf = pd.DataFrame(np.random.randn(8, 3))\nstore[\"test\"] = df\n\n# only after closing the store, data is written to disk:\nstore.close()", "_____no_output_____" ], [ "os.remove(\"test.h5\")", "_____no_output_____" ] ], [ [ "### Binary files\n\npandas readily accepts NumPy record arrays, if you need to read in a binary\nfile consisting of an array of C structs. 
For example, given this C program\nin a file called `main.c` compiled with `gcc main.c -std=gnu99` on a\n64-bit machine,", "_____no_output_____" ], [ "```c\n#include <stdio.h>\n#include <stdint.h>\n\ntypedef struct _Data\n{\n int32_t count;\n double avg;\n float scale;\n} Data;\n\nint main(int argc, const char *argv[])\n{\n size_t n = 10;\n Data d[n];\n\n for (int i = 0; i < n; ++i)\n {\n d[i].count = i;\n d[i].avg = i + 1.0;\n d[i].scale = (float) i + 2.0f;\n }\n\n FILE *file = fopen(\"binary.dat\", \"wb\");\n fwrite(&d, sizeof(Data), n, file);\n fclose(file);\n\n return 0;\n}\n```\n", "_____no_output_____" ], [ "the following Python code will read the binary file `'binary.dat'` into a\npandas `DataFrame`, where each element of the struct corresponds to a column\nin the frame:", "_____no_output_____" ], [ "```python\nnames = \"count\", \"avg\", \"scale\"\n\n# note that the offsets are larger than the size of the type because of\n# struct padding\noffsets = 0, 8, 16\nformats = \"i4\", \"f8\", \"f4\"\ndt = np.dtype({\"names\": names, \"offsets\": offsets, \"formats\": formats}, align=True)\ndf = pd.DataFrame(np.fromfile(\"binary.dat\", dt))\n```\n", "_____no_output_____" ], [ ">**Note**\n>\n>The offsets of the structure elements may be different depending on the\narchitecture of the machine on which the file was created. Using a raw\nbinary file format like this for general data storage is not recommended, as\nit is not cross platform. We recommended either HDF5 or parquet, both of\nwhich are supported by pandas’ IO facilities.", "_____no_output_____" ], [ "## Computation\n\n[Numerical integration (sample-based) of a time series](https://nbviewer.ipython.org/5720498)", "_____no_output_____" ], [ "### Correlation\n\nOften it’s useful to obtain the lower (or upper) triangular form of a correlation matrix calculated from `DataFrame.corr()`. This can be achieved by passing a boolean mask to `where` as follows:", "_____no_output_____" ] ], [ [ "df = pd.DataFrame(np.random.random(size=(100, 5)))\n\ncorr_mat = df.corr()\nmask = np.tril(np.ones_like(corr_mat, dtype=np.bool_), k=-1)\n\ncorr_mat.where(mask)", "_____no_output_____" ] ], [ [ "The `method` argument within `DataFrame.corr` can accept a callable in addition to the named correlation types. 
Here we compute the [distance correlation](https://en.wikipedia.org/wiki/Distance_correlation) matrix for a `DataFrame` object.", "_____no_output_____" ] ], [ [ "def distcorr(x, y):\n n = len(x)\n a = np.zeros(shape=(n, n))\n b = np.zeros(shape=(n, n))\n for i in range(n):\n for j in range(i + 1, n):\n a[i, j] = abs(x[i] - x[j])\n b[i, j] = abs(y[i] - y[j])\n a += a.T\n b += b.T\n a_bar = np.vstack([np.nanmean(a, axis=0)] * n)\n b_bar = np.vstack([np.nanmean(b, axis=0)] * n)\n A = a - a_bar - a_bar.T + np.full(shape=(n, n), fill_value=a_bar.mean())\n B = b - b_bar - b_bar.T + np.full(shape=(n, n), fill_value=b_bar.mean())\n cov_ab = np.sqrt(np.nansum(A * B)) / n\n std_a = np.sqrt(np.sqrt(np.nansum(A ** 2)) / n)\n std_b = np.sqrt(np.sqrt(np.nansum(B ** 2)) / n)\n return cov_ab / std_a / std_b\n\n\ndf = pd.DataFrame(np.random.normal(size=(100, 3)))\ndf.corr(method=distcorr)", "_____no_output_____" ] ], [ [ "## Timedeltas\n\nThe [Timedeltas](39_timedeltas.ipynb#timedeltas-timedeltas) docs.\n\n[Using timedeltas](https://github.com/pandas-dev/pandas/pull/2899)", "_____no_output_____" ] ], [ [ "import datetime\n\ns = pd.Series(pd.date_range(\"2012-1-1\", periods=3, freq=\"D\"))", "_____no_output_____" ], [ "s - s.max()", "_____no_output_____" ], [ "s.max() - s", "_____no_output_____" ], [ "s - datetime.datetime(2011, 1, 1, 3, 5)", "_____no_output_____" ], [ "s + datetime.timedelta(minutes=5)", "_____no_output_____" ], [ "datetime.datetime(2011, 1, 1, 3, 5) - s", "_____no_output_____" ], [ "datetime.timedelta(minutes=5) + s", "_____no_output_____" ] ], [ [ "[Adding and subtracting deltas and dates](https://stackoverflow.com/questions/16385785/add-days-to-dates-in-dataframe)", "_____no_output_____" ] ], [ [ "deltas = pd.Series([datetime.timedelta(days=i) for i in range(3)])\ndf = pd.DataFrame({\"A\": s, \"B\": deltas})\ndf", "_____no_output_____" ], [ "df[\"New Dates\"] = df[\"A\"] + df[\"B\"]\ndf[\"Delta\"] = df[\"A\"] - df[\"New Dates\"]\ndf", "_____no_output_____" ], [ "df.dtypes", "_____no_output_____" ] ], [ [ "[Another example](https://stackoverflow.com/questions/15683588/iterating-through-a-pandas-dataframe)\n\nValues can be set to NaT using np.nan, similar to datetime", "_____no_output_____" ] ], [ [ "y = s - s.shift()\ny", "_____no_output_____" ], [ "y[1] = np.nan\ny", "_____no_output_____" ] ], [ [ "## Creating example data\n\nTo create a dataframe from every combination of some given values, like R’s `expand.grid()`\nfunction, we can create a dict where the keys are column names and the values are lists\nof the data values:", "_____no_output_____" ] ], [ [ "def expand_grid(data_dict):\n rows = itertools.product(*data_dict.values())\n return pd.DataFrame.from_records(rows, columns=data_dict.keys())\n\n\ndf = expand_grid(\n {\"height\": [60, 70], \"weight\": [100, 140, 180], \"sex\": [\"Male\", \"Female\"]}\n)\ndf", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
4a6b08b85efd0874980bf531c9a772f51e30735f
448,907
ipynb
Jupyter Notebook
F_Machine_learning/1_Unsupervised-Learning/4_Subtyping_breast_cancer_from_gene_expression.ipynb
oercompbiomed/CBM101
20010dcb99fbf218c4789eb5918dcff8ceb94898
[ "MIT" ]
7
2019-07-03T07:41:55.000Z
2022-02-06T20:25:37.000Z
F_Machine_learning/1_Unsupervised-Learning/4_Subtyping_breast_cancer_from_gene_expression.ipynb
oercompbiomed/CBM101
20010dcb99fbf218c4789eb5918dcff8ceb94898
[ "MIT" ]
9
2019-03-14T15:15:09.000Z
2019-08-01T14:18:21.000Z
F_Machine_learning/1_Unsupervised-Learning/4_Subtyping_breast_cancer_from_gene_expression.ipynb
oercompbiomed/CBM101
20010dcb99fbf218c4789eb5918dcff8ceb94898
[ "MIT" ]
11
2019-03-12T10:43:11.000Z
2021-10-05T12:15:00.000Z
218.021855
160,260
0.887353
[ [ [ "## Using low dimensional embeddings to discover subtypes of breast cancer\n\nThis notebook is largely based on https://towardsdatascience.com/reduce-dimensions-for-single-cell-4224778a2d67 (credit to Nikolay Oskolkov).", "_____no_output_____" ], [ "https://www.nature.com/articles/s41467-018-07582-3#data-availability", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport GEOparse\nfrom matplotlib import pyplot as plt", "_____no_output_____" ], [ "GEO_ID = \"GSE111229\" # from the article ", "_____no_output_____" ] ], [ [ "#### Exercise 1. load the dataset into `rna_seq` using GEOparse.", "_____no_output_____" ] ], [ [ "# %load solutions/ex4_1.py\nrna_seq = GEOparse.get_GEO(geo=GEO_ID, destdir=\"./\")", "02-Dec-2020 16:36:10 DEBUG utils - Directory ./ already exists. Skipping.\n02-Dec-2020 16:36:10 INFO GEOparse - File already exist: using local version.\n02-Dec-2020 16:36:10 INFO GEOparse - Parsing ./GSE111229_family.soft.gz: \n02-Dec-2020 16:36:10 DEBUG GEOparse - DATABASE: GeoMiame\n02-Dec-2020 16:36:10 DEBUG GEOparse - SERIES: GSE111229\n02-Dec-2020 16:36:10 DEBUG GEOparse - PLATFORM: GPL13112\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025845\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025846\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025847\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025848\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025849\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025850\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025851\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025852\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025853\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025854\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025855\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025856\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025857\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025858\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025859\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025860\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025861\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025862\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025863\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025864\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025865\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025866\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025867\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025868\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025869\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025870\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025871\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025872\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025873\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025874\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025875\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025876\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025877\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025878\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025879\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025880\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025881\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025882\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025883\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025884\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025885\n02-Dec-2020 16:36:10 DEBUG 
GEOparse - SAMPLE: GSM3025886\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025887\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025888\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025889\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025890\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025891\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025892\n02-Dec-2020 16:36:10 DEBUG GEOparse - SAMPLE: GSM3025893\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025894\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025895\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025896\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025897\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025898\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025899\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025900\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025901\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025902\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025903\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025904\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025905\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025906\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025907\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025908\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025909\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025910\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025911\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025912\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025913\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025914\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025915\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025916\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025917\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025918\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025919\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025920\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025921\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025922\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025923\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025924\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025925\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025926\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025927\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025928\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025929\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025930\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025931\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025932\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025933\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025934\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025935\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025936\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025937\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025938\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025939\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025940\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025941\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025942\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025943\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025944\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025945\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025946\n02-Dec-2020 16:36:11 DEBUG GEOparse - 
SAMPLE: GSM3025947\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025948\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025949\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025950\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025951\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025952\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025953\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025954\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025955\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025956\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025957\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025958\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025959\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025960\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025961\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025962\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025963\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025964\n02-Dec-2020 16:36:11 DEBUG GEOparse - SAMPLE: GSM3025965\n02-Dec-2020 16:36:12 DEBUG GEOparse - SAMPLE: GSM3025966\n02-Dec-2020 16:36:12 DEBUG GEOparse - SAMPLE: GSM3025967\n02-Dec-2020 16:36:12 DEBUG GEOparse - SAMPLE: GSM3025968\n02-Dec-2020 16:36:12 DEBUG GEOparse - SAMPLE: GSM3025969\n02-Dec-2020 16:36:12 DEBUG GEOparse - SAMPLE: GSM3025970\n02-Dec-2020 16:36:12 DEBUG GEOparse - SAMPLE: GSM3025971\n02-Dec-2020 16:36:12 DEBUG GEOparse - SAMPLE: GSM3025972\n02-Dec-2020 16:36:12 DEBUG GEOparse - SAMPLE: GSM3025973\n02-Dec-2020 16:36:12 DEBUG GEOparse - SAMPLE: GSM3025974\n02-Dec-2020 16:36:12 DEBUG GEOparse - SAMPLE: GSM3025975\n02-Dec-2020 16:36:12 DEBUG GEOparse - SAMPLE: GSM3025976\n02-Dec-2020 16:36:12 DEBUG GEOparse - SAMPLE: GSM3025977\n02-Dec-2020 16:36:12 DEBUG GEOparse - SAMPLE: GSM3025978\n02-Dec-2020 16:36:12 DEBUG GEOparse - SAMPLE: GSM3025979\n02-Dec-2020 16:36:12 DEBUG GEOparse - SAMPLE: GSM3025980\n02-Dec-2020 16:36:12 DEBUG GEOparse - SAMPLE: GSM3025981\n" ], [ "dir(rna_seq)", "_____no_output_____" ], [ "rna_seq.download_SRA??", "_____no_output_____" ], [ "rna_seq.geotype", "_____no_output_____" ], [ "rna_seq.phenotype_data.shape", "_____no_output_____" ], [ "rna_seq.phenotype_data.shape", "_____no_output_____" ], [ "rna_seq.to_soft('test', False)", "_____no_output_____" ], [ "cafs = pd.read_csv('data/CAFs.txt', sep='\\t')", "_____no_output_____" ], [ "sorted(cafs.cluster.unique())", "_____no_output_____" ], [ "expr = cafs", "_____no_output_____" ] ], [ [ "### The expression matrix\n716 cells has been sequenced, and the expression levels has been assessed for 558 genes. Arranging the cells as rows and genes as columns we obtain an *expression matrix*.", "_____no_output_____" ] ], [ [ "expr.shape", "_____no_output_____" ], [ "expr", "_____no_output_____" ] ], [ [ "Before going further, try to reflect for a moment how you would try to illuminate any pattern in this data, given what you already know.", "_____no_output_____" ], [ "#### Plot the expression matrix", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(8,8))\nplt.imshow(expr.values, cmap='Greens', vmax=4000, vmin=0)\nplt.title('Expression matrix')\nplt.ylabel('Cells')\nplt.xlabel('Genes')\nplt.colorbar()\nplt.show()", "_____no_output_____" ] ], [ [ "#### Exercise 2. The data is very sparse (most entries are zero), can you quantify how sparse it is? (i.e. 
how many of the entries are 0) ", "_____no_output_____" ] ], [ [ "# %load solutions/ex4_2.py\nnp.count_nonzero(expr.values) / np.prod(expr.shape)\n\n# only 20% of the entries are non-zero.", "_____no_output_____" ], [ "print(\"\\n\" + \"Dimensions of input file: \" + str(expr.shape) + \"\\n\")\nprint(\"\\n\" + \"Last column corresponds to cluster assignments: \" + \"\\n\")\nprint(expr.iloc[0:4, (expr.shape[1]-4):expr.shape[1]])\nX = expr.values[:,0:(expr.shape[1]-1)]\nY = expr.values[:,expr.shape[1]-1] #cluster\nX = np.log(X + 1)", "\nDimensions of input file: (716, 558)\n\n\nLast column corresponds to cluster assignments: \n\n Zfp652os Zfp81 Zfp944 cluster\nSS2_15_0048_A3 0.0 0.0 0.0 1\nSS2_15_0048_A6 0.0 0.0 0.0 1\nSS2_15_0048_A5 0.0 0.0 0.0 1\nSS2_15_0048_A4 0.0 0.0 0.0 2\n" ], [ "cafs.dtypes.unique()", "_____no_output_____" ] ], [ [ "### Decomposing the signals\nNow that we have gained some basic understanding of the data, we see it is fit for machine learning. You have already seen a few techniques to reduce data dimensionality reduction. We start with PCA", "_____no_output_____" ] ], [ [ "from sklearn.decomposition import PCA", "_____no_output_____" ], [ "#from matplotlib import cm\n#dir(cm) # available colors", "_____no_output_____" ] ], [ [ "#### Exercise 3. Perform PCA on the expression data and visualize the results (with colors to represent the ground truth clusters)", "_____no_output_____" ] ], [ [ "# %load solutions/ex4_3.py\nmodel = PCA()\npca = model.fit_transform(X)\nplt.scatter(pca[:, 0], pca[:, 1], c = Y, cmap = 'rainbow', s = 1)\nplt.xlabel(\"PC1\", fontsize = 20); plt.ylabel(\"PC2\", fontsize = 20)", "_____no_output_____" ], [ "plt.plot(model.explained_variance_ratio_[:10])\nplt.xticks(range(10));plt.show()", "_____no_output_____" ] ], [ [ "PCA is completely unsupervised. Linear discriminant analysis (LDA) is often used for the same purpose as PCA (dimensionality reduction), but is strictly speaking not unsupervised.", "_____no_output_____" ] ], [ [ "from sklearn.discriminant_analysis import LinearDiscriminantAnalysis", "_____no_output_____" ], [ "model = LinearDiscriminantAnalysis(n_components = 2, priors = None, shrinkage = 'auto', \n solver = 'eigen', store_covariance = False, tol = 0.0001)\nlda = model.fit_transform(X, Y)\nplt.scatter(lda[:, 0], lda[:, 1], c = Y, cmap = 'viridis', s = 1)\nplt.xlabel(\"LDA1\", fontsize = 20); plt.ylabel(\"LDA2\", fontsize = 20)\nfeature_importances = pd.DataFrame({'Gene':np.array(expr.columns)[:-1], \n 'Score':abs(model.coef_[0])})\nprint(feature_importances.sort_values('Score', ascending = False).head(20))", " Gene Score\n422 Rn45s 39.249111\n64 B2m 12.880958\n212 Fth1 8.174487\n287 Lars2 7.385672\n147 Cst3 6.857356\n315 Malat1 6.290840\n420 Rgs5 4.681367\n524 Timp1 4.486659\n131 Col3a1 3.229705\n130 Col1a2 3.062976\n537 Trmt61b 3.020343\n145 Crip1 2.991499\n322 Mgp 2.787057\n368 Mt1 2.673895\n400 Postn 2.401428\n534 Tpm1 2.258763\n248 Hspa5 2.133318\n241 Higd1b 2.094767\n220 Ggt1 2.003810\n122 Cldn4 1.951643\n" ] ], [ [ "The way to interpret the data above: we clearly see the data lay in three clusters, suggesting we have found 3 different separable expression-signatures. 
However, we also see one cluster is occupied by 2 clusters (the colors are imposed by the fact that we know the \"ground truth\", but unsupervised methods are generally used for data exploration in which we do not know of these things.", "_____no_output_____" ], [ "# Non-linear methods\n\n# t-SNE\n\nt-SNE is a very popular decomposition technique used in molecular biology, especially for visualization purposes. t-SNE does generally not cope well with high dimensionality, so it is common to first transform the data with PCA and then run this through t-SNE. Here we will do both with and without prereducing the dimensionality.", "_____no_output_____" ] ], [ [ "from sklearn.manifold import TSNE", "_____no_output_____" ], [ "model = TSNE(learning_rate = 10, n_components = 2, random_state = 123, perplexity = 30)\ntsne = model.fit_transform(X)", "_____no_output_____" ], [ "plt.scatter(tsne[:, 0], tsne[:, 1], c = Y, cmap = 'rainbow', s = 2, marker='x')\nplt.title('tSNE', fontsize = 20)\nplt.xlabel(\"tSNE1\", fontsize = 20)\nplt.ylabel(\"tSNE2\", fontsize = 20)", "_____no_output_____" ] ], [ [ "#### Exercise 4. Reduce the data first with PCA to 30 principal components, then rerun the tSNE on this transformed data. ", "_____no_output_____" ] ], [ [ "# %load solutions/ex4_4.py\nX_reduced = PCA(n_components = 30).fit_transform(X)\nmodel = TSNE(learning_rate = 10, n_components = 2, random_state = 123, perplexity = 30)\ntsne = model.fit_transform(X_reduced)\n\nplt.scatter(tsne[:, 0], tsne[:, 1], c = Y, cmap = 'rainbow', s = 2, marker='x')\nplt.title('tSNE on PCA', fontsize = 20)\nplt.xlabel(\"tSNE1\", fontsize = 20)\nplt.ylabel(\"tSNE2\", fontsize = 20)", "_____no_output_____" ] ], [ [ "While it can be hard to discern the performance boost of prereduction, we can certainly see that t-SNE performs better than a linear method like PCA. However, non-linearity is no guarantee of success itself. For instance Isomap does not do well with this data.", "_____no_output_____" ] ], [ [ "from sklearn.manifold import Isomap", "_____no_output_____" ], [ "model = Isomap()\nisomap = model.fit_transform(X)\nplt.scatter(isomap[:, 0], isomap[:, 1], c = Y, cmap = 'viridis', s = 1)\nplt.title('ISOMAP')\n#plt.colorbar()\nplt.xlabel(\"ISO1\")\nplt.ylabel(\"ISO2\")", "_____no_output_____" ] ], [ [ "We should not throw Isomap out the window yet, like most algorithm, there is no one-size-fits-all. Isomap is well suited for tasks without clear clusters, but continuous change is present. ", "_____no_output_____" ], [ "# UMAP\nA more recent alternative to t-SNE is [UMAP](https://arxiv.org/abs/1802.03426), which also produces high quality visualizations with good separation, and scales better than t-sne with large datasets.", "_____no_output_____" ] ], [ [ "from umap import UMAP", "_____no_output_____" ], [ "print(\"Performing Uniform Manifold Approximation and Projection (UMAP) ...\")\n#model = UMAP(n_neighbors = 30, min_dist = 0.3, n_components = 2)\nmodel = UMAP()\numap = model.fit_transform(X) # or X_reduced", "Performing Uniform Manifold Approximation and Projection (UMAP) ...\n" ], [ "plt.scatter(umap[:, 0], umap[:, 1], c = Y, cmap = 'viridis', s = 1)\nplt.title('UMAP')\n#plt.colorbar()\nplt.xlabel(\"UMAP1\")\nplt.ylabel(\"UMAP2\")", "_____no_output_____" ] ], [ [ "#### Conclusion\nIn summary, when doing data exploration of gene expression (and other biomedical data), non-linear methods are preferred to linear ones.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ] ]
4a6b0a69a9e2e6b801728f73345c305231f3026c
897,965
ipynb
Jupyter Notebook
Running - HPMC - Selecting move sizes.ipynb
jennyfothergill/hoomd-examples
0e21f63a06a3ef6f69eb565799f5ff5a218bbb36
[ "BSD-3-Clause" ]
12
2016-12-05T14:21:32.000Z
2022-03-30T17:38:53.000Z
Running - HPMC - Selecting move sizes.ipynb
jennyfothergill/hoomd-examples
0e21f63a06a3ef6f69eb565799f5ff5a218bbb36
[ "BSD-3-Clause" ]
1
2017-03-13T08:11:20.000Z
2017-04-04T00:22:05.000Z
Running - HPMC - Selecting move sizes.ipynb
jennyfothergill/hoomd-examples
0e21f63a06a3ef6f69eb565799f5ff5a218bbb36
[ "BSD-3-Clause" ]
10
2017-03-21T13:54:42.000Z
2021-09-17T12:53:23.000Z
3,453.711538
796,506
0.926978
[ [ [ "import hoomd\nimport hoomd.hpmc\nimport ex_render\nimport math\nfrom matplotlib import pyplot\nimport numpy\n%matplotlib inline", "_____no_output_____" ] ], [ [ "# Selecting move sizes\n\nHPMC allows you to set the translation and rotation move sizes. Set the move size too small and almost all trial moves are accepted, but it takes many time steps to move the whole system an appreciable amount. Set the move size too large and individual moves will advance the system significantly, but most of the trial moves are rejected.\n\nTo find the true optimal move size, you need to define the slowest evolving order parameter in the system. Then perform simulations at many move sizes and find the one with where that order parameter has the fastest decorrelation time.\n\n## Acceptance rule of thumb\n\nIn a wide range of systems, the optimal move size is one where the move acceptance ratio is 20%. This rule applies in moderately dense to dense system configurations. HPMC can auto-tune the move size to meet a given acceptance ratio. To demonstrate, here is the hard square tutorial script:", "_____no_output_____" ] ], [ [ "hoomd.context.initialize('--mode=cpu');\nsystem = hoomd.init.create_lattice(unitcell=hoomd.lattice.sq(a=1.2), n=10);\nmc = hoomd.hpmc.integrate.convex_polygon(d=0.1, a=0.1, seed=1);\nsquare_verts = [[-0.5, -0.5], [0.5, -0.5], [0.5, 0.5], [-0.5, 0.5]];\nmc.shape_param.set('A', vertices=square_verts);\n\nlog1 = hoomd.analyze.log(filename=\"log-output.log\",\n quantities=['hpmc_sweep',\n 'hpmc_translate_acceptance',\n 'hpmc_rotate_acceptance',\n 'hpmc_d',\n 'hpmc_a',\n 'hpmc_move_ratio',\n 'hpmc_overlap_count'],\n period=10,\n overwrite=True);", "HOOMD-blue v2.1.8 CUDA (7.5) DOUBLE HPMC_MIXED MPI SSE SSE2 SSE3 \nCompiled: 07/21/2017\nCopyright 2009-2016 The Regents of the University of Michigan.\n-----\nYou are using HOOMD-blue. Please cite the following:\n* J A Anderson, C D Lorenz, and A Travesset. \"General purpose molecular dynamics\n simulations fully implemented on graphics processing units\", Journal of\n Computational Physics 227 (2008) 5342--5359\n* J Glaser, T D Nguyen, J A Anderson, P Liu, F Spiga, J A Millan, D C Morse, and\n S C Glotzer. \"Strong scaling of general-purpose molecular dynamics simulations\n on GPUs\", Computer Physics Communications 192 (2015) 97--107\n-----\n-----\nYou are using HPMC. Please cite the following:\n* J A Anderson, M E Irrgang, and S C Glotzer. \"Scalable Metropolis Monte Carlo\n for simulation of hard shapes\", Computer Physics Communications 204 (2016) 21\n --30\n-----\nHOOMD-blue is running on the CPU\nnotice(2): Group \"all\" created containing 100 particles\n" ] ], [ [ "Activate the tuner and tell it to tune both the **d** and **a** moves. \n\nYou can restrict it to only tune one of the move types and provide a range of move sizes the tuner is allowed to choose from. This example sets a maximum translation move size of half the particle width, and a maximum rotation move size that rotates the square all the way to the next symmetric configuration.", "_____no_output_____" ] ], [ [ "tuner = hoomd.hpmc.util.tune(obj=mc, tunables=['d', 'a'], max_val=[0.5, 2*math.pi/4], target=0.2);", "_____no_output_____" ] ], [ [ "Update the tuner between short runs. 
It will examine the acceptance ratio and adjust the move sizes to meet the target acceptance ratio.", "_____no_output_____" ] ], [ [ "for i in range(20):\n hoomd.run(100, quiet=True);\n tuner.update();", "_____no_output_____" ] ], [ [ "In this example, the acceptance ratios converges after only 10 steps of the tuner.", "_____no_output_____" ] ], [ [ "data = numpy.genfromtxt(fname='log-output.log', skip_header=True);\npyplot.figure(figsize=(4,2.2), dpi=140);\npyplot.plot(data[:,0], data[:,2], label='translate acceptance');\npyplot.plot(data[:,0], data[:,4], label='d');\npyplot.xlabel('time step');\npyplot.ylabel('acceptance / move size');\npyplot.legend();\n\npyplot.figure(figsize=(4,2.2), dpi=140);\npyplot.plot(data[:,0], data[:,3], label='rotate acceptance');\npyplot.plot(data[:,0], data[:,5], label='a');\npyplot.xlabel('time step');\npyplot.ylabel('acceptance / move size');\npyplot.legend(loc='right');", "_____no_output_____" ] ], [ [ "## Sampling equilibrium states\n\nStrictly speaking, changing the move size with an tuner **VIOLATES DETAILED BALANCE**. When you make ensemble averages, do not include the period of the simulation where you executed the tuner. This example shows how to make the equilibrium run as a second stage of the script.", "_____no_output_____" ] ], [ [ "d = hoomd.dump.gsd(\"trajectory-square.gsd\", period=1000, group=hoomd.group.all(), overwrite=True);\nhoomd.run(10000);", "** starting run **\nTime 00:00:03 | Step 12000 / 12000 | TPS 3712.39 | ETA 00:00:00\nAverage TPS: 3698.01\n---------\nnotice(2): -- HPMC stats:\nnotice(2): Average translate acceptance: 0.192971\nnotice(2): Average rotate acceptance: 0.196917\nnotice(2): Trial moves per second: 1.47079e+06\nnotice(2): Overlap checks per second: 2.18958e+07\nnotice(2): Overlap checks per trial move: 14.8871\nnotice(2): Number of overlap errors: 0\n** run complete **\n" ] ], [ [ "Examine how the system configuration evolves over time. [ex_render](ex_render.py) is a helper script that builds animated gifs from trajectory files and system snapshots. It is part of the [hoomd-examples](https://github.com/glotzerlab/hoomd-examples) repository and designed only to render these examples.", "_____no_output_____" ] ], [ [ "ex_render.display_movie(lambda x: ex_render.render_polygon_frame(x, square_verts), 'trajectory-square.gsd')", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a6b0df130e8690c284735be7bb462f519e059ea
440,100
ipynb
Jupyter Notebook
notebooks/code-graveyard/6.2.1.2-modeling-absa-bert.ipynb
KoredeAkande/nigerian_isp_sentiment_analysis
9583c7dd671494fced48b25610e961a40e8c0603
[ "MIT" ]
null
null
null
notebooks/code-graveyard/6.2.1.2-modeling-absa-bert.ipynb
KoredeAkande/nigerian_isp_sentiment_analysis
9583c7dd671494fced48b25610e961a40e8c0603
[ "MIT" ]
null
null
null
notebooks/code-graveyard/6.2.1.2-modeling-absa-bert.ipynb
KoredeAkande/nigerian_isp_sentiment_analysis
9583c7dd671494fced48b25610e961a40e8c0603
[ "MIT" ]
null
null
null
150.512996
164,368
0.849646
[ [ [ "# Modeling: Aspect-Based Sentiment Analysis\n## BerTweet ", "_____no_output_____" ], [ "Oversampling as a solution to the imabalance still wasn't enough to raise the model's performance significantly. This was especially the case because the validation and test sets were very small and still imbalanced (plus, we can't resample these!). Thus, my next step is to gather more data by sampling tweets I had not previously annotated, annotating this new sample and ONLY retaining tweets that have aspects – as we have enough tweets without aspects already!", "_____no_output_____" ], [ "## 1. Library Importation", "_____no_output_____" ] ], [ [ "#Data manipulation and visualization packages\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n#Modeling packages\nimport torch\nfrom tqdm.auto import tqdm\nimport pytorch_lightning as pl\nfrom torchmetrics import Accuracy, F1, AUROC\nfrom torch.utils.data import Dataset, DataLoader\nfrom pytorch_lightning.loggers import TensorBoardLogger\nfrom pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping\nfrom transformers import AdamW, get_linear_schedule_with_warmup\nfrom transformers import AutoTokenizer, AutoModel\n\n#Model evaluation and metrics\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report, multilabel_confusion_matrix\n\n#Set seed\nRANDOM_SEED = 123\npl.seed_everything(RANDOM_SEED)", "Global seed set to 123\n" ] ], [ [ "## 2. Loading the data", "_____no_output_____" ] ], [ [ "#Load the dataframe with punctuated tweets. We will replace the tweets in the dataset below with this\ndf_with_punct = pd.read_csv(\"../data/processed/sample_encoded_and_cleaned.csv\")\nprint(df_with_punct.shape)\ndf_with_punct.head()", "(377, 6)\n" ], [ "#Load the corresponding aspect annotated data with the text cleaned with punctuation removal\ndf = pd.read_csv(\"../data/processed/absa_labelled.csv\")\nprint(df.shape)\ndf.head()", "(377, 3)\n" ], [ "#Create new dataset with the punctuated tweets and the aspects annotated\naspect_df = pd.concat([df_with_punct[['Text']],\n df[['Aspects','Sentiment']]],axis=1)\naspect_df.head()", "_____no_output_____" ], [ "#Save the new dataframe for future reference\naspect_df.to_csv(\"../data/processed/absa_text_with_punct.csv\", index=False)", "_____no_output_____" ] ], [ [ "## 3. Reformat the data", "_____no_output_____" ], [ "### a. Convert lists from string format to list", "_____no_output_____" ] ], [ [ "#Convert Aspects column\naspect_df.Aspects = aspect_df.Aspects.apply(lambda x: eval(x) if (pd.notnull(x)) else x)\n\n#Convert Sentiment column\naspect_df.Sentiment = aspect_df.Sentiment.apply(lambda x: eval(x) if (pd.notnull(x)) else x)\n\n#Make a copy of the dataframe\nprocessed_aspect_df = aspect_df.copy()\n\n#Quick preview\nprocessed_aspect_df.head()", "_____no_output_____" ] ], [ [ "### b. Fill NaNs with [None]", "_____no_output_____" ] ], [ [ "processed_aspect_df = processed_aspect_df.apply(lambda s: s.fillna({i: [None] for i in df.index}))\nprocessed_aspect_df.head()", "_____no_output_____" ] ], [ [ "### b. Binarizing the aspects", "_____no_output_____" ] ], [ [ "#List aspects determined during the annotation phase\n#Note: This might not be exhaustive! But it should cover most cases. 
It is also subjective!\n#Also using synonyms of these words will likely yield different results\nASPECTS = ['cost','speed','reliability','coverage', 'customer service', 'trustworthiness']\n\n#Iterate through all the aspects and if the aspect is not in the tweet, record 0 else record 1\nfor aspect in ASPECTS[::-1]:\n processed_aspect_df.insert(1,aspect,processed_aspect_df.Aspects.apply(lambda x: 1 if aspect in x else 0))\n \n#Drop the Aspects column\nprocessed_aspect_df.drop(columns=['Aspects'] , inplace=True)\n\n#Quick preview\nprocessed_aspect_df.head()", "_____no_output_____" ], [ "#Save the binarized dataframe\nprocessed_aspect_df.to_csv(\"../data/processed/absa_binarized.csv\",index=False)", "_____no_output_____" ] ], [ [ "## 4. Quick EDA", "_____no_output_____" ] ], [ [ "with plt.style.context(['notebook','no-latex','grid']):\n plt.figure(figsize = (12,7), dpi=300)\n processed_aspect_df[ASPECTS].sum().sort_values()\\\n .plot(kind = \"barh\", color = 'cornflowerblue')\\\n .set(xlabel = \"Number of times detected in tweets\",\n ylabel = \"Aspect category\")\n \n plt.show()", "_____no_output_____" ] ], [ [ "From the above, we see that the data is quite imbalanced. Trustworthiness is especially important to point out. As there are very few tweets tagged with trustworthiness, we might need to oversample the label.", "_____no_output_____" ], [ "## 5. Split data into training, validation and test set", "_____no_output_____" ], [ "### a. Split to training and test set", "_____no_output_____" ] ], [ [ "#Split the data 80:20 (training and test)\ntrain_df, test_df = train_test_split(processed_aspect_df,test_size=0.20, \n stratify = processed_aspect_df[['trustworthiness','customer service']])\nprint(f\"Train size: {train_df.shape}\", f\"Test size: {test_df.shape}\")", "Train size: (301, 8) Test size: (76, 8)\n" ] ], [ [ "### b. Split training set to training and validation set", "_____no_output_____" ] ], [ [ "train_df, val_df = train_test_split(train_df,test_size=0.25,\n stratify = train_df[['trustworthiness','customer service']])\nprint(f\"Train size: {train_df.shape}\", f\"Validation size: {val_df.shape}\")", "Train size: (225, 8) Validation size: (76, 8)\n" ] ], [ [ "### c. Check to see that all the sets have all the aspect categories", "_____no_output_____" ] ], [ [ "df_sets = [('Training',train_df),('Validation', val_df),('Test',test_df)]\n \nplt.figure(figsize = (12,6), dpi=300)\nfor idx,df in enumerate(df_sets):\n \n plt.subplot(1,3,idx+1)\n plt.tight_layout()\n\n with plt.style.context(['notebook','no-latex','grid']):\n \n plt.title(df[0])\n df[1][ASPECTS].sum().plot(kind = \"bar\", color = 'cornflowerblue')\n plt.xticks(rotation=90)\n \n if idx == 0:\n plt.ylabel('Number of tags in the tweets', fontsize=16)\n \n \nplt.show()\n ", "_____no_output_____" ] ], [ [ "## 6. Modeling - Aspect Extraction", "_____no_output_____" ], [ "### a. 
Load the tokenizer and the BERT model", "_____no_output_____" ] ], [ [ "from transformers import BertTokenizerFast as BertTokenizer, BertModel, AdamW, get_linear_schedule_with_warmup\nBERT_MODEL_NAME = 'bert-base-cased'", "_____no_output_____" ], [ "#Load BerTweet tokenizer\nTOKENIZER = BertTokenizer.from_pretrained(BERT_MODEL_NAME)\n\n#Load the BERTweet model\nBERT_MODEL = BertModel.from_pretrained(BERT_MODEL_NAME, from_tf = True, return_dict=True)", "2021-11-26 09:34:01.699303: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA\nTo enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\nAll TF 2.0 model weights were used when initializing BertModel.\n\nAll the weights of BertModel were initialized from the TF 2.0 model.\nIf your task is similar to the task the model of the checkpoint was trained on, you can already use BertModel for predictions without further training.\n" ] ], [ [ "### b. Prepare the training, validation, and test sets", "_____no_output_____" ] ], [ [ "TRAIN_BATCH_SIZE = 16\nTEST_BATCH_SIZE = 8", "_____no_output_____" ], [ "class Generate_PyTorch_Dataset(torch.utils.data.Dataset):\n\n def __init__(self, dataframe, tokenizer):\n self.dataframe = dataframe\n self.tokenizer = tokenizer\n self.max_len = tokenizer.model_max_length\n \n def __len__(self):\n return len(self.dataframe)\n \n def __getitem__(self, idx):\n \n #Get each row of the dataframe\n data_row = self.dataframe.iloc[idx]\n \n #Get the tweet\n text = str(data_row.Text)\n \n #Get the aspect labels\n labels = data_row[ASPECTS]\n \n #Encode the tweet\n encoded_text = self.tokenizer.encode_plus(\n text,\n add_special_tokens = True,\n truncation = True,\n return_attention_mask = True,\n return_token_type_ids = False,\n return_length = True,\n max_length = self.max_len,\n return_tensors = 'pt',\n padding = \"max_length\"\n )\n \n \n return {\n 'input_ids': encoded_text['input_ids'].flatten(),\n 'attention_mask': encoded_text['attention_mask'].flatten(),\n 'labels': torch.tensor(labels, dtype=torch.float)\n }\n ", "_____no_output_____" ], [ "class PyTorchDataModule(pl.LightningDataModule):\n \n def __init__(self, train_df, val_df, test_df, tokenizer, train_batch_size=16, test_batch_size=8):\n \n super().__init__()\n self.train_df = train_df\n self.val_df = train_df\n self.test_df = test_df\n self.tokenizer = tokenizer\n self.train_batch_size = train_batch_size\n self.test_batch_size = test_batch_size\n self.max_len = self.tokenizer.model_max_length\n \n def setup(self):\n \n \"\"\"\n Setup the training, validation and test sets by converting them to Pytorch datasets\n \"\"\"\n \n self.train_dataset = Generate_PyTorch_Dataset(self.train_df,self.tokenizer)\n self.val_dataset = Generate_PyTorch_Dataset(self.val_df,self.tokenizer)\n self.test_dataset = Generate_PyTorch_Dataset(self.test_df,self.tokenizer)\n \n \n def train_dataloader(self):\n \n \"\"\"\n Training set dataloader\n \"\"\"\n \n return DataLoader(self.train_dataset,\n batch_size = self.train_batch_size,\n shuffle = True, \n num_workers = 2)\n \n def val_dataloader(self):\n \n \"\"\"\n Validation set dataloader\n \"\"\"\n \n return DataLoader(self.val_dataset, batch_size = self.test_batch_size, num_workers=2)\n \n def test_dataloader(self):\n \n \"\"\"\n Test set dataloader\n \"\"\"\n \n return DataLoader(self.test_dataset, batch_size = self.test_batch_size, 
num_workers=2)\n ", "_____no_output_____" ], [ "#Instantiate and set up the data_module\ndata_module = PyTorchDataModule(train_df,val_df,test_df,TOKENIZER, TRAIN_BATCH_SIZE, TEST_BATCH_SIZE)\ndata_module.setup()", "_____no_output_____" ] ], [ [ "### c. Define the model", "_____no_output_____" ] ], [ [ "class ISP_TweetAspectClassifier(pl.LightningModule):\n \n #Set the aspect classifier\n def __init__(self, n_classes=6, n_training_steps=None, n_warmup_steps=None, lr=2e-5):\n super().__init__()\n self.bert = BERT_MODEL\n self.classifier = torch.nn.Linear(self.bert.config.hidden_size, n_classes)\n self.n_training_steps = n_training_steps\n self.n_warmup_steps = n_warmup_steps\n self.lr = lr\n self.criterion = torch.nn.BCELoss()\n \n def forward(self, input_ids, attention_mask, labels = None):\n output = self.bert(input_ids, attention_mask=attention_mask)\n output = self.classifier(output.pooler_output)\n output = torch.sigmoid(output)\n \n loss = 0\n \n if labels is not None:\n loss = self.criterion(output, labels)\n \n return loss, output\n \n def training_step(self, batch, batch_idx):\n \n input_ids = batch[\"input_ids\"]\n attention_mask = batch[\"attention_mask\"]\n labels = batch[\"labels\"]\n \n loss, outputs = self(input_ids, attention_mask, labels)\n self.log(\"train_loss\", loss, prog_bar=True, logger=True)\n \n return {\"loss\": loss, \"predictions\": outputs.detach(), \"labels\": labels}\n \n def validation_step(self, batch, batch_idx):\n input_ids = batch[\"input_ids\"]\n attention_mask = batch[\"attention_mask\"]\n labels = batch[\"labels\"]\n \n loss, outputs = self(input_ids, attention_mask,labels)\n self.log(\"val_loss\", loss, prog_bar=True, logger=True)\n \n return loss\n \n \n def test_step(self, batch, batch_idx):\n input_ids = batch[\"input_ids\"]\n attention_mask = batch[\"attention_mask\"]\n labels = batch[\"labels\"]\n \n loss, outputs = self(input_ids, attention_mask, labels)\n self.log(\"test_loss\", loss, prog_bar=True, logger=True)\n \n return loss\n \n \n def training_epoch_end(self, outputs):\n \n #List to store the true labels and the model's predictions\n labels = []\n predictions = []\n \n #Iterate through all the outputs and get the true vs. 
predicted label\n for output in outputs:\n \n for label in output[\"labels\"].detach().cpu():\n labels.append(label)\n for pred in output[\"predictions\"].detach().cpu():\n predictions.append(pred)\n \n #Stack the tensors\n labels = torch.stack(labels).int()\n predictions = torch.stack(predictions)\n \n #Record the AUROC for each aspect after each training epoch\n for idx, name in enumerate(ASPECTS):\n metric = AUROC()\n class_roc_auc = metric(predictions[:,idx], labels[:,idx])\n self.logger.experiment.add_scalar(f\"{name}_roc_auc/Train\", class_roc_auc, self.current_epoch)\n \n \n def configure_optimizers(self):\n \n optimizer = AdamW(self.parameters(), lr = self.lr)\n \n scheduler = get_linear_schedule_with_warmup(\n optimizer,\n num_warmup_steps=self.n_warmup_steps,\n num_training_steps=self.n_training_steps)\n \n \n return {'optimizer': optimizer, 'lr_scheduler':{'scheduler':scheduler,'interval':'step'}}\n ", "_____no_output_____" ] ], [ [ "class ISP_TweetAspectClassifier(pl.LightningModule):\n \n #Set the aspect classifier\n def __init__(self, n_classes=6, n_training_steps=None, n_warmup_steps=None, lr=2e-5):\n super().__init__()\n self.bertweet = BERTWEET_MODEL\n self.classifier = torch.nn.Linear(self.bertweet.config.hidden_size, n_classes)\n self.n_training_steps = n_training_steps\n self.n_warmup_steps = n_warmup_steps\n self.lr = lr\n self.criterion = torch.nn.BCEWithLogitsLoss()\n \n def forward(self, input_ids, attention_mask):\n output = self.bertweet(input_ids, attention_mask=attention_mask)\n output = self.classifier(output.pooler_output)\n output = torch.sigmoid(output)\n \n return output\n \n def training_step(self, batch, batch_idx):\n input_ids = batch[\"input_ids\"]\n attention_mask = batch[\"attention_mask\"]\n labels = batch[\"labels\"]\n \n outputs = self(input_ids, attention_mask)\n loss = self.criterion(outputs, labels)\n self.log(\"train_loss\", loss, prog_bar=True, logger=True)\n \n return {\"loss\": loss, \"predictions\": outputs.detach(), \"labels\": labels}\n \n def validation_step(self, batch, batch_idx):\n input_ids = batch[\"input_ids\"]\n attention_mask = batch[\"attention_mask\"]\n labels = batch[\"labels\"]\n \n outputs = self(input_ids, attention_mask)\n loss = self.criterion(outputs, labels)\n self.log(\"val_loss\", loss, prog_bar=True, logger=True)\n \n return loss\n \n \n def test_step(self, batch, batch_idx):\n input_ids = batch[\"input_ids\"]\n attention_mask = batch[\"attention_mask\"]\n labels = batch[\"labels\"]\n \n outputs = self(input_ids, attention_mask)\n loss = self.criterion(outputs, labels)\n self.log(\"test_loss\", loss, prog_bar=True, logger=True)\n \n return loss\n \n \n def training_epoch_end(self, outputs):\n \n #List to store the true labels and the model's predictions\n labels = []\n predictions = []\n \n #Iterate through all the outputs and get the true vs. 
predicted label\n for output in outputs:\n \n for label in output[\"labels\"].detach().cpu():\n labels.append(label)\n for pred in output[\"predictions\"].detach().cpu():\n predictions.append(pred)\n \n #Stack the tensors\n labels = torch.stack(labels).int()\n predictions = torch.stack(predictions)\n \n #Record the AUROC for each aspect after each training epoch\n for idx, name in enumerate(ASPECTS):\n metric = AUROC()\n class_roc_auc = metric(predictions[:,idx], labels[:,idx])\n self.logger.experiment.add_scalar(f\"{name}_roc_auc/Train\", class_roc_auc, self.current_epoch)\n \n \n def configure_optimizers(self):\n \n optimizer = AdamW(self.parameters(), lr = self.lr)\n \n scheduler = get_linear_schedule_with_warmup(\n optimizer,\n num_warmup_steps=self.n_warmup_steps,\n num_training_steps=self.n_training_steps)\n \n \n return {'optimizer': optimizer, 'lr_scheduler':{'scheduler':scheduler,'interval':'step'}}\n ", "_____no_output_____" ] ], [ [ "#Define variables\nN_EPOCHS = 20\nSTEPS_PER_EPOCH =len(train_df)//TRAIN_BATCH_SIZE\nTOTAL_TRAIN_STEPS = STEPS_PER_EPOCH * N_EPOCHS\nN_WARMUP_STEPS = TOTAL_TRAIN_STEPS // 4\n\n#Instantiate the classifier model\nmodel = ISP_TweetAspectClassifier(n_training_steps = TOTAL_TRAIN_STEPS,\n n_warmup_steps = N_WARMUP_STEPS)", "_____no_output_____" ] ], [ [ "### d. Training", "_____no_output_____" ] ], [ [ "#Setup callback to perform saves during training\ncheckpoint_callback = ModelCheckpoint(\n dirpath = \"../models/absa-aspect-extraction/bert\",\n filename = \"ae-{epoch:02d}-{val_loss:.2f}\",\n save_top_k = 3, #Save the top 3 models\n verbose = True,\n monitor = \"val_loss\",\n mode = \"min\" #Minimize val loss\n)\n\n#Log progress in Tensorboard\nlogger = TensorBoardLogger(\"../models/absa-aspect-extraction/bert/lightning_logs\", name = \"isp-tweets\")", "_____no_output_____" ], [ "trainer = pl.Trainer(\n #gpus = 1,\n logger = logger,\n log_every_n_steps = 15,\n callbacks = [checkpoint_callback],\n max_epochs = N_EPOCHS,\n progress_bar_refresh_rate = 30\n)\n\ntrainer.fit(model, data_module)", "GPU available: False, used: False\nTPU available: False, using: 0 TPU cores\nIPU available: False, using: 0 IPUs\n\n | Name | Type | Params\n-----------------------------------------\n0 | bert | BertModel | 108 M \n1 | classifier | Linear | 4.6 K \n2 | criterion | BCELoss | 0 \n-----------------------------------------\n108 M Trainable params\n0 Non-trainable params\n108 M Total params\n433.260 Total estimated model params size (MB)\n" ] ], [ [ "### e. Model Evaluation", "_____no_output_____" ] ], [ [ "#Evaluate the model's performance on the test dataset\ntrainer.test(model,data_module)", "/Users/koredeakande/opt/anaconda3/envs/capstone/lib/python3.7/site-packages/pytorch_lightning/trainer/data_loading.py:111: UserWarning: The dataloader, test_dataloader 0, does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` (try 8 which is the number of cpus on this machine) in the `DataLoader` init to improve performance.\n f\"The dataloader, {name}, does not have many workers which may be a bottleneck.\"\n" ], [ "# Visualize the logs using tensorboard.\n%load_ext tensorboard\n%tensorboard --logdir ../models/absa-aspect-extraction/bert/lightning_logs/\n#%reload_ext tensorboard", "The tensorboard extension is already loaded. 
To reload it, use:\n %reload_ext tensorboard\n" ] ], [ [ "#### (i) Evaluate model performance on a generated example", "_____no_output_____" ] ], [ [ "trainer.checkpoint_callback.best_model_path", "_____no_output_____" ], [ "#Load the best model based on validation loss\ntrained_model = ISP_TweetAspectClassifier.load_from_checkpoint(\n trainer.checkpoint_callback.best_model_path,\n n_classes=len(ASPECTS)\n)\n\ntrained_model", "_____no_output_____" ], [ "#Put model into evaluation mode\ntrained_model.eval()\ntrained_model.freeze()", "_____no_output_____" ], [ "test_comment = \"The internet is so slow and it's so expensive\"\n\nencoding = TOKENIZER.encode_plus(\n test_comment,\n add_special_tokens=True,\n max_length=TOKENIZER.model_max_length,\n return_token_type_ids=False,\n padding=\"max_length\",\n return_attention_mask=True,\n return_tensors='pt',\n)\n\n_, test_prediction = trained_model(encoding[\"input_ids\"], encoding[\"attention_mask\"])\ntest_prediction = test_prediction.flatten().numpy()\nfor label, prediction in zip(ASPECTS, test_prediction):\n print(f\"{label}: {prediction}\")", "cost: 0.1138223260641098\nspeed: 0.18508286774158478\nreliability: 0.1815706491470337\ncoverage: 0.17521558701992035\ncustomer service: 0.16000589728355408\ntrustworthiness: 0.056724175810813904\n" ] ], [ [ "Doesn't seem to do a good job distinguishing the different aspects for this case.", "_____no_output_____" ], [ "#### (ii) Evaluate model performance on the validation set", "_____no_output_____" ] ], [ [ "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\ntrained_model = trained_model.to(device)\n\n#Prepare the validation set\nval_dataset = Generate_PyTorch_Dataset(\n val_df,\n TOKENIZER,\n)\n\n#Lists to store the model predictions and the true labels\nmodel_preds = []\ntrue_labels = []\n\nfor item in tqdm(val_dataset):\n _, pred = trained_model(\n item[\"input_ids\"].unsqueeze(dim=0).to(device),\n item[\"attention_mask\"].unsqueeze(dim=0).to(device)\n )\n model_preds.append(pred.flatten())\n true_labels.append(item[\"labels\"].int())\n\nmodel_preds = torch.stack(model_preds).detach().cpu()\ntrue_labels = torch.stack(true_labels).detach().cpu()", "_____no_output_____" ], [ "#Compute the accuracy on the validation set\nacc_metric = Accuracy()\nacc_metric(model_preds, true_labels)", "_____no_output_____" ] ], [ [ "We get a fairly high accuracy on the validation set. However, we note that the dataset is quite imbalanced. 
So it would be important to also check the model's performance on the different aspects (remember: trustworthiness had very few samples)", "_____no_output_____" ] ], [ [ "auroc_metric = AUROC(pos_label=1)\nprint(\"AUROC per tag\")\nfor i, name in enumerate(ASPECTS):\n tag_auroc = auroc_metric(model_preds[:, i], true_labels[:, i])\n print(f\"{name}: {tag_auroc}\")", "AUROC per tag\ncost: 0.7031509280204773\nspeed: 0.6428571343421936\nreliability: 0.7494823336601257\ncoverage: 0.7260274291038513\ncustomer service: 0.8985915780067444\ntrustworthiness: 0.13333332538604736\n" ], [ "true_labels", "_____no_output_____" ], [ "model_preds", "_____no_output_____" ] ], [ [ "### Classification Report", "_____no_output_____" ] ], [ [ "y_pred = model_preds.numpy()\ny_true = true_labels.numpy()\n\ny_pred = np.where(y_pred > 0.2, 1, 0)\n\nprint(classification_report(\n y_true,\n y_pred,\n target_names=ASPECTS,\n zero_division=0\n))", " precision recall f1-score support\n\n cost 0.00 0.00 0.00 9\n speed 0.17 0.33 0.22 6\n reliability 0.50 0.43 0.46 7\n coverage 0.00 0.00 0.00 3\ncustomer service 0.50 0.20 0.29 5\n trustworthiness 0.00 0.00 0.00 1\n\n micro avg 0.26 0.19 0.22 31\n macro avg 0.19 0.16 0.16 31\n weighted avg 0.23 0.19 0.19 31\n samples avg 0.03 0.05 0.03 31\n\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
4a6b1368ade4274bbcf49bf6708c1ba78c58c477
13,617
ipynb
Jupyter Notebook
test/Models/energybalance_pkg/test/cpp/Priestlytaylor.ipynb
cyrillemidingoyi/PyCropML
b866cc17374424379142d9162af985c1f87c74b6
[ "MIT" ]
5
2020-06-21T18:58:04.000Z
2022-01-29T21:32:28.000Z
test/Models/energybalance_pkg/test/cpp/Priestlytaylor.ipynb
cyrillemidingoyi/PyCropML
b866cc17374424379142d9162af985c1f87c74b6
[ "MIT" ]
27
2018-12-04T15:35:44.000Z
2022-03-11T08:25:03.000Z
test/Models/energybalance_pkg/test/cpp/Priestlytaylor.ipynb
cyrillemidingoyi/PyCropML
b866cc17374424379142d9162af985c1f87c74b6
[ "MIT" ]
7
2019-04-20T02:25:22.000Z
2021-11-04T07:52:35.000Z
53.610236
198
0.598002
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
4a6b17436153398b9183d02bad38407e45f9b9f8
38,433
ipynb
Jupyter Notebook
nba-three-pointers-map.ipynb
alrocar/carto-nba
d30f189d2415533bc8c78578b29f7c789d87ab00
[ "Apache-2.0" ]
null
null
null
nba-three-pointers-map.ipynb
alrocar/carto-nba
d30f189d2415533bc8c78578b29f7c789d87ab00
[ "Apache-2.0" ]
null
null
null
nba-three-pointers-map.ipynb
alrocar/carto-nba
d30f189d2415533bc8c78578b29f7c789d87ab00
[ "Apache-2.0" ]
null
null
null
39.378074
347
0.411391
[ [ [ "## VISUALIZING YOUR FAVOURITE NBA PLAYER 3 POINTERS GRAPH\n\nTools we are going to use:\n\n- The NBA API to get the data from any NBA player\n- CARTOframes to upload the data seamlessly to CARTO\n- The CARTO Python SDK to analyze and create a 3-pointers map\n- carto-print to generate a high resolution ready-to-print image\n", "_____no_output_____" ], [ "#### Let's start by importing the required modules", "_____no_output_____" ] ], [ [ "import os\nimport sys\nimport time\n\nfrom carto.auth import APIKeyAuthClient\nfrom carto.maps import NamedMapManager\nfrom carto.print import Printer\n\nfrom nba_api.stats.static import players\nfrom nba_api.stats.static import teams\nfrom nba_api.stats.endpoints import shotchartdetail\n\nimport pandas as pd\nfrom cartoframes.auth import Credentials, set_default_credentials\nfrom cartoframes import to_carto\nfrom cartoframes.data.clients import SQLClient\nimport geopandas as gpd", "_____no_output_____" ] ], [ [ "#### Time to set the CARTO credentials to use", "_____no_output_____" ] ], [ [ "CARTO_BASE_URL = os.environ['CARTO_API_URL']\nCARTO_BASE_URL = 'https://aromeu.carto.com/'", "_____no_output_____" ], [ "CARTO_API_KEY = os.environ['CARTO_API_KEY']\nCARTO_API_KEY = '424dec8b179567aace6ef7b229c9afa1d78d68e7'\nCARTO_USER_NAME = 'aromeu'", "_____no_output_____" ] ], [ [ "#### Set the player name and the teams he has played with", "_____no_output_____" ] ], [ [ "PLAYER_NAME = 'Stephen Curry'\nTEAMS_NAME = ['Golden State Warriors']", "_____no_output_____" ], [ "PLAYER_NAME = 'Russell Westbrook'\nTEAMS_NAME = ['Oklahoma City Thunder']", "_____no_output_____" ], [ "PLAYER_NAME = 'Damian Lillard'\nTEAMS_NAME = ['Portland Trail Blazers']", "_____no_output_____" ] ], [ [ "#### Yes, just 10 lines to get all their shoting data", "_____no_output_____" ] ], [ [ "data = []\nheaders = []\nplayer = players.find_players_by_full_name(PLAYER_NAME)\nplayer_id = player[0]['id']\n\nfor team_name in TEAMS_NAME:\n team = teams.find_teams_by_full_name(team_name)\n team_id = team[0]['id']\n shots = shotchartdetail.ShotChartDetail(player_id=player_id, team_id=team_id)\n headers = shots.shot_chart_detail.data['headers']\n data.extend(shots.shot_chart_detail.data['data'])", "_____no_output_____" ] ], [ [ "#### Let's go the data scientist path", "_____no_output_____" ] ], [ [ "#df = pd.DataFrame(data, columns=headers)\nPLAYER_NAME = 'stephen_curry'\ndf = pd.read_csv(f'{PLAYER_NAME}.csv')", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ] ], [ [ "#### And send the data to your CARTO account", "_____no_output_____" ] ], [ [ "dataset_name = '_'.join(PLAYER_NAME.split(' ')).lower()\ncreds = Credentials(base_url=CARTO_BASE_URL, api_key=CARTO_API_KEY)\nset_default_credentials(creds)", "_____no_output_____" ], [ "# 2019 shots\n# YEAR = '2019'\n# bool_series = df[\"GAME_DATE\"].str.startswith(YEAR, na = False) \n \n# displaying filtered dataframe \n# df = df[bool_series] ", "_____no_output_____" ], [ "gdf = gpd.GeoDataFrame(\n df, geometry=gpd.points_from_xy(df.LOC_X, df.LOC_Y))\nto_carto(gdf, dataset_name, if_exists='replace')", "Success! Data uploaded to table \"stephen_curry\" correctly\n" ] ], [ [ "#### Wait, shots locations are in pixels coordinates. 
Let's do a hacky trick and let's suppose we are using coordinates in meters", "_____no_output_____" ] ], [ [ "sql_client = SQLClient()\nsql_client.execute(\"UPDATE {} SET the_geom = st_transform(st_setsrid(st_geometryfromtext('POINT(' || ST_X(the_geom) || ' ' || ST_Y(the_geom) || ')'), 3857), 4326)\".format(dataset_name))", "_____no_output_____" ] ], [ [ "#### Let's now compose a 7 layers map. If you wonder how I get to imagine this, I used BUILDER + some PostGIS wizardry + a lot (I mean a lot) of trial/error for the styling ", "_____no_output_____" ] ], [ [ "def create_named_map(auth_client, dataset_name, map_name, factor):\n template = {\n \"version\": \"0.0.1\",\n \"name\": map_name,\n \"auth\": {\n \"method\": \"open\"\n },\n \"placeholders\": {},\n \"view\": {},\n \"layergroup\": {\n \"version\": \"1.0.1\",\n \"layers\": [\n {\n \"type\": \"plain\",\n \"options\": {\n \"color\": \"#2d2d2d\"\n }\n },\n {\n \"type\": \"cartodb\",\n \"options\": {\n \"cartocss_version\": \"2.1.1\",\n \"cartocss\": '''#layer {\n polygon-fill: #2a2a2a;\n polygon-opacity: 0.9;\n }\n #layer::outline {\n line-width: 2 * %d;\n line-color: #4edce6;\n line-opacity: 1;\n }''' % (factor),\n \"sql\": '''SELECT 1 AS cartodb_id,\n the_geom,\n the_geom_webmercator\n FROM aromeu.basketball_court\n UNION\n SELECT 1 AS cartodb_id,\n the_geom,\n the_geom_webmercator\n FROM aromeu.basketball_court_markers'''\n }\n },\n {\n \"type\": \"cartodb\",\n \"options\": {\n \"cartocss_version\": \"2.1.1\",\n \"cartocss\": '''#layer {\n line-width: 30 * %d;\n line-comp-op: screen;\n line-opacity: 0.08;\n [shot_distance >= 31] {\n line-color: #fff500;\n line-width: 0.3 * %d;\n }\n [shot_distance >= 22][shot_distance < 24] {\n line-color: #0d3781;\n line-opacity: 0.1;\n }\n\n [shot_distance >= 24][shot_distance < 26] {\n line-color: #681a87;\n line-opacity: 0.1;\n }\n\n [shot_distance >= 26][shot_distance < 28] {\n line-color: #8a1377;\n }\n\n [shot_distance >= 28][shot_distance < 31] {\n line-color: #ee29ac;\n }\n\n image-filters: agg-stack-blur(45 * %d, 45 * %d);\n }''' % (factor, factor, factor, factor),\n \"sql\": '''WITH a AS (\n SELECT\n *,\n st_transform(the_geom, 3857) as the_geom_webmercator,\n ST_Length(the_geom::geography) / 1000 AS length\n FROM (\n SELECT\n ST_MakeLine(\n the_geom,\n ST_SetSRID(\n ST_MakePoint(\n -1.53456990177195e-22,\n -3.17697838071347e-15\n ),\n 4326\n )\n ) AS the_geom,\n cartodb_id, grid_type, game_id, game_event_id, player_id, player_name, team_id, team_name, period, minutes_remaining, seconds_remaining, event_type, action_type, shot_type, shot_zone_basic, shot_zone_area, shot_zone_range, shot_distance, loc_x, loc_y, shot_attempted_flag, shot_made_flag, game_date, htm, vtm\n FROM (SELECT * FROM {dataset} WHERE shot_distance >= 22 and shot_distance < 30 and shot_made_flag != 0) _line_analysis\n ) _cdb_analysis_line_to_single_point\n ) SELECT * FROM a'''.format(dataset=dataset_name)\n }\n },\n {\n \"type\": \"cartodb\",\n \"options\": {\n \"cartocss_version\": \"2.1.1\",\n \"cartocss\": '''#layer {\n line-width: 1 * %d;\n line-comp-op: screen;\n line-opacity: 0.7;\n [shot_distance >= 31] {\n line-color: #fff500;\n }\n [shot_distance >= 22][shot_distance < 24] {\n line-color: #0d3781;\n }\n [shot_distance >= 24][shot_distance < 26] {\n line-color: #681a87;\n }\n\n [shot_distance >= 26][shot_distance < 28] {\n line-color: #8a1377;\n }\n\n [shot_distance >= 28][shot_distance < 31] {\n line-color: #ee29ac;\n }\n }''' % (factor),\n \"sql\": '''WITH a AS (\n SELECT\n *,\n st_transform(the_geom, 3857) as 
the_geom_webmercator,\n ST_Length(the_geom::geography) / 1000 AS length\n FROM (\n SELECT\n ST_MakeLine(\n the_geom,\n ST_SetSRID(\n ST_MakePoint(\n -1.53456990177195e-22,\n -3.17697838071347e-15\n ),\n 4326\n )\n ) AS the_geom,\n cartodb_id, grid_type, game_id, game_event_id, player_id, player_name, team_id, team_name, period, minutes_remaining, seconds_remaining, event_type, action_type, shot_type, shot_zone_basic, shot_zone_area, shot_zone_range, shot_distance, loc_x, loc_y, shot_attempted_flag, shot_made_flag, game_date, htm, vtm\n FROM (SELECT * FROM {dataset} WHERE shot_distance >= 22 and shot_distance < 30 and shot_made_flag != 0) _line_analysis\n ) _cdb_analysis_line_to_single_point\n ) SELECT * FROM a'''.format(dataset=dataset_name)\n }\n },\n {\n \"type\": \"cartodb\",\n \"options\": {\n \"cartocss_version\": \"2.1.1\",\n \"cartocss\": '''#layer {\n line-width: 6 * %d;\n line-comp-op: screen;\n line-opacity: 0.2;\n line-color: #fff500;\n image-filters: agg-stack-blur(18 * %d, 18 * %d);\n\n }''' % (factor, factor, factor),\n \"sql\": '''WITH a AS (\n SELECT\n *,\n st_transform(the_geom, 3857) as the_geom_webmercator,\n ST_Length(the_geom::geography) / 1000 AS length\n FROM (\n SELECT\n ST_MakeLine(\n the_geom,\n ST_SetSRID(\n ST_MakePoint(\n -1.53456990177195e-22,\n -3.17697838071347e-15\n ),\n 4326\n )\n ) AS the_geom,\n cartodb_id, grid_type, game_id, game_event_id, player_id, player_name, team_id, team_name, period, minutes_remaining, seconds_remaining, event_type, action_type, shot_type, shot_zone_basic, shot_zone_area, shot_zone_range, shot_distance, loc_x, loc_y, shot_attempted_flag, shot_made_flag, game_date, htm, vtm\n FROM (SELECT * FROM {dataset} WHERE shot_distance >= 30 and shot_made_flag != 0) _line_analysis\n ) _cdb_analysis_line_to_single_point\n ), points AS\n ( SELECT cartodb_id , loc_x, loc_y, shot_distance,\n ST_StartPoint(ST_LineMerge(the_geom_webmercator)) AS p1 ,\n ST_EndPoint(ST_LineMerge(the_geom_webmercator)) AS p2\n FROM a AS q2),\n mid AS\n (SELECT *,\n ST_SetSRID(ST_MakePoint((ST_X(p2) - ST_X(p1))/2 + ST_X(p1), (ST_Y(p2) - ST_Y(p1))/3 + ST_Y(p1)), 3857) AS midpoint,\n PI()/2 - ST_Azimuth(p1, p2) AS angle,\n ST_Distance(p1, p2)/6 AS radius\n FROM points),\n third AS\n (SELECT *,\n ST_Translate(midpoint, sign(loc_x) *0.005 *sin(angle)*radius, 0 *cos(angle)*radius) AS p3\n FROM mid)\n SELECT *,\n ST_SetSRID(ST_CurveToLine('CIRCULARSTRING( ' || ST_X(p1) || ' ' || ST_Y(p1) || ',' || ST_X(p3) || ' ' || ST_Y(p3) || ',' || ST_X(p2) || ' ' || ST_Y(p2) || ')'), 3857) AS the_geom_webmercator\n FROM third'''.format(dataset=dataset_name)\n }\n },\n {\n \"type\": \"cartodb\",\n \"options\": {\n \"cartocss_version\": \"2.1.1\",\n \"cartocss\": '''#layer {\n line-width: 1.5 * %d;\n line-comp-op: lighten;\n line-opacity: 0.7;\n line-color: #fff500;\n\n }''' % (factor),\n \"sql\": '''WITH a AS (\n SELECT\n *,\n st_transform(the_geom, 3857) as the_geom_webmercator,\n ST_Length(the_geom::geography) / 1000 AS length\n FROM (\n SELECT\n ST_MakeLine(\n the_geom,\n ST_SetSRID(\n ST_MakePoint(\n -1.53456990177195e-22,\n -3.17697838071347e-15\n ),\n 4326\n )\n ) AS the_geom,\n cartodb_id, grid_type, game_id, game_event_id, player_id, player_name, team_id, team_name, period, minutes_remaining, seconds_remaining, event_type, action_type, shot_type, shot_zone_basic, shot_zone_area, shot_zone_range, shot_distance, loc_x, loc_y, shot_attempted_flag, shot_made_flag, game_date, htm, vtm\n FROM (SELECT * FROM {dataset} WHERE shot_distance >= 30 and shot_made_flag != 0) _line_analysis\n 
) _cdb_analysis_line_to_single_point\n ), points AS\n ( SELECT cartodb_id , loc_x, loc_y, shot_distance,\n ST_StartPoint(ST_LineMerge(the_geom_webmercator)) AS p1 ,\n ST_EndPoint(ST_LineMerge(the_geom_webmercator)) AS p2\n FROM a AS q2),\n mid AS\n (SELECT *,\n ST_SetSRID(ST_MakePoint((ST_X(p2) - ST_X(p1))/2 + ST_X(p1), (ST_Y(p2) - ST_Y(p1))/3 + ST_Y(p1)), 3857) AS midpoint,\n PI()/2 - ST_Azimuth(p1, p2) AS angle,\n ST_Distance(p1, p2)/6 AS radius\n FROM points),\n third AS\n (SELECT *,\n ST_Translate(midpoint, sign(loc_x) *0.005 *sin(angle)*radius, 0 *cos(angle)*radius) AS p3\n FROM mid)\n SELECT *,\n ST_SetSRID(ST_CurveToLine('CIRCULARSTRING( ' || ST_X(p1) || ' ' || ST_Y(p1) || ',' || ST_X(p3) || ' ' || ST_Y(p3) || ',' || ST_X(p2) || ' ' || ST_Y(p2) || ')'), 3857) AS the_geom_webmercator\n FROM third'''.format(dataset=dataset_name)\n }\n },\n {\n \"type\": \"cartodb\",\n \"options\": {\n \"cartocss_version\": \"2.1.1\",\n \"cartocss\": '''#layer['mapnik::geometry_type'=1] {\n marker-fill: #fff;\n marker-width: 12 * %d;\n marker-line-color: #fff;\n marker-line-width: 0;\n marker-line-opacity: 1;\n marker-opacity: 0.6;\n marker-type: ellipse;\n marker-placement: point;\n marker-allow-overlap: true;\n marker-comp-op: lighten;\n marker-clip: false;\n marker-multi-policy: largest;\n image-filters: agg-stack-blur(18 * %d, 18 * %d);\n }''' % (factor, factor, factor),\n \"sql\": '''with a as (select action_type,game_event_id,game_id,minutes_remaining,period,seconds_remaining,shot_distance,shot_made_flag,shot_type,shot_zone_area,shot_zone_basic,shot_zone_range,team_id,team_name,game_date, the_geom_webmercator, the_geom from {dataset})\nSELECT 1 as cartodb_id, * FROM a WHERE (shot_distance >= 22 and (shot_zone_area like '%(R)' or shot_zone_area like '%(L)') and shot_zone_basic != 'Mid-Range') or (shot_distance >= 24 and shot_zone_basic != 'Mid-Range') and shot_type = '3PT Field Goal' and shot_made_flag != 0'''.format(dataset=dataset_name)\n }\n },\n {\n \"type\": \"cartodb\",\n \"options\": {\n \"cartocss_version\": \"2.1.1\",\n \"cartocss\": '''#layer['mapnik::geometry_type'=1] {\n marker-fill: #fff;\n marker-width: 9 * %d;\n marker-line-color: #fff;\n marker-line-width: 3 * %d;\n marker-line-opacity: 1;\n marker-opacity: 0.3;\n marker-type: ellipse;\n marker-placement: point;\n marker-allow-overlap: true;\n marker-comp-op: lighten;\n marker-clip: false;\n marker-multi-policy: largest;\n }''' % (factor, factor),\n \"sql\": '''with a as (select action_type,game_event_id,game_id,minutes_remaining,period,seconds_remaining,shot_distance,shot_made_flag,shot_type,shot_zone_area,shot_zone_basic,shot_zone_range,team_id,team_name,game_date, the_geom_webmercator, the_geom from {dataset})\nSELECT 1 as cartodb_id, * FROM a WHERE (shot_distance >= 22 and (shot_zone_area like '%(R)' or shot_zone_area like '%(L)') and shot_zone_basic != 'Mid-Range') or (shot_distance >= 24 and shot_zone_basic != 'Mid-Range') and shot_type = '3PT Field Goal' and shot_made_flag != 0'''.format(dataset=dataset_name)\n }\n }\n ]\n }\n }\n\n named_map_manager = NamedMapManager(auth_client)\n\n try:\n named_map = named_map_manager.get(map_name)\n if named_map is not None:\n named_map.client = auth_client\n named_map.delete()\n except Exception as e:\n #ignore\n print(e)\n\n return named_map_manager.create(template=template)", "_____no_output_____" ] ], [ [ "#### This is how we authenticate the CARTO Python SDK", "_____no_output_____" ] ], [ [ "auth_client = APIKeyAuthClient(CARTO_BASE_URL, CARTO_API_KEY)", "_____no_output_____" ] ], [ 
[ "#### This is one of the things I love the most from CARTO: prototype with BUILDER + then template your map and finally use APIs to produce maps programmatically\n\nFor this specific case, we have parameterized the line and markers widths, so with a single template, we can produce maps that we can use to share a screenshot (with 72DPI) or to export for high resolution printing (with 300DPI).\n\nLet's go for the poster printing", "_____no_output_____" ] ], [ [ "DPI = 72\nFACTOR = DPI / 72.0\nmap_name = 'tpl_' + dataset_name + str(int(round(time.time() * 1000)))\ncreate_named_map(auth_client, dataset_name, map_name, FACTOR)", "[\"Cannot find template 'tpl_stephen_curry1588194572838' of user 'aromeu'\"]\n" ] ], [ [ "#### Aaaand we're mostly done. Let's export a huge-high-resolution image", "_____no_output_____" ] ], [ [ "map = {\n 'username': CARTO_USER_NAME,\n 'map_id': map_name,\n 'width': 120,\n 'height': 80,\n 'dpi': DPI,\n 'zoom': 18,\n 'bounds': {\"ne\":[-0.000977916642979147,-0.004578593652695418],\"sw\":[0.004981951781790824,0.004288789350539447]},\n 'api_key': CARTO_API_KEY\n}\n\np = Printer(map['username'], map['map_id'], map['api_key'], map['width'], map['height'], map['zoom'], map['bounds'], map['dpi'], 'RGBA')\nimage_path = p.export('.')", "_____no_output_____" ], [ "image_path", "_____no_output_____" ] ], [ [ "#### How it looks like??", "_____no_output_____" ], [ "![](aromeu_tpl_stephen_curry1588194572838_20200429230933.png)", "_____no_output_____" ], [ "#### Clean some stuff and close the door when you leave, please", "_____no_output_____" ] ], [ [ "named_map_manager = NamedMapManager(auth_client)\n\ntry:\n named_map = named_map_manager.get(map_name)\n if named_map is not None:\n named_map.client = auth_client\n named_map.delete()\nexcept Exception as e:\n #ignore\n print(e)", "_____no_output_____" ], [ " --WHERE (shot_distance >= 22 and (shot_zone_area like '%(R)' or shot_zone_area like '%(L)') and shot_zone_basic != 'Mid-Range') or (shot_distance >= 24 and shot_zone_basic != 'Mid-Range') and shot_type = '3PT Field Goal' and shot_made_flag != 0", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ] ]
4a6b213464843552e26fd2e005d42e7f1b8dfb96
187,029
ipynb
Jupyter Notebook
python/numpy/3-examples.ipynb
skawns0724/KOSA-Big-Data_Vision
af123dfe0a82a82795bb6732285c390be86e83b7
[ "MIT" ]
11
2021-06-01T00:08:01.000Z
2021-09-04T02:41:55.000Z
python/numpy/3-examples.ipynb
skawns0724/KOSA-Big-Data_Vision
af123dfe0a82a82795bb6732285c390be86e83b7
[ "MIT" ]
1
2021-05-28T08:44:39.000Z
2021-06-01T01:11:04.000Z
python/numpy/3-examples.ipynb
skawns0724/KOSA-Big-Data_Vision
af123dfe0a82a82795bb6732285c390be86e83b7
[ "MIT" ]
12
2021-06-01T00:27:05.000Z
2021-07-01T04:25:27.000Z
1,684.945946
139,748
0.962198
[ [ [ "----\n# 과제 2차방정식 그래프 그리기\n\n## $y = x^2$ where $x = (-5, +5)$\n## $y = -x^2$ where $x = (-5, +5)$\n## $y = x \\cdot 5$ where $x = (-5, +5)$\n## $y = -x \\cdot 5$ where $ x = (-5, +5)$\n----", "_____no_output_____" ], [ "![image.png](attachment:image.png)", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport numpy as np\n%matplotlib inline\n\nfig = plt.figure(figsize=(12, 6)) \n\nx = np.linspace(-5, 5, 100) \n\nplt.title(\"4 diffrent graphs\")\n\nplt.xlim(-10, 10)\nplt.ylim(-25, 25)\n\nplt.xlabel(\" $x$ axis\")\nplt.ylabel(\" $y$ axis\")\n\n###############################################\n# 하기 4개의 plot 함수를 이용하여 과제를 완성하기\nplt.plot(x, x**2)\nplt.plot(x, -x**2)\nplt.plot(x, 5*x)\nplt.plot(x, -5*x)\n###############################################\n\nplt.grid() \nplt.show() ", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ] ]
4a6b2ae4468e7f885a08ffa5ec57ec773a229b8d
2,567
ipynb
Jupyter Notebook
playbook/tactics/impact/T1491.ipynb
haresudhan/The-AtomicPlaybook
447b1d6bca7c3750c5a58112634f6bac31aff436
[ "MIT" ]
8
2021-05-25T15:25:31.000Z
2021-11-08T07:14:45.000Z
playbook/tactics/impact/T1491.ipynb
haresudhan/The-AtomicPlaybook
447b1d6bca7c3750c5a58112634f6bac31aff436
[ "MIT" ]
1
2021-08-23T17:38:02.000Z
2021-10-12T06:58:19.000Z
playbook/tactics/impact/T1491.ipynb
haresudhan/The-AtomicPlaybook
447b1d6bca7c3750c5a58112634f6bac31aff436
[ "MIT" ]
2
2021-05-29T20:24:24.000Z
2021-08-05T23:44:12.000Z
49.365385
916
0.7164
[ [ [ "# T1491 - Defacement\nAdversaries may modify visual content available internally or externally to an enterprise network. Reasons for [Defacement](https://attack.mitre.org/techniques/T1491) include delivering messaging, intimidation, or claiming (possibly false) credit for an intrusion. Disturbing or offensive images may be used as a part of [Defacement](https://attack.mitre.org/techniques/T1491) in order to cause user discomfort, or to pressure compliance with accompanying messages. \n", "_____no_output_____" ], [ "## Atomic Tests:\nCurrently, no tests are available for this technique.", "_____no_output_____" ], [ "## Detection\nMonitor internal and external websites for unplanned content changes. Monitor application logs for abnormal behavior that may indicate attempted or successful exploitation. Use deep packet inspection to look for artifacts of common exploit traffic, such as SQL injection. Web Application Firewalls may detect improper inputs attempting exploitation.\n\n", "_____no_output_____" ], [ "## Shield Active Defense\n### System Activity Monitoring \n Collect system activity logs which can reveal adversary activity. \n\n Capturing system logs can show logins, user and system events, etc. Collecting this data and potentially sending it to a centralized location can help reveal the presence of an adversary and the actions they perform on a compromised system.\n#### Opportunity\nThere is an opportunity to detect an adversary who modifies website content (internally or externally) by monitoring for unauthorized changes to websites.\n#### Use Case\nA defender can monitor websites for unplanned content changes and generate alerts when activity is detected.\n#### Procedures\nEnsure that systems capture and retain common system level activity artifacts that might be produced.\nMonitor Windows systems for event codes that reflect an adversary changing passwords, adding accounts to groups, etc.", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ] ]
4a6b339924ebd8b0f2b040b709b27e16d18f894b
63,308
ipynb
Jupyter Notebook
gp-for-sine-wave.ipynb
mgorkove/Time-Series-Prediction-with-LSTM-Recurrent-Neural-Networks-in-Python-with-Keras
71937e6b25736c17bdc68abea0519f88f7410077
[ "MIT" ]
10
2017-05-23T09:02:16.000Z
2021-08-04T22:52:59.000Z
gp-for-sine-wave.ipynb
alastairrough/Time-Series-Prediction-with-LSTM-Recurrent-Neural-Networks-in-Python-with-Keras
71937e6b25736c17bdc68abea0519f88f7410077
[ "MIT" ]
null
null
null
gp-for-sine-wave.ipynb
alastairrough/Time-Series-Prediction-with-LSTM-Recurrent-Neural-Networks-in-Python-with-Keras
71937e6b25736c17bdc68abea0519f88f7410077
[ "MIT" ]
7
2018-03-11T16:47:15.000Z
2021-07-21T17:24:32.000Z
295.831776
30,403
0.9075
[ [ [ "## Prediction sine wave function using Gaussian Process\n\nAn example for Gaussian process algorithm to predict sine wave function.\nThis example is from [\"Gaussian Processes regression: basic introductory example\"](http://scikit-learn.org/stable/auto_examples/gaussian_process/plot_gp_regression.html).", "_____no_output_____" ] ], [ [ "import numpy as np\nfrom sklearn.gaussian_process import GaussianProcess\nfrom matplotlib import pyplot as pl\n%matplotlib inline\n\nnp.random.seed(1)", "_____no_output_____" ], [ "# The function to predict\ndef f(x):\n return x*np.sin(x)", "_____no_output_____" ], [ "# --------------------------\n# First the noiseless case\n# --------------------------\n\n# Obervations\nX = np.atleast_2d([0., 1., 2., 3., 5., 6., 7., 8., 9.5]).T\ny = f(X).ravel()\n\n#X = np.atleast_2d(np.linspace(0, 100, 200)).T\n\n# Mesh the input space for evaluations of the real function, the prediction and its MSE\nx = np.atleast_2d(np.linspace(0, 10, 1000)).T\n\n# Instanciate a Gaussian Process model\ngp = GaussianProcess(corr='cubic', theta0=1e-2, thetaL=1e-4, thetaU=1e-1,\n random_start=100)\n\n# Fit to data using Maximum Likelihood Estimation of the parameters\ngp.fit(X, y)\n\n# Make the prediction on the meshed x-axis (ask for MSE as well)\ny_pred, MSE = gp.predict(x, eval_MSE=True)\nsigma = np.sqrt(MSE)", "_____no_output_____" ], [ "# Plot the function, the prediction and the 95% confidence interval based on the MSE\nfig = pl.figure()\npl.plot(x, f(x), 'r:', label=u'$f(x) = x\\,\\sin(x)$')\npl.plot(X, y, 'r.', markersize=10, label=u'Observations')\npl.plot(x, y_pred, 'b-', label=u'Prediction')\npl.fill(np.concatenate([x, x[::-1]]),\n np.concatenate([y_pred - 1.9600 * sigma,\n (y_pred + 1.9600 * sigma)[::-1]]),\n alpha=.5, fc='b', ec='None', label='95% confidence interval')\npl.xlabel('$x$')\npl.ylabel('$f(x)$')\npl.ylim(-10, 20)\npl.legend(loc='upper left')", "_____no_output_____" ], [ "# now the noisy case\nX = np.linspace(0.1, 9.9, 20)\nX = np.atleast_2d(X).T\n\n# Observations and noise\ny = f(X).ravel()\ndy = 0.5 + 1.0 * np.random.random(y.shape)\nnoise = np.random.normal(0, dy)\ny += noise\n\n# Mesh the input space for evaluations of the real function, the prediction and\n# its MSE\nx = np.atleast_2d(np.linspace(0, 10, 1000)).T\n\n# Instanciate a Gaussian Process model\ngp = GaussianProcess(corr='squared_exponential', theta0=1e-1,\n thetaL=1e-3, thetaU=1,\n nugget=(dy / y) ** 2,\n random_start=100)\n\n# Fit to data using Maximum Likelihood Estimation of the parameters\ngp.fit(X, y)\n\n# Make the prediction on the meshed x-axis (ask for MSE as well)\ny_pred, MSE = gp.predict(x, eval_MSE=True)\nsigma = np.sqrt(MSE)\n\n# Plot the function, the prediction and the 95% confidence interval based on\n# the MSE\nfig = pl.figure()\npl.plot(x, f(x), 'r:', label=u'$f(x) = x\\,\\sin(x)$')\npl.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations')\npl.plot(x, y_pred, 'b-', label=u'Prediction')\npl.fill(np.concatenate([x, x[::-1]]),\n np.concatenate([y_pred - 1.9600 * sigma,\n (y_pred + 1.9600 * sigma)[::-1]]),\n alpha=.5, fc='b', ec='None', label='95% confidence interval')\npl.xlabel('$x$')\npl.ylabel('$f(x)$')\npl.ylim(-10, 20)\npl.legend(loc='upper left')\n\npl.show()", "/Developer/Applications/anaconda/lib/python3.4/site-packages/matplotlib/collections.py:590: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison\n if self._edgecolors == str('face'):\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
4a6b3d44b65fc58452a91a36f13037acd358979d
135,540
ipynb
Jupyter Notebook
notebooks/8.0-mwhitesi-salmonella-clonal-groups.ipynb
superphy/phenores
2bb9938dba7d3b031ceaee0c00c725b03915af68
[ "BSD-3-Clause" ]
null
null
null
notebooks/8.0-mwhitesi-salmonella-clonal-groups.ipynb
superphy/phenores
2bb9938dba7d3b031ceaee0c00c725b03915af68
[ "BSD-3-Clause" ]
null
null
null
notebooks/8.0-mwhitesi-salmonella-clonal-groups.ipynb
superphy/phenores
2bb9938dba7d3b031ceaee0c00c725b03915af68
[ "BSD-3-Clause" ]
3
2018-01-19T23:11:58.000Z
2018-09-20T17:53:40.000Z
251.465677
68,492
0.85577
[ [ [ "Clustering Mash distances to obtain clonal groups for all Salmonella", "_____no_output_____" ] ], [ [ "library('FactoMineR')\nlibrary('factoextra')\nlibrary('readxl')\nlibrary('dplyr')", "Loading required package: ggplot2\nWelcome! Want to learn more? See two factoextra-related books at https://goo.gl/ve3WBa\n\nAttaching package: ‘dplyr’\n\nThe following objects are masked from ‘package:stats’:\n\n filter, lag\n\nThe following objects are masked from ‘package:base’:\n\n intersect, setdiff, setequal, union\n\n" ], [ "mash_dist_file = '../data/interim/mash_distance_matrix.csv'\nmeta_excel_file = '../data/raw/GenotypicAMR_Master.xlsx'", "_____no_output_____" ], [ "distances <- read.csv(mash_dist_file, header=TRUE, row.names=1, stringsAsFactors=FALSE)\noptions(warn=-1)\nmetadf = read_excel(meta_excel_file, na='-')\noptions(warn=0)", "_____no_output_____" ], [ "serodf <- as.data.frame(metadf['serovar'])\nserodf$serovar <- tolower(serodf$serovar)\nsrrs <- sapply(metadf['run'], as.character)\nrownames(serodf) <- srrs\nserodf[is.na(serodf$serovar),] = 'No serovar'", "_____no_output_____" ], [ "pcs <- PCA(distances, scale.unit=FALSE, ncp = 20, graph = FALSE)\n\nfp = list()\ncfit = list()\nfor(k in 5:15) {\n cat(k)\n cat(\"\\n\")\n clu <- HCPC(pcs, graph = FALSE, nb.clust=k)\n clustdf <- clu$data.clust['clust']\n clustdf = merge(clustdf, serodf, by=0, all.x=TRUE, all.y=FALSE)\n cluster_counts = clustdf %>% group_by(serovar) %>% summarise(n_clusters = n_distinct(clust))\n # Find serovars split across clusters\n splitserovars = cluster_counts$serovar[cluster_counts['n_clusters'] >= 2]\n # For serovars split across clusters, count number in each cluster\n splitcounts = clustdf %>% count(serovar, clust) %>% filter(serovar %in% splitserovars) %>% arrange(desc(serovar))\n # Count all genomes in non-majority clusters\n misclassified = splitcounts %>% group_by(serovar) %>% filter(n != max(n)) %>% filter(serovar != 'No serovar')\n fp[[k]] = sum(misclassified['n'])\n cfit[[k]] = clu\n}", "5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n" ], [ "pcs <- PCA(distances, scale.unit=FALSE, ncp = 20, graph = FALSE)\n\nfp = list()\ncfit = list()\nk = 9\ncat(k)\ncat(\"\\n\")\nclu <- HCPC(pcs, graph = FALSE, nb.clust=k)\nclustdf <- clu$data.clust['clust']\nclustdf = merge(clustdf, serodf, by=0, all.x=TRUE, all.y=FALSE)\ncluster_counts = clustdf %>% group_by(serovar) %>% summarise(n_clusters = n_distinct(clust))\n# Find serovars split across clusters\nsplitserovars = cluster_counts$serovar[cluster_counts['n_clusters'] >= 2]\n# For serovars split across clusters, count number in each cluster\nsplitcounts = clustdf %>% count(serovar, clust) %>% filter(serovar %in% splitserovars) %>% arrange(desc(serovar))\n# Count all genomes in non-majority clusters\nmisclassified = splitcounts %>% group_by(serovar) %>% filter(n != max(n)) %>% filter(serovar != 'No serovar')\nfp[[k]] = sum(misclassified['n'])\ncfit[[k]] = clu", "9\n" ], [ "# fp = number of genomes that are in wrong clusters, i.e. 
the correct cluster is the cluster with the most serovar genomes\nprint(fp)", "[[1]]\nNULL\n\n[[2]]\nNULL\n\n[[3]]\nNULL\n\n[[4]]\nNULL\n\n[[5]]\nNULL\n\n[[6]]\nNULL\n\n[[7]]\nNULL\n\n[[8]]\nNULL\n\n[[9]]\n[1] 56\n\n" ], [ "clustdf <- cfit[[9]]$data.clust['clust']", "_____no_output_____" ], [ "table(clustdf)", "_____no_output_____" ], [ "fviz_eig(pcs)", "_____no_output_____" ], [ "plot(cfit[[9]], choice = \"3D.map\")", "_____no_output_____" ], [ "clustdf2 = merge(clustdf, serodf, by=0, all.x=TRUE, all.y=FALSE)\ncluster_counts = clustdf2 %>% group_by(clust) %>% count(serovar) %>% filter(n>2)", "_____no_output_____" ], [ "cluster_counts", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a6b4dfc856c135d9708933e05626f82d34c0963
25,801
ipynb
Jupyter Notebook
HML/chapter3/chapter3ex1.ipynb
Zilleplus/MachineLearning
13e4fe996386d3f66b7866cc133ae9b26a6333d6
[ "MIT" ]
null
null
null
HML/chapter3/chapter3ex1.ipynb
Zilleplus/MachineLearning
13e4fe996386d3f66b7866cc133ae9b26a6333d6
[ "MIT" ]
null
null
null
HML/chapter3/chapter3ex1.ipynb
Zilleplus/MachineLearning
13e4fe996386d3f66b7866cc133ae9b26a6333d6
[ "MIT" ]
null
null
null
113.162281
1,548
0.684043
[ [ [ "excercise 1. chapter 3:\n\nBuild classifiedr on MNIST dataset that achieves over 97% accuracy on the test set.\nHINT: The KNeighborsClassfier wors quiet well for this taks; you just need to find good hyperparameter values. ( try a grid search on the weights and n_neighbors parameters)", "_____no_output_____" ] ], [ [ "from sklearn.datasets import fetch_openml\nmnist = fetch_openml('mnist_784', version=1)\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nx, y = mnist[\"data\"], mnist[\"target\"]\ny = y.astype(np.uint8) # !!! Important to turn the strings into int's, otherwise the classfier will error.\nx_train, x_test, y_train, y_test = x[:60000], x[60000:], y[:60000], y[6000:]", "_____no_output_____" ] ], [ [ "Let's try to fit a k-nearest classifier to fit, with some random parameters. Just to get a feeling with the classifier.", "_____no_output_____" ] ], [ [ "from sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import GridSearchCV\n\nneigh = KNeighborsClassifier()\nparam_grid = [{'weights':['uniform','distance'],'n_neighbors' : [1, 3, 5, 7]}]\ngrid_search = GridSearchCV(neigh, param_grid, verbose=3,cv=3, scoring = 'neg_mean_squared_error',return_train_score=True)\ngrid_search.fit(x_train, y_train)", "Fitting 3 folds for each of 8 candidates, totalling 24 fits\n[CV 1/3] END n_neighbors=1, weights=uniform;, score=(train=-0.000, test=-0.537) total time= 1.0min\n" ], [ "grid_search.best_estimator_", "_____no_output_____" ], [ "#from sklearn.neighbors import KNeighborsClassifier\n# uniform/distance\nneigh_test = KNeighborsClassifier(weights=\"distance\", n_neighbors=1)\nneigh_test.fit(x_train, y_train)\ntest_size = 100\npred_y = neigh_test.predict(x_train[:test_size]) \n\nfrom sklearn.metrics import mean_squared_error\nmean_squared_error(y_train[:test_size], pred_y)", "_____no_output_____" ], [ "from sklearn.model_selection import cross_val_predict\ny_scores = cross_val_predict(neigh, x_train, y_train, cv=3, method=\"predict_proba\")", "_____no_output_____" ], [ "from sklearn.metrics import roc_curve\nfpr, tpr, thresholds = roc_curve(y_train_5, y_scores)\nplt.plot(fpr, tpr, linewidth=2)\nplt.xlabel(\"false positive rate\")\nplt.ylabel(\"true positive rate\")", "_____no_output_____" ], [ "y_scores[1]", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
4a6b5ad65c5e6d19722a721ac8158f1d803a9d1b
14,044
ipynb
Jupyter Notebook
Parkinsons/Parkinson.ipynb
sriram98v/Keras
53ffb00acfc4ddd84e213c768ab2a7fcc7f87893
[ "MIT" ]
2
2018-02-08T06:43:25.000Z
2020-08-17T06:41:54.000Z
Parkinsons/Parkinson.ipynb
sriram98v/Keras
53ffb00acfc4ddd84e213c768ab2a7fcc7f87893
[ "MIT" ]
null
null
null
Parkinsons/Parkinson.ipynb
sriram98v/Keras
53ffb00acfc4ddd84e213c768ab2a7fcc7f87893
[ "MIT" ]
3
2018-10-03T20:17:13.000Z
2020-01-18T09:02:12.000Z
31.773756
122
0.414839
[ [ [ "import numpy as np\nimport os\nimport pandas as pd\nimport keras\nimport tensorflow as tf\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation, LSTM, Conv1D, Dropout, Flatten", "_____no_output_____" ], [ "path = os.path.join(os.getcwd()+'/datasets/parkinsons.csv')\ndataframe = pd.read_csv(path)\ndataframe.head()", "_____no_output_____" ], [ "from sklearn.utils import shuffle\ndata = shuffle(dataframe, random_state=101)\nx = data.drop(columns=['name', 'status', 'MDVP:Jitter(Abs)', 'MDVP:RAP'])\ny = data['status']\ny.head()", "_____no_output_____" ], [ "from sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.40, random_state=101)", "_____no_output_____" ], [ "batch_size = 100\nnum_classes = 2\nepochs = 10\ny_train = keras.utils.to_categorical(y_train, num_classes)\ny_test = keras.utils.to_categorical(y_test, num_classes)", "_____no_output_____" ], [ "X_train = X_train.as_matrix(columns=None).reshape(X_train.shape + (1,))\nX_test = X_test.as_matrix(columns=None).reshape(X_test.shape + (1,))", "_____no_output_____" ], [ "y_test.shape", "_____no_output_____" ], [ "model = Sequential()\nmodel.add(Conv1D(32, (3), activation='softmax', input_shape=[20, 1]))\nmodel.add(Conv1D(64, (3), activation='softmax'))\nmodel.add(LSTM(128, dropout=0.2, recurrent_dropout=0.2))\nmodel.add(Dense(500, activation='softmax'))\nmodel.add(Dropout(0.25))\nmodel.add(Dense(num_classes, activation='softmax'))", "_____no_output_____" ], [ "model.compile(loss=keras.losses.categorical_crossentropy,\n optimizer=keras.optimizers.Adadelta(),\n metrics=['accuracy'])", "_____no_output_____" ], [ "model.fit(X_train, y_train,\n batch_size=batch_size,\n epochs=epochs,\n verbose=1,\n validation_data=(X_test, y_test))", "Train on 117 samples, validate on 78 samples\nEpoch 1/10\n117/117 [==============================] - 1s - loss: 0.6916 - acc: 0.7179 - val_loss: 0.6904 - val_acc: 0.7949\nEpoch 2/10\n117/117 [==============================] - 0s - loss: 0.6911 - acc: 0.7265 - val_loss: 0.6895 - val_acc: 0.7949\nEpoch 3/10\n117/117 [==============================] - 0s - loss: 0.6903 - acc: 0.7265 - val_loss: 0.6886 - val_acc: 0.7949\nEpoch 4/10\n117/117 [==============================] - 0s - loss: 0.6896 - acc: 0.7265 - val_loss: 0.6876 - val_acc: 0.7949\nEpoch 5/10\n117/117 [==============================] - 0s - loss: 0.6887 - acc: 0.7265 - val_loss: 0.6868 - val_acc: 0.7949\nEpoch 6/10\n117/117 [==============================] - 0s - loss: 0.6882 - acc: 0.7265 - val_loss: 0.6857 - val_acc: 0.7949\nEpoch 7/10\n117/117 [==============================] - 0s - loss: 0.6876 - acc: 0.7265 - val_loss: 0.6850 - val_acc: 0.7949\nEpoch 8/10\n117/117 [==============================] - 0s - loss: 0.6868 - acc: 0.7265 - val_loss: 0.6840 - val_acc: 0.7949\nEpoch 9/10\n117/117 [==============================] - 0s - loss: 0.6860 - acc: 0.7265 - val_loss: 0.6833 - val_acc: 0.7949\nEpoch 10/10\n117/117 [==============================] - 0s - loss: 0.6855 - acc: 0.7265 - val_loss: 0.6825 - val_acc: 0.7949\n" ], [ "scores = model.evaluate(X_test, y_test, verbose=0)\nprint(\"%s: %.2f%%\" % (model.metrics_names[1], scores[1]*100))", "_____no_output_____" ], [ "model_yaml = model.to_yaml()\nwith open(\"parkinsons.yaml\", \"w\") as yaml_file:\n yaml_file.write(model_yaml)\n# serialize weights to HDF5\nmodel.save_weights(\"model.h5\")\nprint(\"Saved model to disk\")", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a6b5b56e0ebc89fc0a288bb24b57cab169ed05a
22,049
ipynb
Jupyter Notebook
convolutional-neural-networks/cifar-cnn/cifar10_cnn_augmentation.ipynb
jetsonai11/Udacity_Deep_Learning_Nanodegree_2020
7193092812f0fcde975a93180e547081f50766c4
[ "MIT" ]
2
2020-05-09T01:52:34.000Z
2020-05-12T08:06:58.000Z
convolutional-neural-networks/cifar-cnn/.ipynb_checkpoints/cifar10_cnn_augmentation-checkpoint.ipynb
jetsonai11/Udacity_Deep_Learning_Nanodegree_2020
7193092812f0fcde975a93180e547081f50766c4
[ "MIT" ]
3
2020-11-13T18:49:26.000Z
2022-02-10T01:53:18.000Z
convolutional-neural-networks/cifar-cnn/cifar10_cnn_augmentation.ipynb
jetsonai11/Udacity_Deep_Learning_Nanodegree_2020
7193092812f0fcde975a93180e547081f50766c4
[ "MIT" ]
null
null
null
39.514337
533
0.583473
[ [ [ "# Convolutional Neural Networks\n---\nIn this notebook, we train a **CNN** to classify images from the CIFAR-10 database.\n\nThe images in this database are small color images that fall into one of ten classes; some example images are pictured below.\n\n<img src='notebook_ims/cifar_data.png' width=70% height=70% />", "_____no_output_____" ], [ "### Test for [CUDA](http://pytorch.org/docs/stable/cuda.html)\n\nSince these are larger (32x32x3) images, it may prove useful to speed up your training time by using a GPU. CUDA is a parallel computing platform and CUDA Tensors are the same as typical Tensors, only they utilize GPU's for computation.", "_____no_output_____" ] ], [ [ "import torch\nimport numpy as np\n\n# check if CUDA is available\ntrain_on_gpu = torch.cuda.is_available()\n\nif not train_on_gpu:\n print('CUDA is not available. Training on CPU ...')\nelse:\n print('CUDA is available! Training on GPU ...')", "CUDA is not available. Training on CPU ...\n" ] ], [ [ "---\n## Load and Augment the [Data](http://pytorch.org/docs/stable/torchvision/datasets.html)\n\nDownloading may take a minute. We load in the training and test data, split the training data into a training and validation set, then create DataLoaders for each of these sets of data.\n\n#### Augmentation\n\nIn this cell, we perform some simple [data augmentation](https://medium.com/nanonets/how-to-use-deep-learning-when-you-have-limited-data-part-2-data-augmentation-c26971dc8ced) by randomly flipping and rotating the given image data. We do this by defining a torchvision `transform`, and you can learn about all the transforms that are used to pre-process and augment data, [here](https://pytorch.org/docs/stable/torchvision/transforms.html).\n\n#### TODO: Look at the [transformation documentation](https://pytorch.org/docs/stable/torchvision/transforms.html); add more augmentation transforms, and see how your model performs.\n\nThis type of data augmentation should add some positional variety to these images, so that when we train a model on this data, it will be robust in the face of geometric changes (i.e. it will recognize a ship, no matter which direction it is facing). 
It's recommended that you choose one or two transforms.", "_____no_output_____" ] ], [ [ "from torchvision import datasets\nimport torchvision.transforms as transforms\nfrom torch.utils.data.sampler import SubsetRandomSampler\n\n# number of subprocesses to use for data loading\nnum_workers = 0\n# how many samples per batch to load\nbatch_size = 20\n# percentage of training set to use as validation\nvalid_size = 0.2\n\n# convert data to a normalized torch.FloatTensor\ntransform = transforms.Compose([\n transforms.RandomHorizontalFlip(), # randomly flip and rotate\n transforms.RandomRotation(10),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])\n\n# choose the training and test datasets\ntrain_data = datasets.CIFAR10('data', train=True,\n download=True, transform=transform)\ntest_data = datasets.CIFAR10('data', train=False,\n download=True, transform=transform)\n\n# obtain training indices that will be used for validation\nnum_train = len(train_data)\nindices = list(range(num_train))\nnp.random.shuffle(indices)\nsplit = int(np.floor(valid_size * num_train))\ntrain_idx, valid_idx = indices[split:], indices[:split]\n\n# define samplers for obtaining training and validation batches\ntrain_sampler = SubsetRandomSampler(train_idx)\nvalid_sampler = SubsetRandomSampler(valid_idx)\n\n# prepare data loaders (combine dataset and sampler)\ntrain_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,\n sampler=train_sampler, num_workers=num_workers)\nvalid_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, \n sampler=valid_sampler, num_workers=num_workers)\ntest_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, \n num_workers=num_workers)\n\n# specify the image classes\nclasses = ['airplane', 'automobile', 'bird', 'cat', 'deer',\n 'dog', 'frog', 'horse', 'ship', 'truck']", "Downloading https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz to data/cifar-10-python.tar.gz\n" ] ], [ [ "### Visualize a Batch of Training Data", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\n%matplotlib inline\n\n# helper function to un-normalize and display an image\ndef imshow(img):\n img = img / 2 + 0.5 # unnormalize\n plt.imshow(np.transpose(img, (1, 2, 0))) # convert from Tensor image", "_____no_output_____" ], [ "# obtain one batch of training images\ndataiter = iter(train_loader)\nimages, labels = dataiter.next()\nimages = images.numpy() # convert images to numpy for display\n\n# plot the images in the batch, along with the corresponding labels\nfig = plt.figure(figsize=(25, 4))\n# display 20 images\nfor idx in np.arange(20):\n ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])\n imshow(images[idx])\n ax.set_title(classes[labels[idx]])", "_____no_output_____" ] ], [ [ "### View an Image in More Detail\n\nHere, we look at the normalized red, green, and blue (RGB) color channels as three separate, grayscale intensity images.", "_____no_output_____" ] ], [ [ "rgb_img = np.squeeze(images[3])\nchannels = ['red channel', 'green channel', 'blue channel']\n\nfig = plt.figure(figsize = (36, 36)) \nfor idx in np.arange(rgb_img.shape[0]):\n ax = fig.add_subplot(1, 3, idx + 1)\n img = rgb_img[idx]\n ax.imshow(img, cmap='gray')\n ax.set_title(channels[idx])\n width, height = img.shape\n thresh = img.max()/2.5\n for x in range(width):\n for y in range(height):\n val = round(img[x][y],2) if img[x][y] !=0 else 0\n ax.annotate(str(val), xy=(y,x),\n horizontalalignment='center',\n verticalalignment='center', 
size=8,\n color='white' if img[x][y]<thresh else 'black')", "_____no_output_____" ] ], [ [ "---\n## Define the Network [Architecture](http://pytorch.org/docs/stable/nn.html)\n\nThis time, you'll define a CNN architecture. Instead of an MLP, which used linear, fully-connected layers, you'll use the following:\n* [Convolutional layers](https://pytorch.org/docs/stable/nn.html#conv2d), which can be thought of as stack of filtered images.\n* [Maxpooling layers](https://pytorch.org/docs/stable/nn.html#maxpool2d), which reduce the x-y size of an input, keeping only the most _active_ pixels from the previous layer.\n* The usual Linear + Dropout layers to avoid overfitting and produce a 10-dim output.\n\nA network with 2 convolutional layers is shown in the image below and in the code, and you've been given starter code with one convolutional and one maxpooling layer.\n\n<img src='notebook_ims/2_layer_conv.png' height=50% width=50% />\n\n#### TODO: Define a model with multiple convolutional layers, and define the feedforward metwork behavior.\n\nThe more convolutional layers you include, the more complex patterns in color and shape a model can detect. It's suggested that your final model include 2 or 3 convolutional layers as well as linear layers + dropout in between to avoid overfitting. \n\nIt's good practice to look at existing research and implementations of related models as a starting point for defining your own models. You may find it useful to look at [this PyTorch classification example](https://github.com/pytorch/tutorials/blob/master/beginner_source/blitz/cifar10_tutorial.py) or [this, more complex Keras example](https://github.com/keras-team/keras/blob/master/examples/cifar10_cnn.py) to help decide on a final structure.\n\n#### Output volume for a convolutional layer\n\nTo compute the output size of a given convolutional layer we can perform the following calculation (taken from [Stanford's cs231n course](http://cs231n.github.io/convolutional-networks/#layers)):\n> We can compute the spatial size of the output volume as a function of the input volume size (W), the kernel/filter size (F), the stride with which they are applied (S), and the amount of zero padding used (P) on the border. The correct formula for calculating how many neurons define the output_W is given by `(W−F+2P)/S+1`. \n\nFor example for a 7x7 input and a 3x3 filter with stride 1 and pad 0 we would get a 5x5 output. 
With stride 2 we would get a 3x3 output.", "_____no_output_____" ] ], [ [ "import torch.nn as nn\nimport torch.nn.functional as F\n\n# define the CNN architecture\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n # convolutional layer (sees 32x32x3 image tensor)\n self.conv1 = nn.Conv2d(3, 16, 3, padding=1)\n # convolutional layer (sees 16x16x16 tensor)\n self.conv2 = nn.Conv2d(16, 32, 3, padding=1)\n # convolutional layer (sees 8x8x32 tensor)\n self.conv3 = nn.Conv2d(32, 64, 3, padding=1)\n # max pooling layer\n self.pool = nn.MaxPool2d(2, 2)\n # linear layer (64 * 4 * 4 -> 500)\n self.fc1 = nn.Linear(64 * 4 * 4, 500)\n # linear layer (500 -> 10)\n self.fc2 = nn.Linear(500, 10)\n # dropout layer (p=0.25)\n self.dropout = nn.Dropout(0.25)\n\n def forward(self, x):\n # add sequence of convolutional and max pooling layers\n x = self.pool(F.relu(self.conv1(x)))\n x = self.pool(F.relu(self.conv2(x)))\n x = self.pool(F.relu(self.conv3(x)))\n # flatten image input\n x = x.view(-1, 64 * 4 * 4)\n # add dropout layer\n x = self.dropout(x)\n # add 1st hidden layer, with relu activation function\n x = F.relu(self.fc1(x))\n # add dropout layer\n x = self.dropout(x)\n # add 2nd hidden layer, with relu activation function\n x = self.fc2(x)\n return x\n\n# create a complete CNN\nmodel = Net()\nprint(model)\n\n# move tensors to GPU if CUDA is available\nif train_on_gpu:\n model.cuda()", "_____no_output_____" ] ], [ [ "### Specify [Loss Function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [Optimizer](http://pytorch.org/docs/stable/optim.html)\n\nDecide on a loss and optimization function that is best suited for this classification task. The linked code examples from above, may be a good starting point; [this PyTorch classification example](https://github.com/pytorch/tutorials/blob/master/beginner_source/blitz/cifar10_tutorial.py) or [this, more complex Keras example](https://github.com/keras-team/keras/blob/master/examples/cifar10_cnn.py). 
Pay close attention to the value for **learning rate** as this value determines how your model converges to a small error.\n\n#### TODO: Define the loss and optimizer and see how these choices change the loss over time.", "_____no_output_____" ] ], [ [ "import torch.optim as optim\n\n# specify loss function (categorical cross-entropy)\ncriterion = nn.CrossEntropyLoss()\n\n# specify optimizer\noptimizer = optim.SGD(model.parameters(), lr=0.01)", "_____no_output_____" ] ], [ [ "---\n## Train the Network\n\nRemember to look at how the training and validation loss decreases over time; if the validation loss ever increases it indicates possible overfitting.", "_____no_output_____" ] ], [ [ "# number of epochs to train the model\nn_epochs = 30\n\nvalid_loss_min = np.Inf # track change in validation loss\n\nfor epoch in range(1, n_epochs+1):\n\n # keep track of training and validation loss\n train_loss = 0.0\n valid_loss = 0.0\n \n ###################\n # train the model #\n ###################\n model.train()\n for batch_idx, (data, target) in enumerate(train_loader):\n # move tensors to GPU if CUDA is available\n if train_on_gpu:\n data, target = data.cuda(), target.cuda()\n # clear the gradients of all optimized variables\n optimizer.zero_grad()\n # forward pass: compute predicted outputs by passing inputs to the model\n output = model(data)\n # calculate the batch loss\n loss = criterion(output, target)\n # backward pass: compute gradient of the loss with respect to model parameters\n loss.backward()\n # perform a single optimization step (parameter update)\n optimizer.step()\n # update training loss\n train_loss += loss.item()*data.size(0)\n \n ###################### \n # validate the model #\n ######################\n model.eval()\n for batch_idx, (data, target) in enumerate(valid_loader):\n # move tensors to GPU if CUDA is available\n if train_on_gpu:\n data, target = data.cuda(), target.cuda()\n # forward pass: compute predicted outputs by passing inputs to the model\n output = model(data)\n # calculate the batch loss\n loss = criterion(output, target)\n # update average validation loss \n valid_loss += loss.item()*data.size(0)\n \n # calculate average losses\n train_loss = train_loss/len(train_loader.sampler)\n valid_loss = valid_loss/len(valid_loader.sampler)\n \n # print training/validation statistics \n print('Epoch: {} \\tTraining Loss: {:.6f} \\tValidation Loss: {:.6f}'.format(\n epoch, train_loss, valid_loss))\n \n # save model if validation loss has decreased\n if valid_loss <= valid_loss_min:\n print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(\n valid_loss_min,\n valid_loss))\n torch.save(model.state_dict(), 'model_augmented.pt')\n valid_loss_min = valid_loss", "_____no_output_____" ] ], [ [ "### Load the Model with the Lowest Validation Loss", "_____no_output_____" ] ], [ [ "model.load_state_dict(torch.load('model_augmented.pt'))", "_____no_output_____" ] ], [ [ "---\n## Test the Trained Network\n\nTest your trained model on previously unseen data! A \"good\" result will be a CNN that gets around 70% (or more, try your best!) accuracy on these test images.", "_____no_output_____" ] ], [ [ "# track test loss\ntest_loss = 0.0\nclass_correct = list(0. for i in range(10))\nclass_total = list(0. 
for i in range(10))\n\nmodel.eval()\n# iterate over test data\nfor batch_idx, (data, target) in enumerate(test_loader):\n # move tensors to GPU if CUDA is available\n if train_on_gpu:\n data, target = data.cuda(), target.cuda()\n # forward pass: compute predicted outputs by passing inputs to the model\n output = model(data)\n # calculate the batch loss\n loss = criterion(output, target)\n # update test loss \n test_loss += loss.item()*data.size(0)\n # convert output probabilities to predicted class\n _, pred = torch.max(output, 1) \n # compare predictions to true label\n correct_tensor = pred.eq(target.data.view_as(pred))\n correct = np.squeeze(correct_tensor.numpy()) if not train_on_gpu else np.squeeze(correct_tensor.cpu().numpy())\n # calculate test accuracy for each object class\n for i in range(batch_size):\n label = target.data[i]\n class_correct[label] += correct[i].item()\n class_total[label] += 1\n\n# average test loss\ntest_loss = test_loss/len(test_loader.dataset)\nprint('Test Loss: {:.6f}\\n'.format(test_loss))\n\nfor i in range(10):\n if class_total[i] > 0:\n print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (\n classes[i], 100 * class_correct[i] / class_total[i],\n np.sum(class_correct[i]), np.sum(class_total[i])))\n else:\n print('Test Accuracy of %5s: N/A (no training examples)' % (classes[i]))\n\nprint('\\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (\n 100. * np.sum(class_correct) / np.sum(class_total),\n np.sum(class_correct), np.sum(class_total)))", "_____no_output_____" ] ], [ [ "### Visualize Sample Test Results", "_____no_output_____" ] ], [ [ "# obtain one batch of test images\ndataiter = iter(test_loader)\nimages, labels = dataiter.next()\nimages.numpy()\n\n# move model inputs to cuda, if GPU available\nif train_on_gpu:\n images = images.cuda()\n\n# get sample outputs\noutput = model(images)\n# convert output probabilities to predicted class\n_, preds_tensor = torch.max(output, 1)\npreds = np.squeeze(preds_tensor.numpy()) if not train_on_gpu else np.squeeze(preds_tensor.cpu().numpy())\n\n# plot the images in the batch, along with predicted and true labels\nfig = plt.figure(figsize=(25, 4))\nfor idx in np.arange(20):\n ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])\n imshow(images[idx])\n ax.set_title(\"{} ({})\".format(classes[preds[idx]], classes[labels[idx]]),\n color=(\"green\" if preds[idx]==labels[idx].item() else \"red\"))", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a6b64674848ac780f080f6f2a57d30f7aa03d2e
444,006
ipynb
Jupyter Notebook
d2l-en/pytorch/chapter_generative-adversarial-networks/dcgan.ipynb
gr8khan/d2lai
7c10432f38c80e86978cd075d0024902b47842a0
[ "MIT" ]
null
null
null
d2l-en/pytorch/chapter_generative-adversarial-networks/dcgan.ipynb
gr8khan/d2lai
7c10432f38c80e86978cd075d0024902b47842a0
[ "MIT" ]
null
null
null
d2l-en/pytorch/chapter_generative-adversarial-networks/dcgan.ipynb
gr8khan/d2lai
7c10432f38c80e86978cd075d0024902b47842a0
[ "MIT" ]
null
null
null
131.79163
100,349
0.813665
[ [ [ "# Deep Convolutional Generative Adversarial Networks\n:label:`sec_dcgan`\n\nIn :numref:`sec_basic_gan`, we introduced the basic ideas behind how GANs work. We showed that they can draw samples from some simple, easy-to-sample distribution, like a uniform or normal distribution, and transform them into samples that appear to match the distribution of some dataset. And while our example of matching a 2D Gaussian distribution got the point across, it is not especially exciting.\n\nIn this section, we will demonstrate how you can use GANs to generate photorealistic images. We will be basing our models on the deep convolutional GANs (DCGAN) introduced in :cite:`Radford.Metz.Chintala.2015`. We will borrow the convolutional architecture that have proven so successful for discriminative computer vision problems and show how via GANs, they can be leveraged to generate photorealistic images.\n", "_____no_output_____" ] ], [ [ "from d2l import torch as d2l\nimport torch\nimport torchvision\nfrom torch import nn\nimport warnings", "_____no_output_____" ] ], [ [ "## The Pokemon Dataset\n\nThe dataset we will use is a collection of Pokemon sprites obtained from [pokemondb](https://pokemondb.net/sprites). First download, extract and load this dataset.\n", "_____no_output_____" ] ], [ [ "#@save\nd2l.DATA_HUB['pokemon'] = (d2l.DATA_URL + 'pokemon.zip',\n 'c065c0e2593b8b161a2d7873e42418bf6a21106c')\n\ndata_dir = d2l.download_extract('pokemon')\npokemon = torchvision.datasets.ImageFolder(data_dir)", "Downloading ../data/pokemon.zip from http://d2l-data.s3-accelerate.amazonaws.com/pokemon.zip...\n" ] ], [ [ "We resize each image into $64\\times 64$. The `ToTensor` transformation will project the pixel value into $[0, 1]$, while our generator will use the tanh function to obtain outputs in $[-1, 1]$. Therefore we normalize the data with $0.5$ mean and $0.5$ standard deviation to match the value range.\n", "_____no_output_____" ] ], [ [ "batch_size = 256\ntransformer = torchvision.transforms.Compose([\n torchvision.transforms.Resize((64, 64)),\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize(0.5, 0.5)\n])\npokemon.transform = transformer\ndata_iter = torch.utils.data.DataLoader(\n pokemon, batch_size=batch_size,\n shuffle=True, num_workers=d2l.get_dataloader_workers())", "_____no_output_____" ] ], [ [ "Let us visualize the first 20 images.\n", "_____no_output_____" ] ], [ [ "warnings.filterwarnings('ignore')\nd2l.set_figsize((4, 4))\nfor X, y in data_iter:\n imgs = X[0:20,:,:,:].permute(0, 2, 3, 1)/2+0.5\n d2l.show_images(imgs, num_rows=4, num_cols=5)\n break", "_____no_output_____" ] ], [ [ "## The Generator\n\nThe generator needs to map the noise variable $\\mathbf z\\in\\mathbb R^d$, a length-$d$ vector, to a RGB image with width and height to be $64\\times 64$ . In :numref:`sec_fcn` we introduced the fully convolutional network that uses transposed convolution layer (refer to :numref:`sec_transposed_conv`) to enlarge input size. 
The basic block of the generator contains a transposed convolution layer followed by the batch normalization and ReLU activation.\n", "_____no_output_____" ] ], [ [ "class G_block(nn.Module):\n def __init__(self, out_channels, in_channels=3, kernel_size=4, strides=2,\n padding=1, **kwargs):\n super(G_block, self).__init__(**kwargs)\n self.conv2d_trans = nn.ConvTranspose2d(in_channels, out_channels,\n kernel_size, strides, padding, bias=False)\n self.batch_norm = nn.BatchNorm2d(out_channels)\n self.activation = nn.ReLU()\n\n def forward(self, X):\n return self.activation(self.batch_norm(self.conv2d_trans(X)))", "_____no_output_____" ] ], [ [ "In default, the transposed convolution layer uses a $k_h = k_w = 4$ kernel, a $s_h = s_w = 2$ strides, and a $p_h = p_w = 1$ padding. With a input shape of $n_h^{'} \\times n_w^{'} = 16 \\times 16$, the generator block will double input's width and height.\n\n$$\n\\begin{aligned}\nn_h^{'} \\times n_w^{'} &= [(n_h k_h - (n_h-1)(k_h-s_h)- 2p_h] \\times [(n_w k_w - (n_w-1)(k_w-s_w)- 2p_w]\\\\\n &= [(k_h + s_h (n_h-1)- 2p_h] \\times [(k_w + s_w (n_w-1)- 2p_w]\\\\\n &= [(4 + 2 \\times (16-1)- 2 \\times 1] \\times [(4 + 2 \\times (16-1)- 2 \\times 1]\\\\\n &= 32 \\times 32 .\\\\\n\\end{aligned}\n$$\n", "_____no_output_____" ] ], [ [ "x = torch.zeros((2, 3, 16, 16))\ng_blk = G_block(20)\ng_blk(x).shape", "_____no_output_____" ] ], [ [ "If changing the transposed convolution layer to a $4\\times 4$ kernel, $1\\times 1$ strides and zero padding. With a input size of $1 \\times 1$, the output will have its width and height increased by 3 respectively.\n", "_____no_output_____" ] ], [ [ "x = torch.zeros((2, 3, 1, 1))\ng_blk = G_block(20, strides=1, padding=0)\ng_blk(x).shape", "_____no_output_____" ] ], [ [ "The generator consists of four basic blocks that increase input's both width and height from 1 to 32. At the same time, it first projects the latent variable into $64\\times 8$ channels, and then halve the channels each time. At last, a transposed convolution layer is used to generate the output. It further doubles the width and height to match the desired $64\\times 64$ shape, and reduces the channel size to $3$. The tanh activation function is applied to project output values into the $(-1, 1)$ range.\n", "_____no_output_____" ] ], [ [ "n_G = 64\nnet_G = nn.Sequential(\n G_block(in_channels=100, out_channels=n_G*8,\n strides=1, padding=0), # Output: (64 * 8, 4, 4)\n G_block(in_channels=n_G*8, out_channels=n_G*4), # Output: (64 * 4, 8, 8)\n G_block(in_channels=n_G*4, out_channels=n_G*2), # Output: (64 * 2, 16, 16)\n G_block(in_channels=n_G*2, out_channels=n_G), # Output: (64, 32, 32)\n nn.ConvTranspose2d(in_channels=n_G, out_channels=3, \n kernel_size=4, stride=2, padding=1, bias=False),\n nn.Tanh()) # Output: (3, 64, 64)", "_____no_output_____" ] ], [ [ "Generate a 100 dimensional latent variable to verify the generator's output shape.\n", "_____no_output_____" ] ], [ [ "x = torch.zeros((1, 100, 1, 1))\nnet_G(x).shape", "_____no_output_____" ] ], [ [ "## Discriminator\n\nThe discriminator is a normal convolutional network network except that it uses a leaky ReLU as its activation function. Given $\\alpha \\in[0, 1]$, its definition is\n\n$$\\textrm{leaky ReLU}(x) = \\begin{cases}x & \\text{if}\\ x > 0\\\\ \\alpha x &\\text{otherwise}\\end{cases}.$$\n\nAs it can be seen, it is normal ReLU if $\\alpha=0$, and an identity function if $\\alpha=1$. For $\\alpha \\in (0, 1)$, leaky ReLU is a nonlinear function that give a non-zero output for a negative input. 
It aims to fix the \"dying ReLU\" problem that a neuron might always output a negative value and therefore cannot make any progress since the gradient of ReLU is 0.\n", "_____no_output_____" ] ], [ [ "alphas = [0, .2, .4, .6, .8, 1]\nx = torch.arange(-2, 1, 0.1)\nY = [nn.LeakyReLU(alpha)(x).detach().numpy() for alpha in alphas]\nd2l.plot(x.detach().numpy(), Y, 'x', 'y', alphas)", "_____no_output_____" ] ], [ [ "The basic block of the discriminator is a convolution layer followed by a batch normalization layer and a leaky ReLU activation. The hyperparameters of the convolution layer are similar to the transpose convolution layer in the generator block.\n", "_____no_output_____" ] ], [ [ "class D_block(nn.Module):\n def __init__(self, out_channels, in_channels=3, kernel_size=4, strides=2,\n padding=1, alpha=0.2, **kwargs):\n super(D_block, self).__init__(**kwargs)\n self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size,\n strides, padding, bias=False)\n self.batch_norm = nn.BatchNorm2d(out_channels)\n self.activation = nn.LeakyReLU(alpha, inplace=True)\n\n def forward(self, X):\n return self.activation(self.batch_norm(self.conv2d(X)))", "_____no_output_____" ] ], [ [ "A basic block with default settings will halve the width and height of the inputs, as we demonstrated in :numref:`sec_padding`. For example, given a input shape $n_h = n_w = 16$, with a kernel shape $k_h = k_w = 4$, a stride shape $s_h = s_w = 2$, and a padding shape $p_h = p_w = 1$, the output shape will be:\n\n$$\n\\begin{aligned}\nn_h^{'} \\times n_w^{'} &= \\lfloor(n_h-k_h+2p_h+s_h)/s_h\\rfloor \\times \\lfloor(n_w-k_w+2p_w+s_w)/s_w\\rfloor\\\\\n &= \\lfloor(16-4+2\\times 1+2)/2\\rfloor \\times \\lfloor(16-4+2\\times 1+2)/2\\rfloor\\\\\n &= 8 \\times 8 .\\\\\n\\end{aligned}\n$$\n", "_____no_output_____" ] ], [ [ "x = torch.zeros((2, 3, 16, 16))\nd_blk = D_block(20)\nd_blk(x).shape", "_____no_output_____" ] ], [ [ "The discriminator is a mirror of the generator.\n", "_____no_output_____" ] ], [ [ "n_D = 64\nnet_D = nn.Sequential(\n D_block(n_D), # Output: (64, 32, 32)\n D_block(in_channels=n_D, out_channels=n_D*2), # Output: (64 * 2, 16, 16)\n D_block(in_channels=n_D*2, out_channels=n_D*4), # Output: (64 * 4, 8, 8)\n D_block(in_channels=n_D*4, out_channels=n_D*8), # Output: (64 * 8, 4, 4)\n nn.Conv2d(in_channels=n_D*8, out_channels=1,\n kernel_size=4, bias=False)) # Output: (1, 1, 1)", "_____no_output_____" ] ], [ [ "It uses a convolution layer with output channel $1$ as the last layer to obtain a single prediction value.\n", "_____no_output_____" ] ], [ [ "x = torch.zeros((1, 3, 64, 64))\nnet_D(x).shape", "_____no_output_____" ] ], [ [ "## Training\n\nCompared to the basic GAN in :numref:`sec_basic_gan`, we use the same learning rate for both generator and discriminator since they are similar to each other. In addition, we change $\\beta_1$ in Adam (:numref:`sec_adam`) from $0.9$ to $0.5$. It decreases the smoothness of the momentum, the exponentially weighted moving average of past gradients, to take care of the rapid changing gradients because the generator and the discriminator fight with each other. 
Besides, the random generated noise `Z`, is a 4-D tensor and we are using GPU to accelerate the computation.\n", "_____no_output_____" ] ], [ [ "def train(net_D, net_G, data_iter, num_epochs, lr, latent_dim,\n device=d2l.try_gpu()):\n loss = nn.BCEWithLogitsLoss(reduction='sum')\n for w in net_D.parameters():\n nn.init.normal_(w, 0, 0.02)\n for w in net_G.parameters():\n nn.init.normal_(w, 0, 0.02)\n net_D, net_G = net_D.to(device), net_G.to(device)\n trainer_hp = {'lr': lr, 'betas': [0.5,0.999]}\n trainer_D = torch.optim.Adam(net_D.parameters(), **trainer_hp)\n trainer_G = torch.optim.Adam(net_G.parameters(), **trainer_hp)\n animator = d2l.Animator(xlabel='epoch', ylabel='loss',\n xlim=[1, num_epochs], nrows=2, figsize=(5, 5),\n legend=['discriminator', 'generator'])\n animator.fig.subplots_adjust(hspace=0.3)\n for epoch in range(1, num_epochs + 1):\n # Train one epoch\n timer = d2l.Timer()\n metric = d2l.Accumulator(3) # loss_D, loss_G, num_examples\n for X, _ in data_iter:\n batch_size = X.shape[0]\n Z = torch.normal(0, 1, size=(batch_size, latent_dim, 1, 1))\n X, Z = X.to(device), Z.to(device)\n metric.add(d2l.update_D(X, Z, net_D, net_G, loss, trainer_D),\n d2l.update_G(Z, net_D, net_G, loss, trainer_G),\n batch_size)\n # Show generated examples\n Z = torch.normal(0, 1, size=(21, latent_dim, 1, 1), device=device)\n # Normalize the synthetic data to N(0, 1)\n fake_x = net_G(Z).permute(0, 2, 3, 1) / 2 + 0.5\n imgs = torch.cat(\n [torch.cat([\n fake_x[i * 7 + j].cpu().detach() for j in range(7)], dim=1)\n for i in range(len(fake_x)//7)], dim=0)\n animator.axes[1].cla()\n animator.axes[1].imshow(imgs)\n # Show the losses\n loss_D, loss_G = metric[0] / metric[2], metric[1] / metric[2]\n animator.add(epoch, (loss_D, loss_G))\n print(f'loss_D {loss_D:.3f}, loss_G {loss_G:.3f}, '\n f'{metric[2] / timer.stop():.1f} examples/sec on {str(device)}')", "_____no_output_____" ] ], [ [ "We train the model with a small number of epochs just for demonstration.\nFor better performance,\nthe variable `num_epochs` can be set to a larger number.\n", "_____no_output_____" ] ], [ [ "latent_dim, lr, num_epochs = 100, 0.005, 20\ntrain(net_D, net_G, data_iter, num_epochs, lr, latent_dim)", "loss_D 0.072, loss_G 6.780, 1069.5 examples/sec on cuda:0\n" ] ], [ [ "## Summary\n\n* DCGAN architecture has four convolutional layers for the Discriminator and four \"fractionally-strided\" convolutional layers for the Generator.\n* The Discriminator is a 4-layer strided convolutions with batch normalization (except its input layer) and leaky ReLU activations.\n* Leaky ReLU is a nonlinear function that give a non-zero output for a negative input. It aims to fix the “dying ReLU” problem and helps the gradients flow easier through the architecture.\n\n\n## Exercises\n\n1. What will happen if we use standard ReLU activation rather than leaky ReLU?\n1. Apply DCGAN on Fashion-MNIST and see which category works well and which does not.\n", "_____no_output_____" ], [ "[Discussions](https://discuss.d2l.ai/t/1083)\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
4a6b6a3721b85f8939f4be047d79fd39ea58ec06
3,893
ipynb
Jupyter Notebook
hw2/tf_tests.ipynb
zhenjiezhang/RL
5fbd2e4f76d07aea1b63c4237caa152a741404ec
[ "MIT" ]
null
null
null
hw2/tf_tests.ipynb
zhenjiezhang/RL
5fbd2e4f76d07aea1b63c4237caa152a741404ec
[ "MIT" ]
null
null
null
hw2/tf_tests.ipynb
zhenjiezhang/RL
5fbd2e4f76d07aea1b63c4237caa152a741404ec
[ "MIT" ]
null
null
null
17
53
0.456717
[ [ [ "import tensorflow as tf", "_____no_output_____" ], [ "with tf.variable_scope(\"vs1\", reuse=True):\n tv1 = tf.get_variable(\"v1\", [1])\nwith tf.variable_scope(\"vs1\", reuse=True):\n tv2 = tf.get_variable(\"v1\")", "_____no_output_____" ], [ "assert tv1 == tv2", "_____no_output_____" ], [ "type(tv1)", "_____no_output_____" ], [ "with tf.name_scope(\"ns1\"):\n n1 = tf.Variable(0, name = \"vn1\")\nwith tf.name_scope(\"ns1\"):\n n3 = tf.get_variable(\"vs1\", [0])\n n2 = tf.Variable(0, \"vn1\")", "_____no_output_____" ], [ "type(n1)", "_____no_output_____" ], [ "n1.name", "_____no_output_____" ], [ "tv1.name", "_____no_output_____" ], [ "n1.op.name", "_____no_output_____" ], [ "l1 = tf.Variable(0, name = \"l1\")", "_____no_output_____" ], [ "l1.name", "_____no_output_____" ], [ "n3.name", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a6b6dfe41e8adcf2f586a1cfb257fd3b8b7fb67
1,019,078
ipynb
Jupyter Notebook
NEU/Tejas_Bawaskar _DL/Deep Network Visualization Diff weights (1).ipynb
ashwinjohn3/Deep_Learning
bc9f09bd218cb5a269a99f5f170a239c3ff35a6e
[ "MIT" ]
1
2020-06-03T15:07:12.000Z
2020-06-03T15:07:12.000Z
NEU/Tejas_Bawaskar _DL/Deep Network Visualization Diff weights (1).ipynb
KailashDN/Deep_Learning
bc9f09bd218cb5a269a99f5f170a239c3ff35a6e
[ "MIT" ]
null
null
null
NEU/Tejas_Bawaskar _DL/Deep Network Visualization Diff weights (1).ipynb
KailashDN/Deep_Learning
bc9f09bd218cb5a269a99f5f170a239c3ff35a6e
[ "MIT" ]
null
null
null
470.92329
94,234
0.915
[ [ [ "### Convolutional autoencoder\n\nSince our inputs are images, it makes sense to use convolutional neural networks (convnets) as encoders and decoders. In practical settings, autoencoders applied to images are always convolutional autoencoders --they simply perform much better.\n\nLet's implement one. The encoder will consist in a stack of Conv2D and MaxPooling2D layers (max pooling being used for spatial down-sampling), while the decoder will consist in a stack of Conv2D and UpSampling2D layers.", "_____no_output_____" ] ], [ [ "from keras.layers import Input, Dense, Conv2D, MaxPooling2D, UpSampling2D\nfrom keras.models import Model\nfrom keras import backend as K\nimport numpy as np", "_____no_output_____" ], [ "from keras.layers import Input, Dense, Conv2D, MaxPooling2D, UpSampling2D\nfrom keras.models import Model\nfrom keras import backend as K\n\ninput_img = Input(shape=(32, 32, 3)) # adapt this if using `channels_first` image data format\n\nx1 = Conv2D(16, (3, 3), activation='relu', padding='same')(input_img)\nx2 = MaxPooling2D((2, 2), padding='same')(x1)\nx3 = Conv2D(8, (6, 6), activation='relu', padding='same')(x2)\nx4 = MaxPooling2D((2, 2), padding='same')(x3)\nx5 = Conv2D(8, (9, 9), activation='relu', padding='same')(x4)\nencoded = MaxPooling2D((2, 2), padding='same')(x5)\n\n# at this point the representation is (4, 4, 8) i.e. 128-dimensional\n\nx6 = Conv2D(8, (9, 9), activation='relu', padding='same')(encoded)\nx7 = UpSampling2D((2, 2))(x6)\nx8 = Conv2D(8, (6, 6), activation='relu', padding='same')(x7)\nx9 = UpSampling2D((2, 2))(x8)\nx10 = Conv2D(16, (3, 3), activation='relu', padding='same')(x9)\nx11 = UpSampling2D((2, 2))(x10)\ndecoded = Conv2D(3, (3, 3), activation='sigmoid', padding='same')(x11)\n\nautoencoder = Model(input_img, decoded)\nautoencoder.compile(optimizer='adagrad', loss='binary_crossentropy')", "_____no_output_____" ], [ "from keras.datasets import cifar10\nimport numpy as np\n\n(x_train, _), (x_test, _) = cifar10.load_data()\n\nx_train = x_train.astype('float32') / 255.\nx_test = x_test.astype('float32') / 255.\nx_train = np.reshape(x_train, (len(x_train), 32, 32, 3)) # adapt this if using `channels_first` image data format\nx_test = np.reshape(x_test, (len(x_test), 32, 32, 3)) # adapt this if using `channels_first` image data format", "_____no_output_____" ], [ "autoencoder.fit(x_train, x_train,\n epochs=50,\n batch_size=128,\n shuffle=True,\n validation_data=(x_test, x_test))", "Train on 50000 samples, validate on 10000 samples\nEpoch 1/50\n50000/50000 [==============================] - 148s - loss: 0.6373 - val_loss: 0.6144\nEpoch 2/50\n50000/50000 [==============================] - 148s - loss: 0.6095 - val_loss: 0.6055\nEpoch 3/50\n50000/50000 [==============================] - 149s - loss: 0.6035 - val_loss: 0.6015\nEpoch 4/50\n50000/50000 [==============================] - 149s - loss: 0.5995 - val_loss: 0.5982\nEpoch 5/50\n50000/50000 [==============================] - 148s - loss: 0.5964 - val_loss: 0.5952\nEpoch 6/50\n50000/50000 [==============================] - 149s - loss: 0.5939 - val_loss: 0.5937\nEpoch 7/50\n50000/50000 [==============================] - 147s - loss: 0.5918 - val_loss: 0.5911\nEpoch 8/50\n50000/50000 [==============================] - 147s - loss: 0.5904 - val_loss: 0.5906\nEpoch 9/50\n50000/50000 [==============================] - 149s - loss: 0.5894 - val_loss: 0.5897\nEpoch 10/50\n50000/50000 [==============================] - 147s - loss: 0.5887 - val_loss: 0.5891\nEpoch 11/50\n50000/50000 
[==============================] - 148s - loss: 0.5879 - val_loss: 0.5890\nEpoch 12/50\n50000/50000 [==============================] - 149s - loss: 0.5873 - val_loss: 0.5877\nEpoch 13/50\n50000/50000 [==============================] - 148s - loss: 0.5867 - val_loss: 0.5870\nEpoch 14/50\n50000/50000 [==============================] - 148s - loss: 0.5862 - val_loss: 0.5868\nEpoch 15/50\n50000/50000 [==============================] - 148s - loss: 0.5857 - val_loss: 0.5858\nEpoch 16/50\n50000/50000 [==============================] - 149s - loss: 0.5852 - val_loss: 0.5861\nEpoch 17/50\n50000/50000 [==============================] - 148s - loss: 0.5849 - val_loss: 0.5859\nEpoch 18/50\n50000/50000 [==============================] - 149s - loss: 0.5845 - val_loss: 0.5848\nEpoch 19/50\n50000/50000 [==============================] - 150s - loss: 0.5842 - val_loss: 0.5846\nEpoch 20/50\n50000/50000 [==============================] - 149s - loss: 0.5838 - val_loss: 0.5857\nEpoch 21/50\n50000/50000 [==============================] - 148s - loss: 0.5836 - val_loss: 0.5838\nEpoch 22/50\n50000/50000 [==============================] - 148s - loss: 0.5832 - val_loss: 0.5845\nEpoch 23/50\n50000/50000 [==============================] - 149s - loss: 0.5830 - val_loss: 0.5834\nEpoch 24/50\n50000/50000 [==============================] - 149s - loss: 0.5827 - val_loss: 0.5833\nEpoch 25/50\n50000/50000 [==============================] - 149s - loss: 0.5824 - val_loss: 0.5830\nEpoch 26/50\n50000/50000 [==============================] - 149s - loss: 0.5822 - val_loss: 0.5832\nEpoch 27/50\n50000/50000 [==============================] - 148s - loss: 0.5819 - val_loss: 0.5824\nEpoch 28/50\n50000/50000 [==============================] - 149s - loss: 0.5818 - val_loss: 0.5821\nEpoch 29/50\n50000/50000 [==============================] - 148s - loss: 0.5816 - val_loss: 0.5834\nEpoch 30/50\n50000/50000 [==============================] - 148s - loss: 0.5814 - val_loss: 0.5818\nEpoch 31/50\n50000/50000 [==============================] - 149s - loss: 0.5813 - val_loss: 0.5820\nEpoch 32/50\n50000/50000 [==============================] - 148s - loss: 0.5811 - val_loss: 0.5821\nEpoch 33/50\n50000/50000 [==============================] - 149s - loss: 0.5809 - val_loss: 0.5813\nEpoch 34/50\n50000/50000 [==============================] - 148s - loss: 0.5808 - val_loss: 0.5821\nEpoch 35/50\n50000/50000 [==============================] - 149s - loss: 0.5806 - val_loss: 0.5814\nEpoch 36/50\n50000/50000 [==============================] - 149s - loss: 0.5805 - val_loss: 0.5811\nEpoch 37/50\n50000/50000 [==============================] - 149s - loss: 0.5804 - val_loss: 0.5809\nEpoch 38/50\n50000/50000 [==============================] - 149s - loss: 0.5802 - val_loss: 0.5807\nEpoch 39/50\n50000/50000 [==============================] - 149s - loss: 0.5802 - val_loss: 0.5810\nEpoch 40/50\n50000/50000 [==============================] - 149s - loss: 0.5800 - val_loss: 0.5811\nEpoch 41/50\n50000/50000 [==============================] - 148s - loss: 0.5799 - val_loss: 0.5806\nEpoch 42/50\n50000/50000 [==============================] - 148s - loss: 0.5799 - val_loss: 0.5810\nEpoch 43/50\n50000/50000 [==============================] - 148s - loss: 0.5797 - val_loss: 0.5805\nEpoch 44/50\n50000/50000 [==============================] - 148s - loss: 0.5796 - val_loss: 0.5804\nEpoch 45/50\n50000/50000 [==============================] - 149s - loss: 0.5796 - val_loss: 0.5807\nEpoch 46/50\n50000/50000 [==============================] - 150s - loss: 0.5795 
- val_loss: 0.5801\nEpoch 47/50\n50000/50000 [==============================] - 173s - loss: 0.5794 - val_loss: 0.5800\nEpoch 48/50\n50000/50000 [==============================] - 180s - loss: 0.5793 - val_loss: 0.5804\nEpoch 49/50\n50000/50000 [==============================] - 181s - loss: 0.5793 - val_loss: 0.5806\nEpoch 50/50\n50000/50000 [==============================] - 180s - loss: 0.5792 - val_loss: 0.5800\n" ], [ "from keras.models import load_model\n\n#autoencoder.save('cifar10_autoencoders.h5') # creates a HDF5 file 'my_model.h5'\n#del model # deletes the existing model.\n", "Using TensorFlow backend.\n" ], [ "# returns a compiled model\n# identical to the previous one\nautoencoder = load_model('cifar10_autoencoders.h5')", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\n\ndecoded_imgs = autoencoder.predict(x_test)\n\nn = 10\nplt.figure(figsize=(20, 4))\nfor i in range(n):\n # display original\n ax = plt.subplot(2, n, i + 1)\n plt.imshow(x_test[i].reshape(32, 32, 3))\n plt.gray()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n \n # display reconstruction\n ax = plt.subplot(2, n, i + n + 1)\n plt.imshow(decoded_imgs[i].reshape(32, 32, 3))\n plt.gray()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\nplt.show()", "_____no_output_____" ] ], [ [ "### Plotting the weights from the first layer", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\n\nn = 8\n\nfor i in range(n):\n fig = plt.figure(figsize=(1,1))\n conv_1 = np.asarray(autoencoder.layers[1].get_weights())[0][:,:,0,i]\n ax = fig.add_subplot(111)\n plt.imshow(conv_1.transpose(), cmap = 'gray')\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n plt.show()", "_____no_output_____" ], [ "autoencoder.layers[3].get_weights()", "_____no_output_____" ], [ "from keras import backend as K", "_____no_output_____" ], [ "# K.learning_phase() is a flag that indicates if the network is in training or\n# predict phase. It allow layer (e.g. 
Dropout) to only be applied during training\ninputs = [K.learning_phase()] + autoencoder.inputs\n\n_layer1_f = K.function(inputs, [x2])\ndef convout1_f(X):\n # The [0] is to disable the training phase flag\n return _layer1_f([0] + [X])\n\n#_lay_f = K.function(inputs, [x1])\n#def convout1_f(X):\n # The [0] is to disable the training phase flag\n# return _layer1_f([0] + [X])\n\n_layer2_f = K.function(inputs, [x4])\ndef convout2_f(X):\n # The [0] is to disable the training phase flag\n return _layer2_f([0] + [X])\n\n_layer3_f = K.function(inputs, [encoded])\ndef convout3_f(X):\n # The [0] is to disable the training phase flag\n return _layer3_f([0] + [X])\n\n_up_layer1_f = K.function(inputs, [x6])\ndef convout4_f(X):\n # The [0] is to disable the training phase flag\n return _up_layer1_f([0] + [X])\n\n_up_layer2_f = K.function(inputs, [x8])\ndef convout5_f(X):\n # The [0] is to disable the training phase flag\n return _up_layer2_f([0] + [X])\n\n_up_layer3_f = K.function(inputs, [x10])\ndef convout6_f(X):\n # The [0] is to disable the training phase flag\n return _up_layer3_f([0] + [X])\n\n_up_layer4_f = K.function(inputs, [decoded])\ndef convout7_f(X):\n # The [0] is to disable the training phase flag\n return _up_layer4_f([0] + [X])\n", "_____no_output_____" ], [ "x2", "_____no_output_____" ], [ "i = 1\nx = x_test[i:i+1]", "_____no_output_____" ] ], [ [ "### Visualizing the first convnet/output layer_1 with sample first test image", "_____no_output_____" ] ], [ [ "np.squeeze(np.squeeze(np.array(convout1_f(x)),0),0).shape", "_____no_output_____" ], [ "#Plotting conv_1\nfor i in range(4):\n #i = 3\n x = x_test[i:i+1]\n check = np.squeeze(np.squeeze(np.array(convout1_f(x)),0),0)\n\n temp = x[0,:,:,:]\n fig, axes = plt.subplots(1, 1, figsize=(3, 3))\n plt.imshow(temp)\n plt.show()\n\n\n k = 0\n while k < check.shape[2]:\n #plt.figure()\n #plt.subplot(231 + i)\n fig, axes = plt.subplots(4, 4, figsize=(5, 5))\n for i in range(4):\n for j in range(4):\n axes[i,j].imshow(check[:,:,k], cmap = 'gray')\n k += 1\n #axes[0, 0].imshow(R, cmap='jet')\n #plt.imshow(check[:,:,i])\n\n plt.show()", "_____no_output_____" ], [ "check.shape", "_____no_output_____" ] ], [ [ "### Visualizing the second convnet/output layer_2 with sample test image", "_____no_output_____" ] ], [ [ "i = 3\nx = x_test[i:i+1]\ncheck = np.squeeze(np.squeeze(np.array(convout2_f(x)),0),0)\ncheck.shape", "_____no_output_____" ], [ "#Plotting conv_2\nfor i in range(4):\n #i = 3\n x = x_test[i:i+1]\n check = np.squeeze(np.squeeze(np.array(convout1_f(x)),0),0)\n\n temp = x[0,:,:,:]\n fig, axes = plt.subplots(1, 1, figsize=(3, 3))\n plt.imshow(temp)\n plt.show()\n\n\n k = 0\n while k < check.shape[2]:\n #plt.figure()\n #plt.subplot(231 + i)\n fig, axes = plt.subplots(2, 4, figsize=(5, 5))\n for i in range(2):\n for j in range(4):\n axes[i,j].imshow(check[:,:,k])\n k += 1\n #axes[0, 0].imshow(R, cmap='jet')\n #plt.imshow(check[:,:,i])\n\n plt.show()", "_____no_output_____" ] ], [ [ "### Plotting the third convnet/output layer_3 with sample test image", "_____no_output_____" ] ], [ [ "i = 3\nx = x_test[i:i+1]\ncheck = np.squeeze(np.squeeze(np.array(convout3_f(x)),0),0)\ncheck.shape", "_____no_output_____" ], [ "#Plotting conv_3\nfor i in range(4):\n #i = 3\n x = x_test[i:i+1]\n check = np.squeeze(np.squeeze(np.array(convout1_f(x)),0),0)\n\n temp = x[0,:,:,:]\n fig, axes = plt.subplots(1, 1, figsize=(3, 3))\n plt.imshow(temp)\n plt.show()\n\n\n k = 0\n while k < check.shape[2]:\n #plt.figure()\n #plt.subplot(231 + i)\n fig, axes = plt.subplots(2, 
4, figsize=(5, 5))\n for i in range(2):\n for j in range(4):\n axes[i,j].imshow(check[:,:,k])\n k += 1\n #axes[0, 0].imshow(R, cmap='jet')\n #plt.imshow(check[:,:,i])\n\n plt.show()", "_____no_output_____" ] ], [ [ "### Visualizing the fourth convnet/decoded/output layer_4 with sample test image", "_____no_output_____" ] ], [ [ "i = 3\nx = x_test[i:i+1]\ncheck = np.squeeze(np.squeeze(np.array(convout4_f(x)),0),0)\ncheck.shape", "_____no_output_____" ], [ "#Plotting conv_4\nfor i in range(4):\n #i = 3\n x = x_test[i:i+1]\n check = np.squeeze(np.squeeze(np.array(convout1_f(x)),0),0)\n\n temp = x[0,:,:,:]\n fig, axes = plt.subplots(1, 1, figsize=(3, 3))\n plt.imshow(temp)\n plt.show()\n\n k = 0\n while k < check.shape[2]:\n #plt.figure()\n #plt.subplot(231 + i)\n fig, axes = plt.subplots(2, 4, figsize=(5, 5))\n for i in range(2):\n for j in range(4):\n axes[i,j].imshow(check[:,:,k])\n k += 1\n #axes[0, 0].imshow(R, cmap='jet')\n #plt.imshow(check[:,:,i])\n\n plt.show()", "_____no_output_____" ] ], [ [ "### Visualizing the fifth convnet/decoded/output layer_5 with sample test image", "_____no_output_____" ] ], [ [ "i = 3\nx = x_test[i:i+1]\ncheck = np.squeeze(np.squeeze(np.array(convout5_f(x)),0),0)\ncheck.shape", "_____no_output_____" ], [ "#Plotting conv_5\nfor i in range(4):\n #i = 3\n x = x_test[i:i+1]\n check = np.squeeze(np.squeeze(np.array(convout1_f(x)),0),0)\n\n temp = x[0,:,:,:]\n fig, axes = plt.subplots(1, 1, figsize=(3, 3))\n plt.imshow(temp)\n plt.show()\n\n\n k = 0\n while k < check.shape[2]:\n #plt.figure()\n #plt.subplot(231 + i)\n fig, axes = plt.subplots(2, 4, figsize=(5, 5))\n for i in range(2):\n for j in range(4):\n axes[i,j].imshow(check[:,:,k])\n k += 1\n #axes[0, 0].imshow(R, cmap='jet')\n #plt.imshow(check[:,:,i])\n\n plt.show()", "_____no_output_____" ] ], [ [ "### Visualizing the sixth convnet/decoded/output layer_6 with sample test image", "_____no_output_____" ] ], [ [ "i = 3\nx = x_test[i:i+1]\ncheck = np.squeeze(np.squeeze(np.array(convout6_f(x)),0),0)\ncheck.shape", "_____no_output_____" ], [ "#Plotting conv_6\nfor i in range(4):\n #i = 3\n x = x_test[i:i+1]\n check = np.squeeze(np.squeeze(np.array(convout1_f(x)),0),0)\n\n temp = x[0,:,:,:]\n fig, axes = plt.subplots(1, 1, figsize=(3, 3))\n plt.imshow(temp)\n plt.show()\n\n\n k = 0\n while k < check.shape[2]:\n #plt.figure()\n #plt.subplot(231 + i)\n fig, axes = plt.subplots(4, 4, figsize=(5, 5))\n for i in range(4):\n for j in range(4):\n axes[i,j].imshow(check[:,:,k])\n k += 1\n #axes[0, 0].imshow(R, cmap='jet')\n #plt.imshow(check[:,:,i])\n\n plt.show()", "_____no_output_____" ] ], [ [ "### Visualizing the final decoded/output layer with sample test image", "_____no_output_____" ] ], [ [ "i = 1\nx = x_test[i:i+1]\ncheck = np.squeeze(np.squeeze(np.array(convout7_f(x)),0),0)\ncheck.shape", "_____no_output_____" ], [ "#Plot final decoded layer\n\ndecoded_imgs = autoencoder.predict(x_test)\n\nn = 4\nplt.figure(figsize=(20, 4))\nfor i in range(n):\n # display original\n ax = plt.subplot(2, n, i + 1)\n plt.imshow(x_test[i].reshape(32, 32, 3))\n plt.gray()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\n # display reconstruction\n ax = plt.subplot(2, n, i + n + 1)\n plt.imshow(decoded_imgs[i].reshape(32, 32, 3))\n plt.gray()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n \nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
4a6bac92890befbdbc88ab9b49b89b314db9f885
404,931
ipynb
Jupyter Notebook
nb/03_Numerical_Integration.ipynb
shishitao/boffi_dynamics
365f16d047fb2dbfc21a2874790f8bef563e0947
[ "MIT" ]
null
null
null
nb/03_Numerical_Integration.ipynb
shishitao/boffi_dynamics
365f16d047fb2dbfc21a2874790f8bef563e0947
[ "MIT" ]
null
null
null
nb/03_Numerical_Integration.ipynb
shishitao/boffi_dynamics
365f16d047fb2dbfc21a2874790f8bef563e0947
[ "MIT" ]
2
2019-06-23T12:32:39.000Z
2021-08-15T18:33:55.000Z
44.057339
298
0.479822
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]