Columns: repo_name (string, lengths 6-130), hexsha (list), file_path (list), code (list), apis (list)
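Each record that follows pairs a repository name with a commit hash, a file path, the file's full source text, and the list of library APIs used in that file; hexsha, file_path, code, and apis are parallel lists with one entry per file. As a minimal sketch of how such records might be consumed, the snippet below assumes the rows have been exported to a local JSON-lines file (the path records.jsonl and the field access are assumptions for illustration, not part of the data shown) and builds a per-file API index.

import json
from collections import Counter

# Assumed local export of the records below, one JSON object per line with the
# fields repo_name, hexsha, file_path, code, apis (the last four are lists).
RECORDS_PATH = "records.jsonl"  # hypothetical path, not part of the original data

apis_by_file = {}
with open(RECORDS_PATH, "r", encoding="utf-8") as fh:
    for line in fh:
        record = json.loads(line)
        repo = record["repo_name"]
        # file_path and apis are parallel lists, one entry per file in the record.
        for path, api_list in zip(record["file_path"], record["apis"]):
            apis_by_file[(repo, path)] = list(api_list)

# Example use: rank the most frequently used APIs across all indexed files.
api_counts = Counter(api for api_list in apis_by_file.values() for api in api_list)
print(api_counts.most_common(10))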
donsheehy/VoronoiWall
[ "4d85dfa3c1b3ae302f841fb16204c15eaa8398e4" ]
[ "VoronoiWall/deprecated/model.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.spatial import Delaunay\nfrom scipy.spatial import Voronoi\nfrom scipy.spatial import ConvexHull\nfrom math import sqrt, ceil\nfrom matplotlib.widgets import Button\nfrom matplotlib.widgets import TextBox\nimport pickle\nfrom util.math_utils import barycenter\nfrom mpl_toolkits.mplot3d import Axes3D\n\nimport halfedge\nimport diagram\n\n\"\"\"\nGenerates an STL file for 3D printing a Voronoi diagram. (eventually)\n\"\"\"\nclass DelaunayTris:\n def __init__(self, points=[]):\n self.points = points\n self.cells = [] #Faces for halfedge data structure\n\n self.target_point = -1\n\n self.fig = plt.figure()\n self.ax = self.fig.gca(projection=\"3d\")\n plt.subplots_adjust(bottom=0.2)\n # self.cid_press = self.ax.figure.canvas.mpl_connect('button_press_event', self.on_press)\n # self.cid_release = self.ax.figure.canvas.mpl_connect('button_release_event', self.on_release)\n # self.cid_motion = self.ax.figure.canvas.mpl_connect('motion_notify_event', self.on_motion)\n\n axprepare = plt.axes([0.7, 0.05, 0.15, 0.075])\n axsave = plt.axes([0.5, 0.05, 0.15, 0.075])\n axopen = plt.axes([0.3, 0.05, 0.15, 0.075])\n axfile_name = plt.axes([0.05, 0.05, 0.15, 0.075])\n\n self.bprepare = Button(axprepare, 'Prepare stl')\n self.bprepare.on_clicked(self.prepare)\n\n self.bsave = Button(axsave, 'Save Points')\n self.bsave.on_clicked(self.save_points)\n\n self.bopen = Button(axopen, 'Open Points')\n self.bopen.on_clicked(self.open_points)\n\n self.points_file = \"points.p\"\n self.textbox_file_name = TextBox(axfile_name, \"\", initial=\"points.p\")\n self.textbox_file_name.on_text_change(self.update_file_name)\n\n self.triangulate_vis()\n\n def load_points(self, points):\n \"\"\"Function to load and display an array of points\"\"\"\n if len(points) == 0:\n print(\"No points provided!\")\n return\n print(\"Loading \" + str(len(points)) + \" \" + str(len(points[0])) + \" dimensional points\")\n self.points = points\n self.triangulate_vis()\n\n def open_points(self, event):\n file_points = pickle.load(open(self.points_file, \"rb\"))\n self.load_points(file_points)\n\n def save_points(self, event):\n pickle.dump(self.points, open(self.points_file, \"wb+\"))\n\n\n def update_file_name(self, text):\n self.points_file = text\n\n def prepare(self, event, filePath=\"out.stl\"):\n \"\"\"\n Prepares a 3D printable model of the given Voronoi diagram.\n\n :param voronoi: computed Voronoi attributes\n :return: unused\n \"\"\"\n self.subdivideFace(0)\n print(\"Preparing\")\n # points = np.array([[6, 4, 2], [9, 5, 8], [9, 1, 9], [8, 9, 1], [3, 8, 8], [2, 6, 2], [8, 2, 10], [3, 6, 1], [9, 8, 9],\n # [7, 7, 4],\n # [2, 10, 5], [4, 3, 10], [5, 3, 9], [4, 7, 4], [3, 6, 7], [7, 4, 3], [6, 4, 9], [5, 8, 4], [2, 9, 10],\n # [7, 8, 6], [9, 2, 7], [6, 10, 7], [9, 9, 3], [2, 9, 4], [5, 9, 6], [4, 8, 9], [9, 1, 2], [6, 9, 1],\n # [10, 6, 5], [1, 9, 9], [2, 1, 3], [10, 1, 5], [4, 10, 2]])\n\n\n output = open(filePath, \"w\")\n output.write(\"solid Voronoi\\n\")\n faces = []\n for indexList in self.voronoi.ridge_vertices:\n if -1 not in indexList:\n face = []\n for index in indexList:\n face.append(self.voronoi.vertices[index])\n faces.append(np.asarray(face))\n # I'm thinking order could be important for the triangle vertices and is being lost?\n for face in faces:\n triangles = self.triangulate(face)\n # compute a normal vector for this face\n normal = np.cross(face[1] - face[0], face[2] - face[1])\n # process points in batches of 3 (points of a triangle)\n for i in 
range(0, len(triangles), 3):\n # begin a new STL triangle\n output.write(\"facet normal {} {} {}\\n\".format(normal[0], normal[1], normal[2]))\n output.write(\"outer loop\\n\")\n trianglePoints = triangles[i:i + 3]\n print(trianglePoints)\n for j in range(0, 3):\n output.write(\"vertex {} {} {}\\n\".format(trianglePoints[j][0], trianglePoints[j][1], trianglePoints[j][2]))\n output.write(\"endloop\\nendfacet\\n\")\n\n output.write(\"endsolid Voronoi\\n\")\n\n def triangulate(self, points):\n \"\"\"\n Splits a 3D planar facet into triangles.\n :param points: vertex coordinates for a planar face in 3D\n :return: vertices of the divided plane\n \"\"\"\n # move all points by this much so the shape has to be touching the origin\n average_point = np.zeros((1,3))\n for point in points:\n average_point += point\n average_point /= len(points)\n return np.append(points, average_point, axis=0)\n\n def subdivideOnce(self, points):\n \"\"\"\n Given the vertices of a 2D shape located in the XY coordinate plane, subdivides the inner area into triangular\n shapes (necessary for 3D printing) using the Delaunay triangulation.\n\n :param points: a numpy array of input points; this array is modified in-place\n :return: unused\n \"\"\"\n\n from scipy.spatial import Delaunay\n\n triangulation = Delaunay(points)\n trianglePoints = []\n for indexList in triangulation.simplices:\n for index in indexList:\n trianglePoints.append(points[index])\n return trianglePoints\n\n def chopOffThirdDimension(self, npArrayOf3DPoints):\n return np.delete(npArrayOf3DPoints, 2, 1)\n\n\n def addEmptyThirdDimension(self, npArrayOf2DPoints):\n return np.insert(npArrayOf2DPoints, 2, values=0, axis=1)\n\n\n def rotateToPlane(self, points, normalVectorOriginal, normalVectorNew, isAtOrigin=True, offset=np.array([0, 0, 0])):\n \"\"\"\n Rotates a shape defined by its vertices about a defined axis. 
Useful for putting a planar shape located in 3D into\n a coordinate plane or restoring it to its original location in 3D space.\n\n :param points: list of points to rotate about an axis\n :param normalVectorOriginal: vector (as numpy array) which is normal to the original plane\n :param normalVectorNew: vector (as numpy array) which is normal to the desired plane\n :param isAtOrigin: True if the shape defined by the given points is located at the origin\n :param offset: a vector (as numpy array) offset which is either subtracted from the given points or\n added to the resulting points if isAtOrigin is False or True\n :return: new numpy array of points rotated about the defined axis\n \"\"\"\n from math import sqrt\n if not isAtOrigin:\n # translate points by the offset, typically moving the shape to the origin\n points = points - offset\n M = normalVectorOriginal\n N = normalVectorNew\n # compute costheta using the geometric dot product\n costheta = np.dot(M, N) / (np.linalg.norm(M) * np.linalg.norm(N))\n # cross the two axis vectors, make the result a unit vector\n mncross = np.cross(M, N)\n axis = mncross / np.linalg.norm(mncross)\n # shorten variable names (s = sintheta)\n c = costheta\n s = sqrt(1 - c * c)\n C = 1 - c\n [x, y, z] = axis\n\n # rotation matrix via https://en.wikipedia.org/wiki/Rotation_matrix#Axis_and_angle\n rmat = np.array([[x * x * C + c, x * y * C - z * s, x * z * C + y * s],\n [y * x * C + z * s, y * y * C + c, y * z * C - x * s],\n [z * x * C - y * s, z * y * C + x * s, z * z * C + c]])\n\n if isAtOrigin:\n # rotate all of the points and then move the shape back to its original location\n return list(map(lambda point: np.dot(rmat, point) + offset, points))\n else:\n # rotate all of the points; will only work correctly if the shape is at the origin\n return list(map(lambda point: np.dot(rmat, point), points))\n\n\n def triangulate_vis(self):\n #self.subdivideFace(self.points)\n self.plotVoronoi(self.points)\n\n\n def subdivideFace(self, face_index):\n \"\"\"\n Given the index of a 3D face located in self.faces, subdivides the inner area into triangular\n shapes (necessary for 3D printing)\n :param points: a numpy array of input points; this array is modified in-place\n :return: array of tris\n \"\"\"\n\n face = self.faces[face_index]\n verts = face.getVertices()\n\n print(verts)\n center = barycenter(verts)\n\n tris = []\n for i in range(len(face.halfedges)):\n cur_point = face.halfedges[i].vertex\n next_point = face.halfedges[i].next.vertex\n tris.append([cur_point.location, next_point.location, center])\n\n print(tris)\n return\n triangulation = Delaunay(points)\n\n trianglePoints = []\n for indexList in triangulation.simplices:\n for index in indexList:\n trianglePoints.append(points[index])\n\n points = trianglePoints\n return trianglePoints\n\n\n\n def plotVoronoi(self, points):\n \"\"\"\n Display the subdivided face in 2D with matplotlib.\n\n Adapted from: https://stackoverflow.com/a/24952758\n :param points: points to plot, connecting them by simultaneously visualizing the Delaunary triangulation\n :return: unused\n \"\"\"\n\n self.cells = []\n self.voronoi = Voronoi(points)\n\n vertices = []\n for i in range(len(self.voronoi.vertices)):\n location = [self.voronoi.vertices[i, 0],self.voronoi.vertices[i, 1],self.voronoi.vertices[i, 2]]\n vertices.append(halfedge.vertex(location=location))\n\n for r in range(len(self.voronoi.regions)):\n #self.regions.append(halfedge.face())\n cell = halfedge.cell()\n faces = []\n region = self.voronoi.regions[r]\n 
region_points = []\n region_point_indices = []\n\n for index in region:\n if index == -1 or index >= len(vertices):\n break\n region_points.append(vertices[index].location)\n region_point_indices.append(vertices[index])\n\n if len(region_points) != len(region) or len(region) < 3:\n continue\n\n hull = ConvexHull(region_points)\n for simplex in hull.simplices:\n face = halfedge.face()\n\n edges = []\n for i in range(len(simplex)):\n edges.append(halfedge.halfedge(vertex=vertices[simplex[i]], face=face))\n if i > 0:\n edges[i].previous = edges[i-1] #Previous edge is edge before this one in the list\n edges[i-1].next = edges[i] #This edge is the next edge for the one before\n edges[i-1].vertex.halfedge = edges[i] #This edge is the outgoing edge for the last vertex\n if i == len(simplex)-1:\n edges[0].previous = edges[len(simplex)-1]\n edges[len(simplex)-1].next = edges[0]\n edges[len(simplex)-1].vertex.halfedge = edges[0]\n face.halfedges = edges\n faces.append(face)\n cell.faces = faces\n self.cells.append(cell)\n print(len(self.cells[0].faces))\n\n # face = halfedge.face()\n #\n # edges = []\n # for i in range(len(region)):\n # vertex_index = region[i]\n # if vertex_index == -1:\n # break\n #\n # edges.append(halfedge.halfedge(vertex=vertices[vertex_index], face=face))\n # if i > 0:\n # edges[i].previous = edges[i-1] #Previous edge is edge before this one in the list\n # edges[i-1].next = edges[i] #This edge is the next edge for the one before\n # edges[i-1].vertex.halfedge = edges[i] #This edge is the outgoing edge for the last vertex\n # if i == len(region)-1:\n # edges[0].previous = edges[len(region)-1]\n # edges[len(region)-1].next = edges[0]\n # edges[len(region)-1].vertex.halfedge = edges[0]\n #\n # if len(edges) < len(region):\n # continue\n # else:\n # face.halfedges = edges\n # self.cells.append(face)\n #\n #\n # #Algorithm to fill in the opposite field for halfedges\n # for f in range(len(self.cells)): #Loop through every face\n # halfedges = self.faces[f].halfedges #Get the halfedges that make up the face\n # for edge in halfedges: #Do this for every halfedge\n # if edge.opposite == None: #only if the edge doesn't have an opposite\n # next = edge.next #Get the next edge\n # vertex_next_edges = edge.vertex.halfedges #List of outgoing edges from next vertex\n # for vertex_next_edge in vertex_next_edges: #loop through these outgoing edges\n # if not(vertex_next_edge == next) and vertex_next_edge.vertex == edge.previous.vertex: #if the outgoing edge isn't the next halfedge and it goes\n # #into the same vertex that the current edge originates from\n # edge.opposite = vertex_next_edge #Set the opposite of the current edge\n # vertex_next_edge.opposite = edge #to the outgoing edge\n\n\n\n\n self.ax.clear()\n\n #PLOTTING FROM SCIPY VORONOI DATA STRUCTURE\n self.voronoi_points = self.ax.scatter(self.voronoi.points[:,0],self.voronoi.points[:,1],self.voronoi.points[:,2])\n self.ax.scatter(self.voronoi.vertices[:,0],self.voronoi.vertices[:,1],self.voronoi.vertices[:,2], 'r')\n\n for i in range(len(self.voronoi.ridge_vertices)):\n points = np.zeros((0,3))\n for j in range(len(self.voronoi.ridge_vertices[i])):\n index = self.voronoi.ridge_vertices[i][j]\n if index >= 0:\n points = np.append(points, np.array([[self.voronoi.vertices[index, 0],self.voronoi.vertices[index, 1],self.voronoi.vertices[index, 2]]]), axis=0)\n if len(points) > 1:\n self.ax.plot(points[:, 0], points[:, 1], points[:, 2], 'b')\n\n #PLOTTING FROM HALFEDGE DATA STRUCTURE\n for cell in self.cells:\n print(len(cell.faces))\n 
for face in cell.faces:\n if len(face.halfedges)<1:\n continue\n start_edge = face.halfedges[-1]\n cur_edge = start_edge.next\n while True:\n locations = np.array([cur_edge.previous.vertex.location, cur_edge.vertex.location])\n self.ax.plot(locations[:,0], locations[:,1], locations[:,2], 'go-')\n\n if cur_edge == start_edge:\n break\n cur_edge = cur_edge.next\n\n plt.show()\n\n def on_press(self, event):\n print(event.inaxes)\n print(event.xdata)\n print(event.ydata)\n print(self.voronoi_points.axes)\n if event.inaxes != self.voronoi_points.axes:\n print(\"Not in axis!\")\n return\n print(self.target_point)\n min_dist_squared = 1e10\n close_point = -1;\n for i in range(len(self.points)):\n dist=(event.xdata - self.points[i][0])*(event.xdata - self.points[i][0]) + (event.ydata - self.points[i][1])*(event.ydata - self.points[i][1])\n if dist < min_dist_squared:\n min_dist_squared = dist\n close_point = i\n if sqrt(min_dist_squared)<0.1:\n self.target_point = close_point\n print(self.target_point)\n else:\n self.target_point = -1\n print(self.target_point)\n\n\n def on_release(self, event):\n if event.inaxes != self.voronoi_points.axes:\n print(\"Not in axis!\")\n return\n if self.target_point == -1:\n self.add_point(event)\n else:\n self.points[self.target_point] = [event.xdata, event.ydata, 0]\n self.target_point = -1\n\n self.triangulate_vis()\n\n def on_motion(self, event):\n print(self.target_point)\n if event.inaxes != self.voronoi_points.axes:\n return\n if self.target_point >= 0:\n self.points[self.target_point] = [event.xdata, event.ydata, 0]\n self.triangulate_vis()\n\n\n def add_point(self, event):\n \"\"\"\n Function to add a point on a mouse click\n :param event:\n :return:\n \"\"\"\n\n self.points = np.append(points, [[event.xdata, event.ydata, 0]], axis=0)\n print(self.points)\n\n self.triangulate_vis()\n\n\nif __name__==\"__main__\":\n points = np.array([[6, 4, 2], [9, 5, 8], [9, 1, 9], [8, 9, 1], [3, 8, 8], [2, 6, 2], [8, 2, 10], [3, 6, 1], [9, 8, 9],\n [7, 7, 4],\n [2, 10, 5], [4, 3, 10], [5, 3, 9], [4, 7, 4], [3, 6, 7], [7, 4, 3], [6, 4, 9], [5, 8, 4], [2, 9, 10],\n [7, 8, 6], [9, 2, 7], [6, 10, 7], [9, 9, 3], [2, 9, 4], [5, 9, 6], [4, 8, 9], [9, 1, 2], [6, 9, 1],\n [10, 6, 5], [1, 9, 9], [2, 1, 3], [10, 1, 5], [4, 10, 2]])\n tris = DelaunayTris(points=points)\n plt.show()\n" ]
[ [ "numpy.array", "matplotlib.widgets.Button", "numpy.delete", "matplotlib.widgets.TextBox", "numpy.zeros", "scipy.spatial.Voronoi", "numpy.dot", "numpy.linalg.norm", "numpy.asarray", "matplotlib.pyplot.figure", "matplotlib.pyplot.axes", "numpy.append", "numpy.insert", "matplotlib.pyplot.show", "scipy.spatial.ConvexHull", "matplotlib.pyplot.subplots_adjust", "scipy.spatial.Delaunay", "numpy.cross" ] ]
MoonBlvd/pytorch-i3d
[ "3804ab2e1df018619cd12342dff7976bb302058e" ]
[ "pytorch_c3d.py" ]
[ "import torch\nimport torch.nn as nn\n# from mypath import Path\nimport pdb\nclass C3D(nn.Module):\n \"\"\"\n The C3D network.\n \"\"\"\n\n def __init__(self, num_classes, pretrained=False):\n super(C3D, self).__init__()\n\n self.conv1 = nn.Conv3d(3, 64, kernel_size=(3, 3, 3), padding=(1, 1, 1))\n self.pool1 = nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2))\n\n self.conv2 = nn.Conv3d(64, 128, kernel_size=(3, 3, 3), padding=(1, 1, 1))\n self.pool2 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))\n\n self.conv3a = nn.Conv3d(128, 256, kernel_size=(3, 3, 3), padding=(1, 1, 1))\n self.conv3b = nn.Conv3d(256, 256, kernel_size=(3, 3, 3), padding=(1, 1, 1))\n self.pool3 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))\n\n self.conv4a = nn.Conv3d(256, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1))\n self.conv4b = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1))\n self.pool4 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))\n\n self.conv5a = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1))\n self.conv5b = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1))\n self.pool5 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2), padding=(0, 1, 1))\n \n self.pool_output_size = 8192\n # self.pool_output_size = 512 * 6 * 11\n self.fc6 = nn.Linear(self.pool_output_size, 4096)\n self.fc7 = nn.Linear(4096, 4096)\n self.fc8 = nn.Linear(4096, num_classes)\n\n self.dropout = nn.Dropout(p=0.5)\n\n self.relu = nn.ReLU()\n\n self.__init_weight()\n\n if pretrained:\n self.__load_pretrained_weights()\n\n def forward(self, x, extract_features=False):\n\n x = self.relu(self.conv1(x))\n x = self.pool1(x)\n\n x = self.relu(self.conv2(x))\n x = self.pool2(x)\n\n x = self.relu(self.conv3a(x))\n x = self.relu(self.conv3b(x))\n x = self.pool3(x)\n\n x = self.relu(self.conv4a(x))\n x = self.relu(self.conv4b(x))\n x = self.pool4(x)\n\n x = self.relu(self.conv5a(x))\n x = self.relu(self.conv5b(x))\n x = self.pool5(x)\n x = x.view(-1, self.pool_output_size)\n x = self.fc6(x)\n if extract_features:\n return x\n x = self.relu(x)\n x = self.dropout(x)\n x = self.relu(self.fc7(x))\n x = self.dropout(x)\n \n logits = self.fc8(x)\n\n return logits\n\n def __load_pretrained_weights(self):\n \"\"\"Initialiaze network.\"\"\"\n corresp_name = {\n # Conv1\n \"features.0.weight\": \"conv1.weight\",\n \"features.0.bias\": \"conv1.bias\",\n # Conv2\n \"features.3.weight\": \"conv2.weight\",\n \"features.3.bias\": \"conv2.bias\",\n # Conv3a\n \"features.6.weight\": \"conv3a.weight\",\n \"features.6.bias\": \"conv3a.bias\",\n # Conv3b\n \"features.8.weight\": \"conv3b.weight\",\n \"features.8.bias\": \"conv3b.bias\",\n # Conv4a\n \"features.11.weight\": \"conv4a.weight\",\n \"features.11.bias\": \"conv4a.bias\",\n # Conv4b\n \"features.13.weight\": \"conv4b.weight\",\n \"features.13.bias\": \"conv4b.bias\",\n # Conv5a\n \"features.16.weight\": \"conv5a.weight\",\n \"features.16.bias\": \"conv5a.bias\",\n # Conv5b\n \"features.18.weight\": \"conv5b.weight\",\n \"features.18.bias\": \"conv5b.bias\",\n # # fc6\n # \"classifier.0.weight\": \"fc6.weight\",\n # \"classifier.0.bias\": \"fc6.bias\",\n # # fc7\n # \"classifier.3.weight\": \"fc7.weight\",\n # \"classifier.3.bias\": \"fc7.bias\",\n }\n\n p_dict = torch.load('models/c3d-pretrained.pth')#Path.model_dir()\n s_dict = self.state_dict()\n # pdb.set_trace()\n for name in p_dict:\n if name not in corresp_name:\n continue\n s_dict[corresp_name[name]] = p_dict[name]\n # pdb.set_trace()\n self.load_state_dict(s_dict)\n\n def 
__init_weight(self):\n for m in self.modules():\n if isinstance(m, nn.Conv3d):\n # n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n # m.weight.data.normal_(0, math.sqrt(2. / n))\n torch.nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.BatchNorm3d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\ndef get_1x_lr_params(model):\n \"\"\"\n This generator returns all the parameters for conv and two fc layers of the net.\n \"\"\"\n b = [model.conv1, model.conv2, model.conv3a, model.conv3b, model.conv4a, model.conv4b,\n model.conv5a, model.conv5b, model.fc6, model.fc7]\n for i in range(len(b)):\n for k in b[i].parameters():\n if k.requires_grad:\n yield k\n\ndef get_10x_lr_params(model):\n \"\"\"\n This generator returns all the parameters for the last fc layer of the net.\n \"\"\"\n b = [model.fc8]\n for j in range(len(b)):\n for k in b[j].parameters():\n if k.requires_grad:\n yield k\n\nif __name__ == \"__main__\":\n inputs = torch.rand(1, 3, 16, 112, 112)\n net = C3D(num_classes=101, pretrained=True)\n\n outputs = net.forward(inputs)\n print(outputs.size())" ]
[ [ "torch.nn.Linear", "torch.rand", "torch.nn.Dropout", "torch.nn.MaxPool3d", "torch.nn.init.kaiming_normal_", "torch.nn.ReLU", "torch.nn.Conv3d", "torch.load" ] ]
sefabolge/Clustering-of-Large-Unlabeled-Dataset
[ "c208993f7fe1a50f6758ed57141594cb54239a14" ]
[ "Plotting/willingness_plot.py" ]
[ "# -*- coding: utf-8 -*-\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib as mtl\ndata = [];\ncolorDict = {0: 'blue', 1: 'red', 2: 'green', 3: 'orange' }\nactivity = 0\nlang = 0\ncluster = 0\n\ndef return_colour( cluster ):\n ret = []\n for i in cluster:\n ret.append(str(colorDict[i]))\n return ret\n\n#File should be your analysis file.. \n#file = open('willingness_analysis.csv', 'r')\nfile = open('willingness_analysis_mllib.csv', 'r')\nfor line in file:\n l = line.split(',')\n cluster = int(l[2])\n #userId = int(l[1])\n reputation = int(l[0])\n totalScore = int(l[1])\n data.append([reputation, totalScore, cluster])\n\nimport matplotlib.patches as mpatches\npatch0 = mpatches.Patch(color=colorDict[0], label='Cluster 0')\npatch1 = mpatches.Patch(color=colorDict[1], label='Cluster 1')\npatch2 = mpatches.Patch(color=colorDict[2], label='Cluster 2')\npatch3 = mpatches.Patch(color=colorDict[3], label='Cluster 3')\n\n#tData = list(zip(*data))\n\n#colourList = return_colour(tData[2])\n\nfig, ax = plt.subplots()\n#ax.scatter(tData[1], tData[0],s=1, color=colourList )\nax.set_xlabel('Reputation')\nax.set_ylabel('Answer Count')\n\nfig.suptitle('Willingness analysis in StackOverFlow')\nfig.subplots_adjust(wspace=0.09) \nfig.legend( [patch0, patch1, patch2, patch3],['Cluster 0', 'Cluster 1','Cluster 2', 'Cluster 3'], loc = 'lower right', ncol=2, labelspacing=0. )\nplt.ylim(-10, 23000)\nplt.xlim(-10, 800000)\n#plt.show()\nc0 = filter(lambda l: l[2] == 0,data)\nc1 = filter(lambda l: l[2] == 1,data)\nc2 = filter(lambda l: l[2] == 2,data)\nc3 = filter(lambda l: l[2] == 3,data)\ntotal = data.__len__()\n\nprint('Cluster 0 Size: '+ str(c0.__len__()) + '('+ str(c0.__len__()*100.00/total) +'%)')\nprint('Cluster 1 Size: '+ str(c1.__len__()) + '('+ str(c1.__len__()*100.00/total) +'%)')\nprint('Cluster 2 Size: '+ str(c2.__len__()) + '('+ str(c2.__len__()*100.00/total) +'%)')\nprint('Cluster 3 Size: '+ str(c3.__len__()) + '('+ str(c3.__len__()*100.00/total) +'%)')\nprint('Total Size: '+ str(total))\n" ]
[ [ "matplotlib.patches.Patch", "matplotlib.pyplot.xlim", "matplotlib.pyplot.ylim", "matplotlib.pyplot.subplots" ] ]
nicolalandro/autovc
[ "3dd7489075c0f24400d188b689fe0b170e755e46" ]
[ "test_audio.py" ]
[ "import librosa\nimport numpy as np\nimport pyloudnorm as pyln\nfrom univoc import Vocoder\nimport torch\nimport soundfile as sf\nfrom librosa.filters import mel\nfrom scipy import signal\nfrom scipy.signal import get_window\nfrom numpy.random import RandomState\nfrom collections import OrderedDict\nfrom math import ceil\n\nfrom model_bl import D_VECTOR\nfrom model_vc import Generator\n\n# Melc vocoder\ndef melspectrogram(\n wav,\n sr=16000,\n hop_length=200,\n win_length=800,\n n_fft=2048,\n n_mels=128,\n fmin=50,\n preemph=0.97,\n top_db=80,\n ref_db=20,\n):\n mel = librosa.feature.melspectrogram(\n librosa.effects.preemphasis(wav, coef=preemph),\n sr=sr,\n hop_length=hop_length,\n win_length=win_length,\n n_fft=n_fft,\n n_mels=n_mels,\n fmin=fmin,\n norm=1,\n power=1,\n )\n logmel = librosa.amplitude_to_db(mel, top_db=None) - ref_db\n logmel = np.maximum(logmel, -top_db)\n return logmel / top_db\n\nmeter = pyln.Meter(16000)\n\ndef wav2melV2(wav):\n loudness = meter.integrated_loudness(wav)\n wav = pyln.normalize.loudness(wav, loudness, -24)\n peak = np.abs(wav).max()\n if peak >= 1:\n wav = wav / peak * 0.999\n mel = melspectrogram(wav, n_mels=80)\n mel = np.transpose(mel, (1, 0))\n return mel\n\n# Mel speacker encoder\n\ndef butter_highpass(cutoff, fs, order=5):\n nyq = 0.5 * fs\n normal_cutoff = cutoff / nyq\n b, a = signal.butter(order, normal_cutoff, btype='high', analog=False)\n return b, a\n \n \ndef pySTFT(x, fft_length=1024, hop_length=256):\n \n x = np.pad(x, int(fft_length//2), mode='reflect')\n \n noverlap = fft_length - hop_length\n shape = x.shape[:-1]+((x.shape[-1]-noverlap)//hop_length, fft_length)\n strides = x.strides[:-1]+(hop_length*x.strides[-1], x.strides[-1])\n result = np.lib.stride_tricks.as_strided(x, shape=shape,\n strides=strides)\n \n fft_window = get_window('hann', fft_length, fftbins=True)\n result = np.fft.rfft(fft_window * result, n=fft_length).T\n \n return np.abs(result) \n\n\n\nmel_basis = mel(16000, 1024, fmin=90, fmax=7600, n_mels=80).T\nmin_level = np.exp(-100 / 20 * np.log(10))\nb, a = butter_highpass(30, 16000, order=5)\n\ndef prepare_spectrogram(path, rd_int=None):\n x, fs = sf.read(path)\n y = signal.filtfilt(b, a, x)\n if rd_int is None:\n rd_int = int(path.split('/')[-2][1:])\n prng = RandomState(rd_int) # cosa vuol dire?\n wav = y * 0.96 + (prng.rand(y.shape[0])-0.5)*1e-06\n D = pySTFT(wav).T\n D_mel = np.dot(D, mel_basis)\n D_db = 20 * np.log10(np.maximum(min_level, D_mel)) - 16\n S = np.clip((D_db + 100) / 100, 0, 1) \n S = S.astype(np.float32)\n return S\n\n# Speacker encoder\nC = D_VECTOR(dim_input=80, dim_cell=768, dim_emb=256).eval().cuda()\nc_checkpoint = torch.load('3000000-BL.ckpt')\nnew_state_dict = OrderedDict()\nfor key, val in c_checkpoint['model_b'].items():\n new_key = key[7:]\n new_state_dict[new_key] = val\nC.load_state_dict(new_state_dict)\nnum_uttrs = 10\nlen_crop = 128\n\n\ndef process_speacker(tmp):\n left = np.random.randint(0, tmp.shape[0]-len_crop)\n melsp = torch.from_numpy(tmp[np.newaxis, left:left+len_crop, :]).cuda()\n emb = C(melsp)\n return emb.detach().squeeze().cpu().numpy()\n\n\n# AUTOVC\ndef pad_seq(x, base=32):\n len_out = int(base * ceil(float(x.shape[0])/base))\n len_pad = len_out - x.shape[0]\n assert len_pad >= 0\n return np.pad(x, ((0,len_pad),(0,0)), 'constant'), len_pad\n\ndef prepare_input(s1, emb1, emb2, G):\n x_org, len_pad = pad_seq(s1)\n uttr_org = torch.from_numpy(x_org[np.newaxis, :, :]).to(device)\n emb_org = torch.from_numpy(emb1[np.newaxis, :]).to(device)\n \n emb_trg = 
torch.from_numpy(emb2[np.newaxis, :]).to(device)\n \n with torch.no_grad():\n _, x_identic_psnt, _ = G(uttr_org, emb_org, emb_trg)\n if len_pad == 0:\n uttr_trg = x_identic_psnt[0, 0, :, :]\n else:\n uttr_trg = x_identic_psnt[0, 0, :-len_pad, :]\n return uttr_trg\n\n\n\n# DATA\ndevice = 'cuda:0'\npath1=\"wavs/i300/galatea_01_barrili_f000001.wav\"\npath2=\"wavs/i301/imalavoglia_00_verga_f000002.wav\"\n\n# PreProcessing\nwav1, _ = librosa.load(path1, sr=16000)\nvocoder_mel1 = wav2melV2(wav1)\nspeacker_encoder_mel1 = prepare_spectrogram(path1, rd_int=0)\n\nwav2, _ = librosa.load(path2, sr=16000)\nvocoder_mel2 = wav2melV2(wav2)\nspeacker_encoder_mel2 = prepare_spectrogram(path2, rd_int=1)\n\n# Encod\nemb1 = process_speacker(speacker_encoder_mel1)\nemb2 = process_speacker(speacker_encoder_mel2)\n\n# AutoVC\nG = Generator(32,256,512,32).eval().to(device)\nmodel_path = '/home/super/Models/autovc_simple/generator_run2.pth'\ng_checkpoint = torch.load(model_path, map_location=device)\nG.load_state_dict(g_checkpoint)\n\nspect_vc1 = prepare_input(vocoder_mel1, emb1, emb2, G)\n\n# vocoder\nvocoder = Vocoder.from_pretrained(\n \"https://github.com/bshall/UniversalVocoding/releases/download/v0.2/univoc-ljspeech-7mtpaq.pt\",\n).cuda()\n\ntorch_mel = spect_vc1.unsqueeze(dim=0)\ntorch_mel = torch_mel.cuda()\nwith torch.no_grad():\n wav2, sr = vocoder.generate(torch_mel)\n\npath2= \"out.wav\"\nsf.write(path2, wav2, sr)\n\n" ]
[ [ "numpy.clip", "numpy.lib.stride_tricks.as_strided", "numpy.dot", "numpy.fft.rfft", "numpy.log", "numpy.random.RandomState", "numpy.pad", "torch.no_grad", "scipy.signal.butter", "torch.from_numpy", "scipy.signal.filtfilt", "numpy.transpose", "scipy.signal.get_window", "torch.load", "numpy.abs", "numpy.random.randint", "numpy.maximum" ] ]
Barry-lab/SpatialAutoDACQ
[ "f39341ea5c1a51c328ec43dba8e4d9a8f7d49a48" ]
[ "openEPhys_DACQ/NWBio.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport h5py\nimport numpy as np\nimport os\nimport sys\nfrom openEPhys_DACQ.HelperFunctions import tetrode_channels, channels_tetrode, closest_argmin\nfrom pprint import pprint\nfrom copy import copy\nimport argparse\nimport importlib\nfrom tqdm import tqdm\n\n\ndef OpenEphys_SamplingRate():\n return 30000\n\n\ndef bitVolts():\n return 0.195\n\n\ndef spike_waveform_leftwards_shift():\n \"\"\"Returns the leftwards shift of waveforms from detection point in seconds.\n\n :return:\n :rtype: float\n \"\"\"\n return 6 * (1.0 / OpenEphys_SamplingRate())\n\n\ndef get_filename(path):\n if not os.path.isfile(path):\n return os.path.join(path, 'experiment_1.nwb')\n else:\n return path\n\n\ndef delete_path_in_file(filename, path):\n with h5py.File(filename, 'r+') as h5file:\n del h5file[path]\n\n\ndef get_recordingKey(filename):\n with h5py.File(filename, 'r') as h5file:\n return list(h5file['acquisition']['timeseries'].keys())[0]\n\n\ndef get_all_processorKeys(filename):\n with h5py.File(filename, 'r') as h5file:\n return list(h5file['acquisition']['timeseries'][get_recordingKey(filename)]['continuous'].keys())\n\n\ndef get_processorKey(filename):\n return get_all_processorKeys(filename)[0]\n\n\ndef get_all_processor_paths(filename):\n return ['/acquisition/timeseries/' + get_recordingKey(filename)\n + '/continuous/' + processorKey\n for processorKey in get_all_processorKeys(filename)]\n\n\ndef get_processor_path(filename):\n return '/acquisition/timeseries/' + get_recordingKey(filename) \\\n + '/continuous/' + get_processorKey(filename)\n\n\ndef check_if_open_ephys_nwb_file(filename):\n \"\"\"\n Returns True if processor path can be identified\n in the file and False otherwise.\n \"\"\"\n try:\n processor_path = get_processor_path(filename)\n with h5py.File(filename, 'r') as h5file:\n return processor_path in h5file\n except:\n return False\n\n\ndef get_downsampled_data_paths(filename):\n \"\"\"\n Returns paths to downsampled data in NWB file.\n\n :param filename: path to NWB file\n :type filename: str\n :return: paths\n :rtype: dict\n \"\"\"\n processor_path = get_processor_path(filename)\n return {'tetrode_data': processor_path + '/downsampled_tetrode_data/',\n 'aux_data': processor_path + '/downsampled_AUX_data/',\n 'timestamps': processor_path + '/downsampled_timestamps/',\n 'info': processor_path + '/downsampling_info/'}\n\n\ndef check_if_downsampled_data_available(filename):\n \"\"\"\n Checks if downsampled data is available in the NWB file.\n\n :param filename: path to NWB file\n :type filename: str\n :return: available\n :rtype: bool\n \"\"\"\n paths = get_downsampled_data_paths(filename)\n with h5py.File(filename, 'r') as h5file:\n # START Workaround for older downsampled datasets\n if '/acquisition/timeseries/recording1/continuous/processor102_100/tetrode_lowpass' in h5file:\n return True\n # END Workaround for older downsampled datasets\n for path in [paths[key] for key in paths]:\n if not (path in h5file):\n return False\n if h5file[paths['tetrode_data']].shape[0] == 0:\n return False\n if h5file[paths['tetrode_data']].shape[0] \\\n != h5file[paths['timestamps']].shape[0] \\\n != h5file[paths['aux_data']].shape[0]:\n return False\n\n return True\n\n\ndef get_raw_data_paths(filename):\n \"\"\"\n Returns paths to downsampled data in NWB file.\n\n :param filename: path to NWB file\n :type filename: str\n :return: paths\n :rtype: dict\n \"\"\"\n processor_path = get_processor_path(filename)\n return {'continuous': processor_path + '/data',\n 'timestamps': 
processor_path + '/timestamps'}\n\n\ndef check_if_raw_data_available(filename):\n \"\"\"\n Returns paths to raw data in NWB file.\n\n :param filename:\n :type filename: str\n :return: paths\n :rtype: dict\n \"\"\"\n paths = get_raw_data_paths(filename)\n if all([check_if_path_exists(filename, paths[key]) for key in paths]):\n return True\n else:\n return False\n\n\ndef save_downsampling_info_to_disk(filename, info):\n # Get paths to respective dataset locations\n paths = get_downsampled_data_paths(filename)\n # Ensure dictionary fields are in correct format\n info = {'original_sampling_rate': np.int64(info['original_sampling_rate']),\n 'downsampled_sampling_rate': np.int64(info['downsampled_sampling_rate']),\n 'downsampled_channels': np.array(info['downsampled_channels'], dtype=np.int64)}\n # Write data to disk\n with h5py.File(filename, 'r+') as h5file:\n recursively_save_dict_contents_to_group(h5file, paths['info'], info)\n\n\ndef save_downsampled_data_to_disk(filename, tetrode_data, timestamps, aux_data, info):\n # Get paths to respective dataset locations\n paths = get_downsampled_data_paths(filename)\n # Write data to disk\n save_downsampling_info_to_disk(filename, info)\n with h5py.File(filename, 'r+') as h5file:\n h5file[paths['tetrode_data']] = tetrode_data\n h5file[paths['timestamps']] = timestamps\n h5file[paths['aux_data']] = aux_data\n\n\ndef delete_raw_data(filename, only_if_downsampled_data_available=True):\n if only_if_downsampled_data_available:\n if not check_if_downsampled_data_available(filename):\n print('Warning', 'Downsampled data not available in NWB file. Raw data deletion aborted.')\n return None\n if not check_if_raw_data_available(filename):\n print('Warning', 'Raw data not available to be deleted in: ' + filename)\n else:\n raw_data_paths = get_raw_data_paths(filename)\n with h5py.File(filename,'r+') as h5file:\n for path in [raw_data_paths[key] for key in raw_data_paths]:\n del h5file[path]\n\n\ndef repack_NWB_file(filename, replace_original=True, check_validity_with_downsampled_data=True):\n # Create a repacked copy of the file\n os.system('h5repack ' + filename + ' ' + (filename + '.repacked'))\n # Check that the new file is not corrupted\n if check_validity_with_downsampled_data:\n if not check_if_downsampled_data_available(filename):\n raise Exception('Downsampled data cannot be found in repacked file. 
Original file not replaced.')\n # Replace original file with repacked file\n if replace_original:\n os.system('mv ' + (filename + '.repacked') + ' ' + filename)\n\n\ndef repack_all_nwb_files_in_directory_tree(folder_path, replace_original=True,\n check_validity_with_downsampled_data=True):\n # Commence directory walk\n for dir_name, subdirList, fileList in os.walk(folder_path):\n for fname in fileList:\n fpath = os.path.join(dir_name, fname)\n if fname == 'experiment_1.nwb':\n print('Repacking file {}'.format(fpath))\n repack_NWB_file(fpath, replace_original=replace_original,\n check_validity_with_downsampled_data=check_validity_with_downsampled_data)\n\n\ndef list_AUX_channels(filename, n_tetrodes):\n data = load_continuous(filename)\n n_channels = data['continuous'].shape[1]\n data['file_handle'].close()\n aux_chan_list = range(n_tetrodes * 4 - 1, n_channels)\n\n return aux_chan_list\n\n\ndef load_continuous(filename):\n # Load data file\n h5file = h5py.File(filename, 'r')\n # Load timestamps and continuous data\n recordingKey = get_recordingKey(filename)\n processorKey = get_processorKey(filename)\n path = '/acquisition/timeseries/' + recordingKey + '/continuous/' + processorKey\n if check_if_path_exists(filename, path + '/data'):\n continuous = h5file[path + '/data'] # not converted to microvolts!!!! need to multiply by 0.195\n timestamps = h5file[path + '/timestamps']\n data = {'continuous': continuous, 'timestamps': timestamps, 'file_handle': h5file}\n else:\n data = None\n\n return data\n\n\ndef load_raw_data_timestamps_as_array(filename):\n data = load_continuous(filename)\n timestamps = np.array(data['timestamps']).squeeze()\n data['file_handle'].close()\n\n return timestamps\n\n\ndef load_data_columns_as_array(filename, data_path, first_column, last_column):\n \"\"\"\n Loads a contiguous columns of dataset efficiently from HDF5 dataset.\n \"\"\"\n with h5py.File(filename, 'r') as h5file:\n data = h5file[data_path]\n data = h5file[data_path][:, first_column:last_column]\n\n return data\n\n\ndef load_data_as_array(filename, data_path, columns):\n \"\"\"\n Fast way of reading a single column or a set of columns.\n \n filename - str - full path to file\n columns - list - column numbers to include (starting from 0).\n Single column can be given as a single list element or int.\n Columns in the list must be in sorted (ascending) order.\n \"\"\"\n # Make columns variable into a list if int given\n if isinstance(columns, int):\n columns = [columns]\n # Check that all elements of columns are integers\n if isinstance(columns, list):\n for column in columns:\n if not isinstance(column, int):\n raise ValueError('columns argument must be a list of int values.')\n else:\n raise ValueError('columns argument must be list or int.')\n # Check that column number are sorted\n if sorted(columns) != columns:\n raise ValueError('columns was not in sorted (ascending) order.')\n # Check that data is available, otherwise return None\n if not check_if_path_exists(filename, data_path):\n raise ValueError('File ' + filename + '\\n'\n + 'Does not contain path ' + data_path)\n # Find contiguous column groups\n current_column = columns[0]\n column_groups = [current_column]\n for i in range(1, len(columns)):\n if (columns[i] - columns[i - 1]) == 1:\n column_groups.append(current_column)\n else:\n column_groups.append(columns[i])\n current_column = columns[i]\n # Find start and end column numbers for contiguous groups\n column_ranges = []\n for first_channel in sorted(set(column_groups)):\n last_channel = 
first_channel + column_groups.count(first_channel)\n column_ranges.append((first_channel, last_channel))\n # Get contiguous column segments for each group\n column_group_data = []\n for column_range in column_ranges:\n column_group_data.append(\n load_data_columns_as_array(filename, data_path, *column_range))\n # Concatenate column groups\n data = np.concatenate(column_group_data, axis=1)\n\n return data\n\n\ndef load_continuous_as_array(filename, channels):\n \"\"\"\n Fast way of reading a single channel or a set of channels.\n \n filename - str - full path to file\n channels - list - channel numbers to include (starting from 0).\n Single channel can be given as a single list element or int.\n Channels in the list must be in sorted (ascending) order.\n \"\"\"\n # Generate path to raw continuous data\n root_path = '/acquisition/timeseries/' + get_recordingKey(filename) \\\n + '/continuous/' + get_processorKey(filename)\n data_path = root_path + '/data'\n timestamps_path = root_path + '/timestamps'\n # Check that data is available, otherwise return None\n if not check_if_path_exists(filename, data_path):\n return None\n if not check_if_path_exists(filename, timestamps_path):\n return None\n # Load continuous data\n continuous = load_data_as_array(filename, data_path, channels)\n # Load timestamps for data\n with h5py.File(filename, 'r') as h5file:\n timestamps = np.array(h5file[timestamps_path])\n # Arrange output into a dictionary\n data = {'continuous': continuous, 'timestamps': timestamps}\n\n return data\n\n\ndef remove_surrounding_binary_markers(text):\n if text.startswith(\"b'\"):\n text = text[2:]\n if text.endswith(\"'\"):\n text = text[:-1]\n return text\n\n\ndef get_downsampling_info_old(filename):\n # Generate path to downsampling data info\n root_path = '/acquisition/timeseries/' + get_recordingKey(filename) \\\n + '/continuous/' + get_processorKey(filename)\n data_path = root_path + '/downsampling_info'\n # Load info from file\n with h5py.File(filename, 'r') as h5file:\n data = h5file[data_path]\n data = [str(i) for i in data]\n # Remove b'x' markers from strings if present. 
Python 3 change.\n data = list(map(remove_surrounding_binary_markers, data))\n # Parse elements in loaded data\n info_dict = {}\n for x in data:\n key, value = x.split(' ')\n if key == 'original_sampling_rate':\n info_dict[key] = np.int64(value)\n elif key == 'downsampled_sampling_rate':\n info_dict[key] = np.int64(value)\n elif key == 'downsampled_channels':\n info_dict[key] = np.array(list(map(int, value.split(',')))).astype(np.int64)\n\n return info_dict\n\n\ndef get_downsampling_info(filename):\n root_path = '/acquisition/timeseries/' + get_recordingKey(filename) \\\n + '/continuous/' + get_processorKey(filename)\n data_path = root_path + '/downsampling_info/'\n with h5py.File(filename, 'r') as h5file:\n return recursively_load_dict_contents_from_group(h5file, data_path)\n\n\ndef load_downsampled_tetrode_data_as_array(filename, tetrode_nrs):\n \"\"\"\n Returns a dict with downsampled continuous data for requested tetrodes\n \n filename - str - full path to file\n tetrode_nrs - list - tetrode numbers to include (starting from 0).\n Single tetrode can be given as a single list element or int.\n Tetrode numbers in the list must be in sorted (ascending) order.\n If data is not available for a given tetrode number, error is raised.\n \"\"\"\n # Generate path to raw continuous data\n root_path = '/acquisition/timeseries/' + get_recordingKey(filename) \\\n + '/continuous/' + get_processorKey(filename)\n data_path = root_path + '/downsampled_tetrode_data'\n timestamps_path = root_path + '/downsampled_timestamps'\n # Check that data is available, otherwise return None\n if not check_if_path_exists(filename, data_path):\n return None\n if not check_if_path_exists(filename, timestamps_path):\n return None\n # Get info on downsampled data\n info = get_downsampling_info(filename)\n sampling_rate = int(info['downsampled_sampling_rate'])\n downsampled_channels = list(info['downsampled_channels'])\n # Map tetrode_nrs elements to columns in downsampled_tetrode_data\n columns = []\n channels_used = []\n tetrode_nrs_remaining = copy(tetrode_nrs)\n for tetrode_nr in tetrode_nrs:\n for chan in tetrode_channels(tetrode_nr):\n if chan in downsampled_channels:\n columns.append(downsampled_channels.index(chan))\n channels_used.append(chan)\n tetrode_nrs_remaining.pop(tetrode_nrs_remaining.index(tetrode_nr))\n break\n # Check that all tetrode numbers were mapped\n if len(tetrode_nrs_remaining) > 0:\n raise Exception('The following tetrodes were not represented in downsampled data\\n' \\\n + ','.join(list(map(str, tetrode_nrs_remaining))))\n # Load continuous data\n continuous = load_data_as_array(filename, data_path, columns)\n # Load timestamps for data\n with h5py.File(filename, 'r') as h5file:\n timestamps = np.array(h5file[timestamps_path])\n # Arrange output into a dictionary\n data = {'continuous': continuous, 'timestamps': timestamps,\n 'tetrode_nrs': tetrode_nrs, 'channels': channels_used,\n 'sampling_rate': sampling_rate}\n\n return data\n\n\ndef empty_spike_data():\n \"\"\"\n Creates a fake waveforms of 0 values and at timepoint 0\n \"\"\"\n waveforms = np.zeros((1,4,40), dtype=np.int16)\n timestamps = np.array([0], dtype=np.float64)\n\n return {'waveforms': waveforms, 'timestamps': timestamps}\n\n\ndef get_tetrode_nrs_if_spikes_available(filename, spike_name='spikes'):\n \"\"\"\n Returns a list of tetrode numbers if spikes available in NWB file.\n \"\"\"\n spikes_path = '/acquisition/timeseries/' + get_recordingKey(filename) + '/' + spike_name + '/'\n # Get tetrode keys if available\n with 
h5py.File(filename, 'r') as h5file:\n if not (spikes_path in h5file):\n # Return empty list if spikes data not available\n return []\n tetrode_keys = list(h5file[spikes_path].keys())\n # Return empty list if spikes not available on any tetrode\n if len(tetrode_keys) == 0:\n return []\n # Extract tetrode numbers\n tetrode_nrs = []\n for tetrode_key in tetrode_keys:\n tetrode_nrs.append(int(tetrode_key[9:]) - 1)\n # Sort tetrode numbers in ascending order\n tetrode_nrs.sort()\n\n return tetrode_nrs\n\n\ndef construct_paths_to_tetrode_spike_data(filename, tetrode_nrs, spike_name='spikes'):\n spikes_path = '/acquisition/timeseries/' + get_recordingKey(filename) + '/' + spike_name + '/'\n return [(spikes_path + 'electrode' + str(tetrode_nr + 1) + '/') for tetrode_nr in tetrode_nrs]\n\n\ndef count_spikes(filename, tetrode_nrs, spike_name='spikes', use_idx_keep=False):\n \"\"\"\n :param filename: full path to NWB file\n :type filename: str\n :param tetrode_nrs: tetrode numbers to count spikes for\n :type tetrode_nrs: list\n :param spike_name: type of spikes to look for (field in NWB file)\n :type spike_name: str\n :param use_idx_keep: If False (default) all spikes are counted, otherwise only filtered spikes are counted\n :type use_idx_keep: bool\n :return: total number of spikes on each tetrode\n :rtype: list\n \"\"\"\n tetrode_paths = construct_paths_to_tetrode_spike_data(filename, tetrode_nrs, spike_name=spike_name)\n count = []\n with h5py.File(filename, 'r') as h5file:\n for tetrode_path in tetrode_paths:\n if use_idx_keep:\n count.append(sum(np.array(h5file[tetrode_path + 'idx_keep'][()]).squeeze()))\n else:\n count.append(h5file[tetrode_path + 'timestamps/'].shape[0])\n\n return count\n\n\ndef load_spikes(filename, spike_name='spikes', tetrode_nrs=None, use_idx_keep=False,\n use_badChan=False, no_waveforms=False, clustering_name=None, verbose=True):\n \"\"\"\n Inputs:\n filename - pointer to NWB file to load\n tetrode_nrs [list] - can be a list of tetrodes to load (from 0)\n use_idx_keep [bool] - if True, only outputs spikes according to idx_keep of tetrode, if available\n use_badChan [bool] - if True, sets all spikes on badChannels to 0\n no_waveforms [bool] - if True, waveforms are not loaded\n clustering_name [str] - if specified, clusterID will be loaded from:\n electrode[nr]/clustering/clustering_name\n verbose [bool] - prints out loading progress bar if True (default)\n Output:\n List of dictionaries for each tetrode in correct order where:\n List is empty, if no spike data detected\n 'waveforms' is a list of tetrode waveforms in the order of channels\n 'timestamps' is a list of spike detection timestamps corresponding to 'waveforms'\n If available, two more variables will be in the dictionary\n 'idx_keep' is boolan index for 'waveforms' and 'timestamps' indicating the spikes\n that are to be used for further processing (based on filtering for artifacts etc)\n 'clusterIDs' is the cluster identities of spikes in 'waveforms'['idx_keep',:,:]\n \"\"\"\n # If not provided, get tetrode_nrs\n if tetrode_nrs is None:\n tetrode_nrs = get_tetrode_nrs_if_spikes_available(filename, spike_name=spike_name)\n tetrode_paths = construct_paths_to_tetrode_spike_data(filename, tetrode_nrs, spike_name=spike_name)\n with h5py.File(filename, 'r') as h5file:\n # Put waveforms and timestamps into a list of dictionaries in correct order\n data = []\n if verbose:\n print('Loading tetrodes from {}'.format(filename))\n iterable = zip(tetrode_nrs, tetrode_paths)\n for nr_tetrode, tetrode_path in (tqdm(iterable, 
total=len(tetrode_nrs)) if verbose else iterable):\n # Load waveforms and timestamps\n if no_waveforms:\n waveforms = empty_spike_data()['waveforms']\n else:\n waveforms = h5file[tetrode_path + 'data/'][()]\n timestamps = h5file[tetrode_path + 'timestamps/'][()]\n if not isinstance(timestamps, np.ndarray):\n timestamps = np.array([timestamps])\n if waveforms.shape[0] == 0:\n # If no waveforms are available, enter one waveform of zeros at timepoint zero\n waveforms = empty_spike_data()['waveforms']\n timestamps = empty_spike_data()['timestamps']\n # Arrange waveforms, timestamps and nr_tetrode into a dictionary\n tet_data = {'waveforms': waveforms,\n 'timestamps': timestamps,\n 'nr_tetrode': nr_tetrode}\n # Include idx_keep if available\n idx_keep_path = tetrode_path + 'idx_keep'\n if idx_keep_path in h5file:\n tet_data['idx_keep'] = np.array(h5file[idx_keep_path][()])\n if use_idx_keep:\n # If requested, filter wavefoms and timestamps based on idx_keep\n if np.sum(tet_data['idx_keep']) == 0:\n tet_data['waveforms'] = empty_spike_data()['waveforms']\n tet_data['timestamps'] = empty_spike_data()['timestamps']\n else:\n if not no_waveforms:\n tet_data['waveforms'] = tet_data['waveforms'][tet_data['idx_keep'], :, :]\n tet_data['timestamps'] = tet_data['timestamps'][tet_data['idx_keep']]\n # Include clusterIDs if available\n if clustering_name is None:\n clusterIDs_path = tetrode_path + 'clusterIDs'\n else:\n clusterIDs_path = tetrode_path + '/clustering/' + clustering_name\n if clusterIDs_path in h5file:\n tet_data['clusterIDs'] = np.int16(h5file[clusterIDs_path][()]).squeeze()\n # Set spikes to zeros for channels in badChan list if requested\n if use_badChan and not no_waveforms:\n badChan = listBadChannels(filename)\n if len(badChan) > 0:\n for nchan in tetrode_channels(nr_tetrode):\n if nchan in badChan:\n tet_data['waveforms'][:, np.mod(nchan, 4), :] = 0\n data.append(tet_data)\n \n return data\n\ndef save_spikes(filename, tetrode_nr, data, timestamps, spike_name='spikes', overwrite=False):\n \"\"\"\n Stores spike data in NWB file in the same format as with OpenEphysGUI.\n tetrode_nr=0 for first tetrode.\n \"\"\"\n if data.dtype != np.int16:\n raise ValueError('Waveforms are not int16.')\n if timestamps.dtype != np.float64:\n raise ValueError('Timestamps are not float64.')\n recordingKey = get_recordingKey(filename)\n path = '/acquisition/timeseries/' + get_recordingKey(filename) + '/' + spike_name + '/' + \\\n 'electrode' + str(tetrode_nr + 1) + '/'\n if check_if_path_exists(filename, path):\n if overwrite:\n # If overwrite is true, path is first cleared\n with h5py.File(filename, 'r+') as h5file:\n del h5file[path]\n else:\n raise Exception('Spikes already in file and overwrite not requested.\\n' \\\n + 'File: ' + filename + '\\n' \\\n + 'path: ' + path)\n with h5py.File(filename, 'r+') as h5file:\n h5file[path + 'data'] = data\n h5file[path + 'timestamps'] = np.float64(timestamps).squeeze()\n\ndef processing_method_and_spike_name_combinations():\n \"\"\"\n Outputs a list of potential processing_method and spike_name combinations\n \"\"\"\n processing_methods = ['klustakwik', 'klustakwik_raw', 'kilosort']\n spike_names = ['spikes', 'spikes_raw', 'spikes_kilosort']\n\n return processing_methods, spike_names\n\ndef get_spike_name_for_processing_method(processing_method):\n processing_methods, spike_names = processing_method_and_spike_name_combinations()\n spike_name = spike_names[processing_methods.index(processing_method)]\n\n return spike_name\n\ndef load_events(filename, 
internally_generated=False):\n # Outputs a dictionary timestamps and eventIDs for TTL signals received\n # timestamps are in seconds, aligned to timestamps of continuous recording\n # eventIDs indicate TTL channel number (starting from 1) and are positive for rising signals\n\n if internally_generated:\n ttl_type = 'ttl2'\n else:\n ttl_type = 'ttl1'\n\n # Load data file\n recordingKey = get_recordingKey(filename)\n with h5py.File(filename, 'r') as h5file:\n # Load timestamps and TLL signal info\n timestamps = h5file['acquisition']['timeseries'][recordingKey]['events'][ttl_type]['timestamps'][()]\n eventID = h5file['acquisition']['timeseries'][recordingKey]['events'][ttl_type]['data'][()]\n data = {'eventID': eventID, 'timestamps': timestamps}\n\n return data\n\ndef load_GlobalClock_timestamps(filename, GlobalClock_TTL_channel=1):\n \"\"\"\n Returns timestamps of GlobalClock TTL pulses.\n \"\"\"\n data = load_events(filename)\n return data['timestamps'][data['eventID'] == GlobalClock_TTL_channel]\n\n\ndef load_open_ephys_generated_ttl_events(filename):\n \"\"\"Returns Open Ephys generated TTL pulse events with channel numbers and timestamps\n\n :param str filename: full path to NWB file\n :return: channel_event, timestamps\n \"\"\"\n data = load_events(filename, internally_generated=True)\n return data['eventID'], data['timestamps']\n\n\ndef load_network_events(filename):\n \"\"\"returns network_events_data\n\n Extracts the list of network messages from NWB file \n and returns it along with corresponding timestamps\n in dictionary with keys ['messages', 'timestamps']\n \n 'messages' - list of str\n \n 'timestamps' - list of float\n\n :param filename: full path to NWB file\n :type filename: str\n :return: network_events_data\n :rtype: dict\n \"\"\"\n # Load data file\n recordingKey = get_recordingKey(filename)\n with h5py.File(filename, 'r') as h5file:\n # Load timestamps and messages\n timestamps = h5file['acquisition']['timeseries'][recordingKey]['events']['text1']['timestamps'][()]\n messages = h5file['acquisition']['timeseries'][recordingKey]['events']['text1']['data'][()]\n messages = [x.decode('utf-8') for x in messages]\n timestamps = [float(x) for x in timestamps]\n\n data = {'messages': messages, 'timestamps': timestamps}\n\n return data\n\n\ndef check_if_path_exists(filename, path):\n with h5py.File(filename, 'r') as h5file:\n return path in h5file\n\n\ndef save_list_of_dicts_to_group(h5file, path, dlist, overwrite=False, list_suffix='_NWBLIST'):\n # Check that all elements are dictionaries\n for dic in dlist:\n if not isinstance(dic, dict):\n raise Exception('List elements must be dictionaries')\n # Write elements to file\n for i, dic in enumerate(dlist):\n recursively_save_dict_contents_to_group(h5file, (path + str(i) + '/'), dic,\n overwrite=overwrite, list_suffix=list_suffix)\n\n\ndef recursively_save_dict_contents_to_group(h5file, path, dic, overwrite=False, list_suffix='_NWBLIST', verbose=False):\n \"\"\"\n h5file - h5py.File\n path - str - path to group in h5file. 
Must end with '/'\n overwrite - bool - any dictionary elements or lists that already exist are overwritten.\n Default is False, if elements already exist in NWB file, error is raised.\n list_suffix - str - suffix used to highlight paths created from lists of dictionaries.\n Must be consistent when saving and loading data.\n verbose - bool - If True (default is False), h5file path used is printed for each recursion\n\n Only works with: numpy arrays, numpy int64 or float64, strings, bytes, lists of strings and dictionaries these are contained in.\n Also works with lists dictionaries as part of the hierachy.\n Long lists of dictionaries are discouraged, as individual groups are created for each element.\n \"\"\"\n if verbose:\n print(path)\n if len(dic) == 0:\n if path in h5file:\n del h5file[path]\n h5file.create_group(path)\n for key, item in dic.items():\n if isinstance(item, (int, float)):\n item = np.array(item)\n if isinstance(item, (np.ndarray, np.int64, np.float64, str, bytes)):\n if overwrite:\n if path + key in h5file:\n del h5file[path + key]\n h5file[path + key] = item\n elif isinstance(item, dict):\n recursively_save_dict_contents_to_group(h5file, path + key + '/', item,\n overwrite=overwrite, list_suffix=list_suffix,\n verbose=verbose)\n elif isinstance(item, list):\n if all(isinstance(i, str) for i in item):\n if overwrite:\n if path + key in h5file:\n del h5file[path + key]\n asciiList = [n.encode(\"ascii\", \"ignore\") for n in item]\n h5file[path + key] = h5file.create_dataset(None, (len(asciiList),),'S100', asciiList)\n else:\n if overwrite:\n if path + key + list_suffix in h5file:\n del h5file[path + key + list_suffix]\n save_list_of_dicts_to_group(h5file, path + key + list_suffix + '/', item, \n overwrite=overwrite, list_suffix=list_suffix)\n elif item is None:\n h5file.create_group(path + key)\n else:\n raise ValueError('Cannot save %s type'%type(item) + ' from ' + path + key)\n\n\ndef convert_bytes_to_string(b):\n \"\"\"\n If input is bytes, returns str decoded with utf-8\n\n :param b:\n :type b: bytes\n :return: string decoded with utf-8 if input is bytes object, otherwise returns unchanged input\n :rtype: str\n \"\"\"\n if isinstance(b, bytes):\n if sys.version_info >= (3, 0):\n return str(b, 'utf-8')\n else:\n return str(b.decode('utf-8'))\n else:\n return b\n\n\ndef load_list_of_dicts_from_group(h5file, path, list_suffix='_NWBLIST', ignore=()):\n # Load all elements on this path\n items = []\n for key in list(h5file[path].keys()):\n items.append(\n (int(key), recursively_load_dict_contents_from_group(h5file, path + key + '/', \n list_suffix=list_suffix,\n ignore=ignore))\n )\n # Create a list from items sorted by group keys\n ans = [item for _, item in sorted(items)]\n\n return ans\n\n\ndef recursively_load_dict_contents_from_group(h5file, path, list_suffix='_NWBLIST', ignore=()):\n \"\"\"\n Returns value at path if it has no further items\n\n h5file - h5py.File\n path - str - path to group in h5file. 
Must end with '/'\n list_suffix - str - suffix used to highlight paths created from lists of dictionaries.\n Must be consistent when saving and loading data.\n ignore - tuple - paths including elements matching any element in this tuple return None\n \"\"\"\n if not path.endswith('/'):\n raise ValueError('Input path must end with \"/\"')\n\n if path.split('/')[-2] in ignore or path.split('/')[-2][:-len(list_suffix)] in ignore:\n ans = None\n\n elif path[:-1].endswith(list_suffix):\n ans = load_list_of_dicts_from_group(h5file, path, list_suffix=list_suffix,\n ignore=ignore)\n elif hasattr(h5file[path], 'items'):\n\n ans = {}\n for key, item in h5file[path].items():\n\n if key.endswith(list_suffix):\n ans[str(key)[:-len(list_suffix)]] = load_list_of_dicts_from_group(\n h5file, path + key + '/', list_suffix=list_suffix,\n ignore=ignore\n )\n\n elif isinstance(item, h5py._hl.dataset.Dataset):\n if 'S100' == item.dtype:\n tmp = list(item[()])\n ans[str(key)] = [convert_bytes_to_string(i) for i in tmp]\n elif item.dtype == 'bool' and item.ndim == 0:\n ans[str(key)] = np.array(bool(item[()]))\n else:\n ans[str(key)] = convert_bytes_to_string(item[()])\n\n elif isinstance(item, h5py._hl.group.Group):\n ans[str(key)] = recursively_load_dict_contents_from_group(h5file, path + key + '/',\n ignore=ignore)\n\n else:\n ans = convert_bytes_to_string(h5file[path][()])\n\n return ans\n\n\ndef save_settings(filename, Settings, path='/'):\n \"\"\"\n Writes into an existing file if path is not yet used.\n Creates a new file if filename does not exist.\n Only works with: numpy arrays, numpy int64 or float64, strings, bytes, lists of strings and dictionaries these are contained in.\n To save specific subsetting, e.g. TaskSettings, use:\n Settings=TaskSetttings, path='/TaskSettings/'\n \"\"\"\n full_path = '/general/data_collection/Settings' + path\n if os.path.isfile(filename):\n write_method = 'r+'\n else:\n write_method = 'w'\n with h5py.File(filename, write_method) as h5file:\n recursively_save_dict_contents_to_group(h5file, full_path, Settings)\n\ndef load_settings(filename, path='/', ignore=()):\n \"\"\"\n By default loads all settings from path\n '/general/data_collection/Settings/'\n or for example to load animal ID, use:\n path='/General/animal/'\n\n ignore - tuple - any paths including any element of ignore are returned as None\n \"\"\"\n full_path = '/general/data_collection/Settings' + path\n with h5py.File(filename, 'r') as h5file:\n data = recursively_load_dict_contents_from_group(h5file, full_path, ignore=ignore)\n\n return data\n\n\ndef check_if_settings_available(filename, path='/'):\n \"\"\"\n Returns whether settings information exists in NWB file\n Specify path='/General/badChan/' to check for specific settings\n \"\"\"\n full_path = '/general/data_collection/Settings' + path\n with h5py.File(filename, 'r') as h5file:\n return full_path in h5file\n\n\ndef save_analysis(filename, data, overwrite=False, complete_overwrite=False, verbose=False):\n \"\"\"Stores analysis results from nested dictionary to /analysis path in NWB file.\n\n See :py:func:`NWBio.recursively_save_dict_contents_to_group` for details on supported data structures.\n\n :param str filename: path to NWB file\n :param dict data: analysis data to be stored in NWB file\n :param bool overwrite: if True, any existing data at same dictionary keys\n as in previously saved data is overwritten.\n Default is False.\n :param bool complete_overwrite: if True, all previous analysis data is discarded before writing.\n Default is False.\n 
:param bool verbose: if True (default is False), the path in file for each element is printed.\n \"\"\"\n with h5py.File(filename, 'r+') as h5file:\n if complete_overwrite:\n del h5file['/analysis']\n recursively_save_dict_contents_to_group(h5file, '/analysis/', data, overwrite=overwrite, verbose=verbose)\n\n\ndef load_analysis(filename, ignore=()):\n \"\"\"Loads analysis results from /analysis path in NWB file into a dictionary.\n\n :param str filename: path to NWB file\n :param tuple ignore: paths containing any element of ignore are terminated with None.\n In the output dictionary any elements downstream of a key matching any element of ignore\n is not loaded and dictionary tree is terminated at that point with value None.\n \"\"\"\n with h5py.File(filename, 'r') as h5file:\n return recursively_load_dict_contents_from_group(h5file, '/analysis/', ignore=ignore)\n\n\ndef listBadChannels(filename):\n if check_if_settings_available(filename,'/General/badChan/'):\n badChanString = load_settings(filename,'/General/badChan/')\n # Separate input string into a list using ',' as deliminaters\n if badChanString.find(',') > -1: # If more than one channel specified\n # Find all values tetrode and channel values listed\n badChanStringList = badChanString.split(',')\n else:\n badChanStringList = [badChanString]\n # Identify any ranges specified with '-' and append these channels to the list\n for chanString in badChanStringList:\n if chanString.find('-') > -1:\n chan_from = chanString[:chanString.find('-')]\n chan_to = chanString[chanString.find('-') + 1:]\n for nchan in range(int(chan_to) - int(chan_from) + 1):\n badChanStringList.append(str(nchan + int(chan_from)))\n badChanStringList.remove(chanString) # Remove the '-' containing list element\n # Reorder list of bad channels\n badChanStringList.sort(key=int)\n badChan = list(np.array(list(map(int, badChanStringList))) - 1)\n else:\n badChan = []\n\n return badChan\n\ndef save_tracking_data(filename, TrackingData, ProcessedPos=False, overwrite=False):\n \"\"\"\n TrackingData is expected as dictionary with keys for each source ID\n If saving processed data, TrackingData is expected to be numpy array\n Use ProcessedPos=True to store processed data\n Use overwrite=True to force overwriting existing processed data\n \"\"\"\n if os.path.isfile(filename):\n write_method = 'r+'\n else:\n write_method = 'w'\n recordingKey = get_recordingKey(filename)\n with h5py.File(filename, write_method) as h5file:\n full_path = '/acquisition/timeseries/' + recordingKey + '/tracking/'\n if not ProcessedPos:\n recursively_save_dict_contents_to_group(h5file, full_path, TrackingData)\n elif ProcessedPos:\n processed_pos_path = full_path + 'ProcessedPos/'\n # If overwrite is true, path is first cleared\n if overwrite:\n if full_path in h5file and 'ProcessedPos' in list(h5file[full_path].keys()):\n del h5file[processed_pos_path]\n h5file[processed_pos_path] = TrackingData\n\n\ndef get_recording_cameraIDs(filename):\n path = '/general/data_collection/Settings/CameraSettings/CameraSpecific'\n with h5py.File(filename, 'r') as h5file:\n if path in h5file:\n return list(h5file[path].keys())\n\n\ndef load_raw_tracking_data(filename, cameraID, specific_path=None):\n path = '/acquisition/timeseries/' + get_recordingKey(filename) + '/tracking/' + cameraID + '/'\n if not (specific_path is None):\n path = path + '/' + specific_path + '/'\n with h5py.File(filename, 'r') as h5file:\n if path in h5file:\n return recursively_load_dict_contents_from_group(h5file, path)\n\ndef 
load_processed_tracking_data(filename, subset='ProcessedPos'):\n path = '/acquisition/timeseries/' + get_recordingKey(filename) + '/tracking/'\n path = path + subset\n with h5py.File(filename, 'r') as h5file:\n return np.array(h5file[path][()])\n\ndef get_processed_tracking_data_timestamp_edges(filename, subset='ProcessedPos'):\n if check_if_processed_position_data_available(filename):\n data = load_processed_tracking_data(filename, subset=subset)\n edges = [data[0, 0], data[-1, 0]]\n else:\n print('Warning! ProcessedPos not available. Using continuous data timestamps')\n h5file = h5py.File(filename, 'r')\n recordingKey = get_recordingKey(filename)\n processorKey = get_processorKey(filename)\n path = '/acquisition/timeseries/' + recordingKey + '/continuous/' + processorKey + '/timestamps'\n edges = [h5file[path][0], h5file[path][-1]]\n h5file.close()\n\n return edges\n\ndef check_if_tracking_data_available(filename):\n if check_if_settings_available(filename, path='/General/Tracking/'):\n return load_settings(filename, path='/General/Tracking/')\n else:\n return False\n\ndef check_if_processed_position_data_available(filename, subset='ProcessedPos'):\n path = '/acquisition/timeseries/' + get_recordingKey(filename) + '/tracking/'\n path = path + subset\n return check_if_path_exists(filename, path)\n\ndef check_if_binary_pos(filename):\n # Checks if binary position data exists in NWB file\n path = '/acquisition/timeseries/' + get_recordingKey(filename) + '/events/binary1/'\n return check_if_path_exists(filename, path)\n\n\ndef save_tetrode_idx_keep(filename, ntet, idx_keep, spike_name='spikes', overwrite=False):\n path = '/acquisition/timeseries/' + get_recordingKey(filename) + '/' + spike_name + '/' + \\\n 'electrode' + str(ntet + 1) + '/idx_keep'\n with h5py.File(filename, 'r+') as h5file:\n if path in h5file:\n if overwrite:\n del h5file[path]\n else:\n raise ValueError('Tetrode ' + str(ntet + 1) + ' idx_keep already exists in ' + filename)\n h5file[path] = idx_keep\n\ndef save_tetrode_clusterIDs(filename, ntet, clusterIDs, spike_name='spikes', overwrite=False):\n path = '/acquisition/timeseries/' + get_recordingKey(filename) + '/' + spike_name + '/' + \\\n 'electrode' + str(ntet + 1) + '/clusterIDs'\n with h5py.File(filename, 'r+') as h5file:\n if path in h5file:\n if overwrite:\n del h5file[path]\n else:\n raise ValueError('Tetrode ' + str(ntet + 1) + ' clusterIDs already exists in ' + filename)\n h5file[path] = np.int16(clusterIDs).squeeze()\n\ndef fill_empty_dictionary_from_source(selection, src_dict):\n \"\"\"\n Populates a dictionary with None values with values from a source\n dictionary with identical structure.\n \"\"\"\n dst_dict = copy(selection)\n for key, item in dst_dict.items():\n if isinstance(item, dict):\n dst_dict[key] = fill_empty_dictionary_from_source(item, src_dict[key])\n elif item is None:\n dst_dict[key] = src_dict[key]\n else:\n raise ValueError('Destination dictionary has incorrect.')\n\n return dst_dict\n\n\ndef get_recording_start_timestamp_offset(filename):\n \"\"\"Returns the first timestamp of raw or downsampled continuous data.\n\n :param str filename: path to NWB file\n :return: first timestamp of continuous data\n :rtype: float\n \"\"\"\n if check_if_raw_data_available(filename):\n path = get_raw_data_paths(filename)['timestamps']\n elif check_if_downsampled_data_available(filename):\n path = get_downsampled_data_paths(filename)['timestamps']\n else:\n raise Exception('NWB file does not contain raw or downsampled data ' + filename)\n with 
h5py.File(filename, 'r') as h5file:\n return float(h5file[path][0:1])\n\n\ndef get_recording_full_duration(filename):\n \"\"\"Returns the total duration from first to last timestamp of\n raw or downsampled continuous data.\n\n :param str filename: path to NWB file\n :return: total duration from first to last timestamp of continuous data\n :rtype: float\n \"\"\"\n if check_if_raw_data_available(filename):\n path = get_raw_data_paths(filename)['timestamps']\n elif check_if_downsampled_data_available(filename):\n path = get_downsampled_data_paths(filename)['timestamps']\n else:\n raise Exception('NWB file does not contain raw or downsampled data ' + filename)\n with h5py.File(filename, 'r') as h5file:\n return float(h5file[path][-1]) - float(h5file[path][0:1])\n\n\ndef import_task_specific_log_parser(task_name):\n \"\"\"\n Returns LogParser module for the specific task.\n\n :param task_name: name of the task\n :type task_name: str\n :return: TaskLogParser\n :rtype: module\n \"\"\"\n if task_name == 'Pellets_and_Rep_Milk_Task': # Temporary workaround to function with older files\n task_name = 'Pellets_and_Rep_Milk'\n try:\n return importlib.import_module('.Tasks.' + task_name + '.LogParser', package='openEPhys_DACQ')\n except ModuleNotFoundError:\n print('Task {} LogParser not found. Returning None.'.format(task_name))\n return None\n\n\ndef load_task_name(filename):\n \"\"\"\n Returns the name of the task active in the recording.\n\n :param filename: absolute path to NWB recording file\n :type filename: str\n :return: task_name\n :rtype: str\n \"\"\"\n return load_settings(filename, path='/TaskSettings/name/')\n\n\ndef get_recording_log_parser(filename, final_timestamp=None):\n \"\"\"Finds task specific LogParser class and returns it initialized\n with network events from that recording.\n\n :param str filename:\n :return: Task specific log parser initialized with network events\n :rtype: LogParser class\n \"\"\"\n task_log_parser = import_task_specific_log_parser(load_task_name(filename))\n if task_log_parser is None:\n return None\n else:\n return task_log_parser.LogParser(task_settings=load_settings(filename, path='/TaskSettings/'),\n final_timestamp=final_timestamp,\n **load_network_events(filename))\n\n\ndef get_channel_map(filename):\n return load_settings(filename, '/General/channel_map/')\n\n\ndef list_tetrode_nrs_for_area_channel_map(area_channel_map):\n return list(set([channels_tetrode(chan) for chan in list(area_channel_map['list'])]))\n\n\ndef get_channel_map_with_tetrode_nrs(filename):\n channel_map = get_channel_map(filename)\n for area in channel_map:\n channel_map[area]['tetrode_nrs'] = list_tetrode_nrs_for_area_channel_map(channel_map[area])\n\n return channel_map\n\n\ndef check_if_channel_maps_are_same(channel_map_1, channel_map_2):\n \"\"\"\n Determines if two channel maps are identical\n \"\"\"\n # Check that there are same number of areas in the dictionary\n if len(channel_map_1) != len(channel_map_2):\n return False\n # Sort the area names because dictionary is not ordered\n channel_map_1_keys = sorted(list(channel_map_1.keys()))\n channel_map_2_keys = sorted(list(channel_map_2.keys()))\n # Check that the areas have the same name\n for n_area in range(len(channel_map_1_keys)):\n if channel_map_1_keys[n_area] != channel_map_2_keys[n_area]:\n return False\n # Check that the channel lists are the same\n for area in channel_map_1_keys:\n if not all(channel_map_1[area]['list'] == channel_map_2[area]['list']):\n return False\n\n return True\n\n\ndef 
estimate_open_ephys_timestamps_from_other_timestamps(open_ephys_global_clock_times, other_global_clock_times,\n other_times, other_times_divider=None):\n \"\"\"Returns Open Ephys timestamps for each timestamp from another device by synchronising with global clock.\n\n Note, other times must be in same units as open_ephys_global_clock_times. Most likely seconds.\n For example, Raspberry Pi camera timestamps would need to be divided by 10 ** 6\n\n :param numpy.ndarray open_ephys_global_clock_times: shape (N,)\n :param numpy.ndarray other_global_clock_times: shape (M,)\n :param numpy.ndarray other_times: shape (K,)\n :param int other_times_divider: if provided, timestamps from the other devices are divided by this value\n before matching to Open Ephys time. This allows inputting timestamps from other device in original units.\n In case of Raspberry Pi camera timestamps, this value should be 10 ** 6.\n If this value is not provided, all provided timestamps must be in same units.\n :return: open_ephys_times\n :rtype: numpy.ndarray\n \"\"\"\n\n # Crop data if more timestamps recorded on either system.\n if open_ephys_global_clock_times.size > other_global_clock_times.size:\n open_ephys_global_clock_times = open_ephys_global_clock_times[:other_global_clock_times.size]\n print('[ Warning ] OpenEphys recorded more GlobalClock TTL pulses than other system.\\n' +\n 'Dumping extra OpenEphys timestamps from the end.')\n elif open_ephys_global_clock_times.size < other_global_clock_times.size:\n other_global_clock_times = other_global_clock_times[:open_ephys_global_clock_times.size]\n print('[ Warning ] Other system recorded more GlobalClock TTL pulses than Open Ephys.\\n' +\n 'Dumping extra other system timestamps from the end.')\n\n # Find closest other_global_clock_times indices to each other_times\n other_times_gc_indices = closest_argmin(other_times, other_global_clock_times)\n\n # Compute difference from the other_global_clock_times for each value in other_times\n other_times_nearest_global_clock_times = other_global_clock_times[other_times_gc_indices]\n other_times_global_clock_delta = other_times - other_times_nearest_global_clock_times\n\n # Convert difference values to Open Ephys timestamp units\n if not (other_times_divider is None):\n other_times_global_clock_delta = other_times_global_clock_delta / float(other_times_divider)\n\n # Use other_times_global_clock_delta to estimate timestamps in OpenEphys time\n other_times_nearest_open_ephys_global_clock_times = open_ephys_global_clock_times[other_times_gc_indices]\n open_ephys_times = other_times_nearest_open_ephys_global_clock_times + other_times_global_clock_delta\n\n return open_ephys_times\n\n\ndef extract_recording_info(filename, selection='default'):\n \"\"\"\n Returns recording info for the recording file.\n\n selection - allows specifying which data return\n 'default' - some hard-coded selection of data\n 'all' - all of the recording settings\n dict - a dictionary with the same exact keys and structure\n as the recording settings, with None for item values\n and missing keys for unwanted elements. 
The dictionary\n will be returned with None values populated by values\n from recording settings.\n \"\"\"\n recording_info = {}\n if isinstance(selection, str) and selection == 'default':\n recording_info.update(load_settings(filename, '/General/'))\n del recording_info['experimenter']\n del recording_info['rec_file_path']\n del recording_info['root_folder']\n if recording_info['TaskActive']:\n recording_info.update({'TaskName': load_settings(filename, '/TaskSettings/name/')})\n for key in list(recording_info['channel_map'].keys()):\n del recording_info['channel_map'][key]['list']\n pos_edges = get_processed_tracking_data_timestamp_edges(filename)\n recording_info['duration'] = pos_edges[1] - pos_edges[0]\n recording_info['duration (min)'] = int(round((pos_edges[1] - pos_edges[0]) / 60))\n recording_info['time'] = load_settings(filename, '/Time/')\n elif isinstance(selection, str) and selection == 'all':\n recording_info = load_settings(filename)\n elif isinstance(selection, dict):\n full_recording_info = load_settings(filename)\n recording_info = fill_empty_dictionary_from_source(selection, full_recording_info)\n\n return recording_info\n\n\ndef display_recording_data(root_path, selection='default'):\n \"\"\"\n Prints recording info for the whole directory tree.\n \"\"\"\n for dirName, subdirList, fileList in os.walk(root_path):\n for fname in fileList:\n if fname == 'experiment_1.nwb':\n filename = os.path.join(dirName, fname)\n recording_info = extract_recording_info(filename, selection)\n print('Data on path: ' + dirName)\n pprint(recording_info)\n\n\nif __name__ == '__main__':\n # Input argument handling and help info\n parser = argparse.ArgumentParser(description='Extract info from Open Ephys.')\n parser.add_argument('root_path', type=str, nargs=1, \n help='Root directory for recording(s)')\n args = parser.parse_args()\n # Get paths to recording files\n display_recording_data(args.root_path[0])\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.zeros", "numpy.sum", "numpy.float64", "numpy.int64", "numpy.mod", "numpy.int16" ] ]
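The module in the record above stores nested settings dictionaries as one HDF5 group per dict and one dataset per leaf value under '/general/data_collection/Settings'. A minimal standalone sketch of that layout (not part of the record; file name and settings values are made up, and only h5py/numpy are needed):

# Illustrative sketch only: mimics the group-per-dict / dataset-per-leaf layout
# written by recursively_save_dict_contents_to_group in the record above.
import h5py
import numpy as np

settings = {'General': {'animal': 'rat_01', 'badChan': '1,3-5'},
            'TaskSettings': {'name': 'Pellets_and_Rep_Milk',
                             'reward_volume': np.float64(0.05)}}

def save_dict(h5file, path, dic):
    for key, item in dic.items():
        if isinstance(item, dict):
            save_dict(h5file, path + key + '/', item)   # nested dict -> nested group
        else:
            h5file[path + key] = item                    # leaf value -> dataset

with h5py.File('example_settings.h5', 'w') as f:
    save_dict(f, '/general/data_collection/Settings/', settings)

with h5py.File('example_settings.h5', 'r') as f:
    print(f['/general/data_collection/Settings/TaskSettings/name'][()])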
wykjLDF/plato
[ "9978357a8cc285272596161ed7999d59b0308859" ]
[ "examples/custom_model.py" ]
[ "\"\"\"\nThis example uses a very simple model and the MNIST dataset to show how the model,\nthe training and validation datasets, as well as the training and testing loops can\nbe customized in Plato.\n\"\"\"\nimport os\n\nimport torch\nfrom torch import nn\nfrom torchvision.datasets import MNIST\nfrom torchvision.transforms import ToTensor\n\n# os.environ['config_file'] = 'configs/fedavg_lenet5.yml'\n\nfrom plato.clients import simple\nfrom plato.datasources import base\nfrom plato.servers import fedavg\nfrom plato.trainers import basic\n\n\nclass DataSource(base.DataSource):\n \"\"\"A custom datasource with custom training and validation\n datasets.\n \"\"\"\n def __init__(self):\n super().__init__()\n\n self.trainset = MNIST(\"./data\",\n train=True,\n download=True,\n transform=ToTensor())\n self.testset = MNIST(\"./data\",\n train=False,\n download=True,\n transform=ToTensor())\n\n\nclass Trainer(basic.Trainer):\n \"\"\"A custom trainer with custom training and testing loops. \"\"\"\n def train_model(self, config, trainset, sampler, cut_layer=None): # pylint: disable=unused-argument\n \"\"\"A custom training loop. \"\"\"\n optimizer = torch.optim.Adam(self.model.parameters(), lr=1e-3)\n criterion = nn.CrossEntropyLoss()\n\n train_loader = torch.utils.data.DataLoader(\n dataset=trainset,\n shuffle=False,\n batch_size=config['batch_size'],\n sampler=sampler)\n\n num_epochs = 1\n for __ in range(num_epochs):\n for examples, labels in train_loader:\n examples = examples.view(len(examples), -1)\n\n logits = self.model(examples)\n loss = criterion(logits, labels)\n print(\"train loss: \", loss.item())\n\n loss.backward()\n optimizer.step()\n optimizer.zero_grad()\n\n def test_model(self, config, testset): # pylint: disable=unused-argument\n \"\"\"A custom testing loop. \"\"\"\n test_loader = torch.utils.data.DataLoader(\n testset, batch_size=config['batch_size'], shuffle=False)\n\n correct = 0\n total = 0\n\n with torch.no_grad():\n for examples, labels in test_loader:\n examples, labels = examples.to(self.device), labels.to(\n self.device)\n\n examples = examples.view(len(examples), -1)\n outputs = self.model(examples)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\n accuracy = correct / total\n return accuracy\n\n\ndef main():\n \"\"\"A Plato federated learning training session using a custom model. \"\"\"\n model = nn.Sequential(\n nn.Linear(28 * 28, 128),\n nn.ReLU(),\n nn.Linear(128, 128),\n nn.ReLU(),\n nn.Linear(128, 10),\n )\n\n datasource = DataSource()\n trainer = Trainer(model=model)\n\n client = simple.Client(model=model, datasource=datasource, trainer=trainer)\n server = fedavg.Server(model=model, trainer=trainer)\n server.run(client)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.nn.Linear", "torch.max", "torch.no_grad", "torch.nn.ReLU", "torch.utils.data.DataLoader", "torch.nn.CrossEntropyLoss" ] ]
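The custom trainer and tester in the record above both flatten each MNIST batch with view(len(examples), -1) before the purely linear model. That step can be checked standalone, without Plato, on random tensors (so the printed accuracy is only chance level; torch is the only requirement):

# Quick standalone check of the flatten-and-classify step used in the custom
# train/test loops above; random tensors stand in for MNIST batches.
import torch
from torch import nn

model = nn.Sequential(nn.Linear(28 * 28, 128), nn.ReLU(),
                      nn.Linear(128, 128), nn.ReLU(), nn.Linear(128, 10))
examples = torch.randn(8, 1, 28, 28)
labels = torch.randint(0, 10, (8,))

with torch.no_grad():
    logits = model(examples.view(len(examples), -1))   # same reshape as in the record
    _, predicted = torch.max(logits.data, 1)
print("correct:", (predicted == labels).sum().item(), "of", labels.size(0))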
Gscorreia89/pyChemometrics
[ "16f3b4a1af873cf7240230439b503c5aee751ce7" ]
[ "pyChemometrics/PCAPlotMixin.py" ]
[ "from abc import ABCMeta\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport seaborn as sns\nimport scipy.stats as st\nfrom sklearn.model_selection import KFold\nfrom copy import deepcopy\nfrom pyChemometrics.PlotMixin import PlotMixin\n\n\nclass PCAPlotMixin(PlotMixin, metaclass=ABCMeta):\n \"\"\"\n\n Mixin Class to add plotting methods to ChemometricsPCA objects if desired.\n\n \"\"\"\n\n def plot_scores(self, comps=[0, 1], color=None):\n \"\"\"\n\n Score plot figure wth an Hotelling T2.\n\n :param comps: Components to use in the 2D plot\n :param color: Variable used to color points\n :return: Score plot figure\n \"\"\"\n try:\n plt.figure()\n if color is not None:\n cmap = cm.get_cmap(name='Set1')\n color = cmap(color)\n comps = np.array(comps)\n\n t2 = self.hotelling_T2(alpha=0.05, comps=comps)\n outlier_idx = np.where(((self.scores[:, comps] ** 2) / t2 ** 2).sum(axis=1) > 1)[0]\n\n if len(comps) == 1:\n plt.scatter(range(self.scores.shape[0]), self.scores[:, comps], color=color)\n plt.scatter(range(self.scores.shape[0]), self.scores[outlier_idx, comps[0]], color=color, marker='x',\n s=1.5*mpl.rcParams['lines.markersize'] ** 2)\n else:\n plt.scatter(self.scores[:, comps[0]], self.scores[:, comps[1]], color=color)\n plt.scatter(self.scores[outlier_idx, comps[0]], self.scores[outlier_idx, comps[1]],\n color=color, marker='x', s=1.5*mpl.rcParams['lines.markersize'] ** 2)\n\n t2 = self.hotelling_T2(comps=comps)\n\n angle = np.arange(-np.pi, np.pi, 0.01)\n x = t2[0] * np.cos(angle)\n y = t2[1] * np.sin(angle)\n plt.axhline(c='k')\n plt.axvline(c='k')\n plt.plot(x, y, c='k')\n\n xmin = np.minimum(min(self.scores[:, comps[0]]), np.min(x))\n xmax = np.maximum(max(self.scores[:, comps[0]]), np.max(x))\n ymin = np.minimum(min(self.scores[:, comps[1]]), np.min(y))\n ymax = np.maximum(max(self.scores[:, comps[1]]), np.max(y))\n\n axes = plt.gca()\n axes.set_xlim([(xmin + (0.2 * xmin)), xmax + (0.2 * xmax)])\n axes.set_ylim([(ymin + (0.2 * ymin)), ymax + (0.2 * ymax)])\n\n except (ValueError, IndexError) as verr:\n print(\"The number of components to plot must not exceed 2 and the component choice cannot \"\n \"exceed the number of components in the model\")\n raise Exception\n\n plt.title(\"PCA score plot\")\n if len(comps) == 1:\n plt.xlabel(\"PC[{0}] - Variance Explained : {1:.2} %\".format((comps[0] + 1), self.modelParameters['VarExpRatio']*100))\n else:\n plt.xlabel(\"PC[{0}] - Variance Explained : {1:.2} %\".format((comps[0] + 1), self.modelParameters['VarExpRatio'][comps[0]]*100))\n plt.ylabel(\"PC[{0}] - Variance Explained : {1:.2} %\".format((comps[1] + 1), self.modelParameters['VarExpRatio'][comps[1]]*100))\n plt.show()\n return None\n\n def plot_model_parameters(self, parameter='p', component=1, cross_val=False, sigma=2, bar=False, xaxis=None):\n\n choices = {'p': self.loadings}\n choices_cv = {'p': 'Loadings'}\n\n # decrement component to adjust for python indexing\n component -= 1\n if cross_val is True:\n mean = self.cvParameters['Mean_' + choices_cv[parameter]][component, :]\n error = sigma * self.cvParameters['Stdev_' + choices_cv[parameter]][component, :]\n else:\n error = None\n mean = choices[parameter][component, :]\n\n if bar is False:\n self._lineplots(mean, error=error, xaxis=xaxis)\n # To use with barplots for other types of data\n else:\n self._barplots(mean, error=error, xaxis=xaxis)\n\n plt.xlabel(\"Variable No\")\n plt.ylabel(\"{0} for PCA component {1}\".format(parameter, (component + 1)))\n plt.show()\n\n 
return None\n\n def scree_plot(self, x, total_comps=5, cv_method=KFold(7, shuffle=True)):\n \"\"\"\n\n Plot of the R2X and Q2X per number of component to aid in the selection of the component number.\n\n :param x: Data matrix [n samples, m variables]\n :param total_comps: Maximum number of components to fit\n :param cv_method: scikit-learn Base Cross-Validator to use\n :return: Figure with R2X and Q2X Goodness of fit metrics per component\n \"\"\"\n plt.figure()\n models = list()\n\n for ncomps in range(1, total_comps + 1):\n currmodel = deepcopy(self)\n currmodel.ncomps = ncomps\n currmodel.fit(x)\n currmodel.cross_validation(x, outputdist=False, cv_method=cv_method)\n models.append(currmodel)\n\n q2 = np.array([x.cvParameters['Q2X'] for x in models])\n r2 = np.array([x.modelParameters['R2X'] for x in models])\n\n plt.bar([x - 0.1 for x in range(1, total_comps + 1)], height=r2, width=0.2)\n plt.bar([x + 0.1 for x in range(1, total_comps + 1)], height=q2, width=0.2)\n plt.legend(['R2', 'Q2'])\n plt.xlabel(\"Number of components\")\n plt.ylabel(\"R2/Q2X\")\n\n # Specific case where n comps = 2 # TODO check this edge case\n if len(q2) == 2:\n plateau = np.min(np.where(np.diff(q2)/q2[0] < 0.05)[0])\n else:\n percent_cutoff = np.where(np.diff(q2) / q2[0:-1] < 0.05)[0]\n if percent_cutoff.size == 0:\n print(\"Consider exploring a higher level of components\")\n else:\n plateau = np.min(percent_cutoff)\n plt.vlines(x= (plateau + 1), ymin=0, ymax=1, colors='red', linestyles ='dashed')\n print(\"Q2X measure stabilizes (increase of less than 5% of previous value or decrease) \"\n \"at component {0}\".format(plateau + 1))\n plt.show()\n\n return None\n\n def repeated_cv(self, x, total_comps=7, repeats=15, cv_method=KFold(7, shuffle=True)):\n \"\"\"\n\n Perform repeated cross-validation and plot Q2X values and their distribution (violin plot) per component\n number to help select the appropriate number of components.\n\n :param x: Data matrix [n samples, m variables]\n :param total_comps: Maximum number of components to fit\n :param repeats: Number of CV procedure repeats\n :param cv_method: scikit-learn Base Cross-Validator to use\n :return: Violin plot with Q2X values and distribution per component number.\n \"\"\"\n\n q2x = np.zeros((total_comps, repeats))\n\n for ncomps in range(1, total_comps + 1):\n for rep in range(repeats):\n currmodel = deepcopy(self)\n currmodel.ncomps = ncomps\n currmodel.fit(x)\n currmodel.cross_validation(x, cv_method=cv_method, outputdist=False)\n q2x[ncomps - 1, rep] = currmodel.cvParameters['Q2X']\n\n plt.figure()\n ax = sns.violinplot(data=q2x.T, palette=\"Set1\")\n ax = sns.swarmplot(data=q2x.T, edgecolor=\"black\", color='black')\n ax.set_xticklabels(range(1, total_comps + 1))\n plt.xlabel(\"Number of components\")\n plt.ylabel(\"Q2X\")\n plt.show()\n\n return q2x\n\n def plot_dmodx(self, x, alpha=0.05):\n \"\"\"\n\n Plot a figure with DmodX values and the F-statistic critical line.\n\n :param numpy.ndarray x: Data matrix [n samples, m variables]\n :param float alpha: Significance level\n :return: Plot with DmodX values and critical line\n \"\"\"\n\n try:\n dmodx = self.dmodx(x)\n\n dcrit = self._dmodx_fcrit(x, alpha)\n outlier_idx = self.outlier(x, measure='DmodX')\n plt.figure()\n x_axis = np.array([x for x in range(x.shape[0])])\n plt.plot(x_axis, dmodx, 'o')\n plt.plot(x_axis[outlier_idx], dmodx[outlier_idx], 'rx')\n plt.xlabel('Sample Index')\n plt.ylabel('DmodX')\n plt.hlines(dcrit, xmin=0, xmax= x.shape[0], color='r', linestyles='--')\n plt.show()\n return 
None\n except TypeError as terr:\n raise terr\n except ValueError as verr:\n raise verr\n\n def plot_leverages(self):\n \"\"\"\n Leverage (h) per observation, with a red line plotted at y = 1/Number of samples (expected\n :return: Plot with observation leverages (h)\n \"\"\"\n plt.figure()\n lev = self.leverages()\n plt.xlabel('Sample Index')\n plt.ylabel('Leverage')\n plt.bar(left=range(lev.size), height=lev)\n plt.hlines(y=1/lev.size, xmin=0, xmax=lev.size, colors='r', linestyles='--')\n plt.show()\n return None" ]
[ [ "numpy.min", "numpy.cos", "numpy.max", "numpy.sin", "numpy.arange", "matplotlib.pyplot.gca", "matplotlib.pyplot.hlines", "numpy.array", "numpy.zeros", "matplotlib.cm.get_cmap", "matplotlib.pyplot.axhline", "matplotlib.pyplot.title", "matplotlib.pyplot.figure", "numpy.diff", "sklearn.model_selection.KFold", "matplotlib.pyplot.axvline", "matplotlib.pyplot.show", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.vlines", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.scatter" ] ]
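The score-plot logic in the record above draws a Hotelling T2 ellipse and marks points falling outside it. A standalone preview of that geometry, with synthetic scores and arbitrary T2 radii standing in for a fitted ChemometricsPCA model:

# Standalone preview of the plot_scores geometry: points outside the Hotelling
# T2 ellipse are flagged with 'x'. Scores and t2 radii below are synthetic
# stand-ins for self.scores and self.hotelling_T2(comps=[0, 1]).
import numpy as np
import matplotlib.pyplot as plt

scores = np.random.randn(200, 2)
t2 = np.array([2.5, 2.0])

angle = np.arange(-np.pi, np.pi, 0.01)
x, y = t2[0] * np.cos(angle), t2[1] * np.sin(angle)
outliers = ((scores ** 2) / t2 ** 2).sum(axis=1) > 1

plt.scatter(scores[:, 0], scores[:, 1])
plt.scatter(scores[outliers, 0], scores[outliers, 1], marker='x')
plt.plot(x, y, c='k')
plt.show()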
ecmwf-lab/infero
[ "4fec006175af48cd0313b2f89722c01636e961db" ]
[ "src/infero/api/pyinfero/pyinfero/pyinfero.py" ]
[ "#\n# (C) Copyright 1996- ECMWF.\n#\n# This software is licensed under the terms of the Apache Licence Version 2.0\n# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.\n# In applying this licence, ECMWF does not waive the privileges and immunities\n# granted to it by virtue of its status as an intergovernmental organisation\n# nor does it submit to any jurisdiction.\n#\n\nimport os\nimport copy\nimport cffi\nimport platform\nimport numpy as np\n\n\nffi = cffi.FFI()\n\n\nclass InferoException(RuntimeError):\n pass\n\n\nclass PatchedLib:\n \"\"\"\n Patch a CFFI library with error handling\n\n Finds the header file associated with the C API and parses it, loads the shared library,\n and patches the accessors with automatic python-C error handling.\n \"\"\"\n __type_names = {}\n\n def __init__(self):\n\n ffi.cdef(self.__read_header()) \n\n libName = {\n 'Linux': 'libinferoapi.so'\n }\n\n self.__lib = ffi.dlopen(libName[platform.system()])\n\n # All of the executable members of the CFFI-loaded library are functions in the Infero\n # C API. These should be wrapped with the correct error handling. Otherwise forward\n # these on directly.\n for f in dir(self.__lib):\n try:\n attr = getattr(self.__lib, f)\n setattr(self, f, self.__check_error(attr, f) if callable(attr) else attr)\n except Exception as e:\n print(e)\n print(\"Error retrieving attribute\", f, \"from library\")\n\n # initialisation flag\n self._initialised = False\n\n # initialise infero lib\n self.__initialise_lib()\n\n def __initialise_lib(self):\n\n if not self._initialised:\n\n # main args not directly used by the API\n args = [\"\"]\n cargs = [ffi.new(\"char[]\", ar.encode('ascii')) for ar in args]\n argv = ffi.new(f'char*[]', cargs)\n\n # init infero lib\n self.__lib.infero_initialise(len(cargs), argv)\n\n self._initialised = True\n\n def __read_header(self):\n with open(os.path.join(os.path.dirname(__file__), 'pyinfero-headers.h'), 'r') as f:\n return f.read()\n\n def __check_error(self, fn, name):\n \"\"\"\n If calls into the Infero library return errors, ensure that they get detected and reported\n by throwing an appropriate python exception.\n \"\"\"\n\n def wrapped_fn(*args, **kwargs):\n retval = fn(*args, **kwargs)\n if retval != self.__lib.INFERO_SUCCESS:\n c_err = ffi.string(self.__lib.infero_error_string(retval))\n error_str = \"Error in function {}: {}\".format(name, c_err)\n raise InferoException(error_str)\n return retval\n\n return wrapped_fn\n\n def __del__(self):\n \"\"\"\n Finalise infero lib\n \"\"\"\n\n if self._initialised:\n self.__lib.infero_finalise()\n\n\n# Bootstrap the library\n\nlib = PatchedLib()\n\n\n\nclass Infero:\n \"\"\"\n Minimal class that wraps the infero C API\n \"\"\"\n\n def __init__(self, model_path, model_type):\n\n # path to infero model\n self.model_path = model_path\n\n # model type (see available infero backends)\n self.model_type = model_type\n\n # inference configuration string\n self.config_str = f\"path: {self.model_path}\\ntype: {self.model_type}\"\n\n # C API handle\n self.infero_hdl = None \n\n # initialised flag\n self._initialised = False\n\n # initialise (create/open handle)\n self.initialise()\n\n def initialise(self):\n \"\"\"\n Initialise the library\n :return:\n \"\"\"\n\n if not self._initialised:\n\n config_cstr = ffi.new(\"char[]\", self.config_str.encode('ascii'))\n\n # get infero handle\n self.infero_hdl = ffi.new('infero_handle_t**')\n\n # self.infero_hdl = ffi.new('int*')\n lib.infero_create_handle_from_yaml_str(config_cstr, 
self.infero_hdl)\n\n # open the handle\n lib.infero_open_handle(self.infero_hdl[0])\n\n self._initialised = True\n\n\n def infer(self, input_data, output_shape):\n \"\"\"\n Run Inference\n :param input_data:\n :param output_shape:\n :return:\n \"\"\"\n\n # input set to Fortran order\n input_data = np.array(input_data, order='C', dtype=np.float32)\n cdata1p = ffi.cast(\"float *\", input_data.ctypes.data)\n cshape1 = ffi.new(f\"int[]\", input_data.shape)\n\n # output also expected in Fortran order\n cdata2 = np.zeros(output_shape, order='C', dtype=np.float32)\n cdata2p = ffi.cast(\"float *\", cdata2.ctypes.data)\n cshape2 = ffi.new(f\"int[]\", output_shape)\n\n lib.infero_inference_float_ctensor(self.infero_hdl[0],\n len(input_data.shape), cdata1p, cshape1,\n len(output_shape), cdata2p, cshape2)\n\n return_output = copy.deepcopy(cdata2)\n return_output = np.array(return_output)\n\n return return_output\n\n def infer_mimo(self, input_data, output_shapes):\n \"\"\"\n Run multi-input multi-output inference\n :param input_data:\n :param output_shape:\n :return:\n \"\"\"\n\n # ---------- inputs --------------\n n_inputs = len(input_data)\n cdata_ptrs = []\n cshape_ptrs = []\n cname_ptrs = []\n for iname, idata in input_data.items():\n\n # input set to Fortran order\n odata_c = np.array(idata, order='C', dtype=np.float32)\n\n cdata_ptr = ffi.cast(\"float *\", odata_c.ctypes.data)\n cshape_ptr = ffi.new(\"int[]\", odata_c.shape)\n cname_ptr = ffi.new(\"char[]\", iname.encode('ascii'))\n\n cdata_ptrs.append(cdata_ptr)\n cshape_ptrs.append(cshape_ptr)\n cname_ptrs.append(cname_ptr)\n\n data_ptr2ptrs = ffi.new(\"float*[]\", cdata_ptrs)\n shape_ptr2ptrs = ffi.new(\"int*[]\", cshape_ptrs)\n name_ptr2ptrs = ffi.new(\"char*[]\", cname_ptrs)\n iranks = ffi.new(\"int[]\", [len(t.shape) for t in input_data.values()])\n\n # ---------- outputs --------------\n n_output = len(output_shapes)\n out_cdata_ptrs = []\n out_cshape_ptrs = []\n out_cname_ptrs = []\n for oname, oshape in output_shapes.items():\n\n # input set to Fortran order\n odata_c = np.zeros(oshape, order='C', dtype=np.float32)\n\n out_cdata_ptr = ffi.cast(\"float *\", odata_c.ctypes.data)\n out_cshape_ptr = ffi.new(\"int[]\", odata_c.shape)\n out_cname_ptr = ffi.new(\"char[]\", oname.encode('ascii'))\n\n out_cdata_ptrs.append(out_cdata_ptr)\n out_cshape_ptrs.append(out_cshape_ptr)\n out_cname_ptrs.append(out_cname_ptr)\n\n out_data_ptr2ptrs = ffi.new(\"float*[]\", out_cdata_ptrs)\n out_shape_ptr2ptrs = ffi.new(\"int*[]\", out_cshape_ptrs)\n out_name_ptr2ptrs = ffi.new(\"char*[]\", out_cname_ptrs)\n oranks = ffi.new(\"int[]\", [len(t) for t in output_shapes.values()])\n\n lib.infero_inference_float_mimo_ctensor(self.infero_hdl[0],\n n_inputs,\n name_ptr2ptrs,\n iranks,\n shape_ptr2ptrs,\n data_ptr2ptrs,\n n_output,\n out_name_ptr2ptrs,\n oranks,\n out_shape_ptr2ptrs,\n out_data_ptr2ptrs)\n\n output_tensors = {}\n for tidx, t in enumerate(out_data_ptr2ptrs): \n oname = list(output_shapes.keys())[tidx] \n output_tensors.update({oname: np.frombuffer(ffi.buffer(t), dtype=np.float32) })\n\n return output_tensors\n\n\n def finalise(self):\n \"\"\"\n Finalise the Infero API\n :return:\n \"\"\"\n\n if self._initialised:\n\n # close the handle\n lib.infero_close_handle(self.infero_hdl[0])\n\n # delete the handle\n lib.infero_delete_handle(self.infero_hdl[0])\n\n def __del__(self):\n self.finalise()\n" ]
[ [ "numpy.array", "numpy.zeros" ] ]
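The intended call pattern for the Infero wrapper in the record above, as far as it can be read from the class itself. The import path, model path, backend type string and tensor shapes are assumptions, and running it requires the compiled infero C API (libinferoapi.so) to be installed and discoverable:

# Hedged usage sketch; names marked below are placeholders, not confirmed API.
import numpy as np
from pyinfero.pyinfero import Infero   # module path guessed from the record's file_path

model = Infero("/path/to/some_model", "onnx")   # backend name is a placeholder
x = np.random.rand(1, 32).astype(np.float32)
y = model.infer(x, output_shape=(1, 4))         # shapes are illustrative only
print(y.shape)
model.finalise()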
ha2398/pln-tps
[ "700e4f0b706666de6e2bf5dbc96a8a092a87a9fa" ]
[ "tp2/src/tp2.py" ]
[ "#!/usr/bin/env python3\n\n'''\ntp2.py: Trabalho Prático II - Processamento de Linguagem Natural\n@author: Hugo Araujo de Sousa [2013007463]\n@email: [email protected]\n@DCC030 - Processamento de Linguagem Natural - UFMG\n'''\n\n\nimport argparse as ap\nimport numpy as np\nfrom sklearn import svm\nfrom sklearn.feature_extraction import FeatureHasher\nfrom sklearn.naive_bayes import GaussianNB\n\n\n# Add command line arguments to the program.\nparser = ap.ArgumentParser()\nparser.add_argument('train_file', type=str, help='Name of train file')\nparser.add_argument('test_file', type=str, help='Name of test file')\nparser.add_argument('validation_file', type=str,\n\thelp='Name of validation file')\nparser.add_argument('-s', dest='RSEED', default=0, type=int,\n\thelp='Random number generation seed')\n\nargs = parser.parse_args()\n\n\n# Global variables\ntags = {}\nid_tag = {}\n\n\ndef features(sentence, index):\n\t''' Return the features of the word at a given index in the sentence.\n\n\t\t@param \tsentence: \tSentence in which the word is.\n\t\t@type \tsentence: \tList of String.\n\n\t\t@param \tindex:\t\tIndex of word in the sentence.\n\t\t@type \tindex: \t\tInteger.\n\n\t\t@return: \tWord features.\n\t\t@rtype: \tDictionary.\n\t\t'''\n\n\tword = sentence[index].split('_')[0]\n\n\treturn {\n\t\t'word': word.lower(),\n\t\t'is_first': index == 0,\n\t\t'is_last': index == len(sentence) - 1,\n\t\t'is_capitalized': word[0].upper() == word[0],\n\t\t'is_all_caps': word.upper() == word,\n\t\t'is_all_lower': word.lower() == word,\n\t\t'prefix-1': word[0].lower(),\n\t\t'prefix-2': word[:2].lower(),\n\t\t'prefix-3': word[:3].lower(),\n\t\t'suffix-1': word[-1].lower(),\n\t\t'suffix-2': word[-2:].lower(),\n\t\t'suffix-3': word[-3:].lower(),\n\t\t'prev_tag': '' if index == 0 else sentence[index - 1].split('_')[1],\n\t\t'next_tag': '' if index == len(sentence) - 1 else \\\n\t\t\tsentence[index + 1].split('_')[1],\n\t\t'has_hyphen': '-' in word,\n\t\t'is_numeric': word.isdigit(),\n\t}\n\ndef build_dataset(file):\n\t''' Read a file with words and their POS tags and create an array\n\t\twith words and their target POS.\n\t\n\t\t@param \tfile: Input file.\n\t\t@type \tfile: File.\n\n\t\t@return: \tData and its targets.\n\t\t@rtype:\t\tNumpy array, Numpy array\n\t\t'''\n\n\tglobal tags\n\n\th = FeatureHasher(n_features=17)\n\n\tdata = []\n\ttarget = []\n\n\tfor line in file:\n\t\twords = line.split()\n\n\t\tfor index in range(len(words)):\n\t\t\tdata.append(features(words, index))\n\t\t\ttag = words[index].split('_')[1]\n\n\t\t\tif tag not in tags:\n\t\t\t\ttag_id = len(tags)\n\t\t\t\ttags[tag] = tag_id\n\t\t\t\tid_tag[tag_id] = tag\n\n\t\t\ttag = tags[tag]\n\t\t\ttarget.append(tag)\n\n\tdata_array = h.transform(data).toarray()\n\ttarget_array = np.array(target)\n\n\treturn data_array, target_array\n\n\ndef read_data(train_filename, test_filename, validation_filename):\n\t''' Read input data from input files.\n\t\t\n\t\t@param \ttrain_filename: Training data file name.\n\t\t@type \ttrain_filename: String.\n\n\t\t@param \ttest_filename: Test data file name.\n\t\t@type \ttest_filename: String.\n\n\t\t@param \tvalidation_filename: Validation data file name.\n\t\t@type \tvalidation_filename: String.\n\n\t\t@return: \tTraining data, test data and validation data.\n\t\t@rtype:\t\tTuple of Tuple of Numpy Array\t\n\t\t'''\n\n\tprint('[+] Reading training file')\n\ttrain_file = open(train_filename, 'r')\n\ttrain_data = build_dataset(train_file)\n\n\tprint('[+] Reading validation file')\n\tvalidation_file = 
open(validation_filename, 'r')\n\tvalidation_data = build_dataset(validation_file)\n\n\tprint('[+] Reading test file')\n\ttest_file = open(test_filename, 'r')\n\ttest_data = build_dataset(test_file)\n\n\ttrain_file.close()\n\ttest_file.close()\n\tvalidation_file.close()\n\n\tprint()\n\n\treturn train_data, test_data, validation_data\n\n\ndef print_most_precise_pos(real_output, model_output):\n\t''' Print the POS tags for which the model was more precise.\n\n\t\t@param \treal_output: Real data outputs.\n\t\t@type \treal_output: Numpy Array.\n\n\t\t@param \tmodel_output: Model outputs.\n\t\t@type \tmodel_output: Numpy Array.\n\t\t'''\n\n\thits = [0] * len(tags)\n\tcounts = [0] * len(tags)\n\n\tfor i in range(len(real_output)):\n\t\ttag_id = real_output[i]\n\t\tpredicted_tag_id = model_output[i]\n\n\t\tcounts[tag_id] += 1\n\n\t\tif tag_id == predicted_tag_id:\n\t\t\thits[tag_id] += 1\n\n\tprecision = [0] * len(tags)\n\tfor tag in tags:\n\t\ttag_id = tags[tag]\n\t\ttag_precision = hits[tag_id] / counts[tag_id]\n\t\tprecision[tag_id] = (tag, tag_precision)\n\n\tprecision = sorted(precision, key=lambda x: x[1], reverse=True)\n\n\tfor i in range(len(precision)):\n\t\ttag_precision = round(precision[i][1] * 100, 2)\n\t\tprint('\\t', precision[i][0], 'precision: {}%'.format(tag_precision))\n\n\tprint()\n\n\ndef main():\n\n\ttrain_data, test_data, validation_data = \\\n\t\tread_data(args.train_file, args.test_file, args.validation_file)\n\n\tprint('\\tNAIVE BAYES\\n')\n\tgnb = GaussianNB()\n\tpredictor = gnb.fit(train_data[0], train_data[1])\n\n\tnb_y_valid = predictor.predict(validation_data[0])\n\tprecision = ((validation_data[1] == nb_y_valid).sum()) \\\n\t\t/ len(validation_data[0])\n\tprint('[+] Validation precision: {}%'.format(round((precision*100), 2)))\n\tprint_most_precise_pos(validation_data[1], nb_y_valid)\n\n\tnb_y_test = predictor.predict(test_data[0])\n\tprecision = ((test_data[1] == nb_y_test).sum()) / len(test_data[0])\n\tprint('[+] Test precision: {}%'.format(round((precision*100), 2)))\n\tprint_most_precise_pos(test_data[1], nb_y_test)\n\n\tprint(('-' * 80) + '\\n')\n\t\n\tprint('\\tSVM\\n')\n\tsvmc = svm.SVC(random_state=args.RSEED)\n\tpredictor = svmc.fit(train_data[0], train_data[1])\n\n\tsvm_y_valid = predictor.predict(validation_data[0])\n\tprecision = ((validation_data[1] == svm_y_valid).sum()) \\\n\t\t/ len(validation_data[0])\n\tprint('[+] Validation precision: {}%'.format(round((precision*100), 2)))\n\tprint_most_precise_pos(validation_data[1], svm_y_valid)\n\n\tsvm_y_test = predictor.predict(test_data[0])\n\tprecision = ((test_data[1] == svm_y_test).sum()) / len(test_data[0])\n\tprint('[+] Test precision: {}%'.format(round((precision*100), 2)))\n\tprint_most_precise_pos(test_data[1], svm_y_test)\n\t\n\nmain()" ]
[ [ "numpy.array", "sklearn.feature_extraction.FeatureHasher", "sklearn.naive_bayes.GaussianNB", "sklearn.svm.SVC" ] ]
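In the record above, each per-word feature dictionary is vectorized with FeatureHasher(n_features=17) before being fed to GaussianNB and SVC. A small self-contained demo of that hashing step (the feature values below are example-only; scikit-learn is the only requirement):

# Hand-written feature dict -> fixed-width numeric row, as done in build_dataset.
from sklearn.feature_extraction import FeatureHasher

h = FeatureHasher(n_features=17)
feats = [{'word': 'gato', 'is_first': False, 'is_last': False,
          'prefix-1': 'g', 'suffix-1': 'o',
          'prev_tag': 'ART', 'next_tag': 'V',
          'has_hyphen': False, 'is_numeric': False}]
X = h.transform(feats).toarray()
print(X.shape)   # (1, 17)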
mobiusklein/glycresoft
[ "60d6eddffc6d8c783f0fe470b5010d4f437c7e73" ]
[ "glycan_profiling/tandem/target_decoy.py" ]
[ "# -*- coding: utf-8 -*-\nimport logging\ntry:\n logger = logging.getLogger(\"target_decoy\")\nexcept Exception:\n pass\nimport math\n\nfrom collections import defaultdict, namedtuple\n\nimport numpy as np\ntry:\n from matplotlib import pyplot as plt\nexcept (ImportError, RuntimeError):\n plt = None\n\nScoreCell = namedtuple('ScoreCell', ['score', 'value'])\n\n\nclass NearestValueLookUp(object):\n '''A mapping-like object which simplifies\n finding the value of a pair whose key is nearest\n to a given query.\n\n .. note::\n Queries exceeding the maximum key will return\n the maximum key's value.\n '''\n def __init__(self, items):\n if isinstance(items, dict):\n items = items.items()\n self.items = sorted([ScoreCell(*x) for x in items if not np.isnan(x[0])], key=lambda x: x[0])\n\n def max_key(self):\n try:\n return self.items[-1][0]\n except IndexError:\n return 0\n\n def _find_closest_item(self, value, key_index=0):\n array = self.items\n lo = 0\n hi = len(array)\n n = hi\n\n error_tolerance = 1e-3\n\n if np.isnan(value):\n return lo\n\n if lo == hi:\n return lo\n\n while hi - lo:\n i = (hi + lo) // 2\n x = array[i][key_index]\n err = x - value\n if abs(err) < error_tolerance:\n mid = i\n best_index = mid\n best_error = abs(err)\n i = mid - 1\n while i >= 0:\n x = array[i][key_index]\n err = abs(x - value)\n if err < best_error:\n best_error = err\n best_index = i\n elif err > error_tolerance:\n break\n i -= 1\n i = mid + 1\n while i < n:\n x = array[i][key_index]\n err = abs(x - value)\n if err < best_error:\n best_error = err\n best_index = i\n elif err > error_tolerance:\n break\n i += 1\n return best_index\n elif (hi - lo) == 1:\n mid = i\n best_index = mid\n best_error = abs(err)\n i = mid - 1\n while i >= 0:\n x = array[i][key_index]\n err = abs(x - value)\n if err < best_error:\n best_error = err\n best_index = i\n elif err > error_tolerance:\n break\n i -= 1\n i = mid + 1\n while i < n:\n x = array[i][key_index]\n err = abs(x - value)\n if err < best_error:\n best_error = err\n best_index = i\n elif err > error_tolerance:\n break\n i += 1\n return best_index\n elif x < value:\n lo = i\n elif x > value:\n hi = i\n\n def get_pair(self, key, key_index=0):\n k = self._find_closest_item(key, key_index) + 1\n return self.items[k]\n\n def __len__(self):\n return len(self.items)\n\n def __repr__(self):\n return \"{s.__class__.__name__}({size})\".format(\n s=self, size=len(self))\n\n def __getitem__(self, key):\n return self._get_one(key)\n\n def _get_sequence(self, key):\n value = [self._get_one(k) for k in key]\n if isinstance(key, np.ndarray):\n value = np.array(value, dtype=float)\n return value\n\n def _get_one(self, key):\n ix = self._find_closest_item(key)\n if ix >= len(self):\n ix = len(self) - 1\n if ix < 0:\n ix = 0\n pair = self.items[ix]\n return pair[1]\n\n\ntry:\n _NearestValueLookUp = NearestValueLookUp\n from glycan_profiling._c.tandem.target_decoy import NearestValueLookUp as NearestValueLookUp\nexcept ImportError:\n pass\n\n\nclass ScoreThresholdCounter(object):\n def __init__(self, series, thresholds):\n self.series = sorted(series, key=lambda x: x.score)\n self.thresholds = sorted(set(np.round((thresholds), 10)))\n self.counter = defaultdict(int)\n self.counts_above_threshold = None\n self.n_thresholds = len(self.thresholds)\n self.threshold_index = 0\n self.current_threshold = self.thresholds[self.threshold_index]\n self.current_count = 0\n\n self._i = 0\n self._is_done = False\n\n self.find_counts()\n self.counts_above_threshold = self.compute_complement()\n 
self.counter = NearestValueLookUp(self.counter)\n\n def advance_threshold(self):\n self.threshold_index += 1\n if self.threshold_index < self.n_thresholds:\n self.current_threshold = self.thresholds[self.threshold_index]\n self.counter[self.current_threshold] = self.current_count\n return True\n else:\n self._is_done = True\n return False\n\n def test(self, item):\n if item.score < self.current_threshold:\n self.current_count += 1\n self._i += 1\n else:\n # Rather than using recursion, just invert the condition\n # being tested and loop here.\n while self.advance_threshold():\n if item.score > self.current_threshold:\n continue\n else:\n self.current_count += 1\n self._i += 1\n break\n\n def find_counts(self):\n for item in self.series:\n self.test(item)\n\n def compute_complement(self):\n complement = defaultdict(int)\n n = len(self.series)\n\n for k, v in self.counter.items():\n complement[k] = n - v\n return NearestValueLookUp(complement)\n\n\nclass TargetDecoySet(namedtuple(\"TargetDecoySet\", ['target_matches', 'decoy_matches'])):\n def target_count(self):\n return len(self.target_matches)\n\n def decoy_count(self):\n return len(self.decoy_matches)\n\n\n# implementation derived from pyteomics\n_precalc_fact = np.log([math.factorial(n) for n in range(20)])\n\n\ndef log_factorial(x):\n x = np.array(x)\n m = (x >= _precalc_fact.size)\n out = np.empty(x.shape)\n out[~m] = _precalc_fact[x[~m].astype(int)]\n x = x[m]\n out[m] = x * np.log(x) - x + 0.5 * np.log(2 * np.pi * x)\n return out\n\n\ndef _log_pi_r(d, k, p=0.5):\n return k * math.log(p) + log_factorial(k + d) - log_factorial(k) - log_factorial(d)\n\n\ndef _log_pi(d, k, p=0.5):\n return _log_pi_r(d, k, p) + (d + 1) * math.log(1 - p)\n\n\ndef _expectation(d, t, p=0.5):\n \"\"\"The conditional tail probability for the negative binomial\n random variable for the number of incorrect target matches\n\n Parameters\n ----------\n d : int\n The number of decoys retained\n t : int\n The number of targets retained\n p : float, optional\n The parameter :math:`p` of the negative binomial,\n :math:`1 / 1 + (ratio of the target database to the decoy database)`\n\n Returns\n -------\n float\n The theoretical number of incorrect target matches\n\n References\n ----------\n Levitsky, L. I., Ivanov, M. V., Lobas, A. A., & Gorshkov, M. V. (2017).\n Unbiased False Discovery Rate Estimation for Shotgun Proteomics Based\n on the Target-Decoy Approach. Journal of Proteome Research, 16(2), 393–397.\n https://doi.org/10.1021/acs.jproteome.6b00144\n \"\"\"\n if t is None:\n return d + 1\n t = int(t)\n m = np.arange(t + 1, dtype=int)\n pi = np.exp(_log_pi(d, m, p))\n return ((m * pi).cumsum() / pi.cumsum())[t]\n\n\ndef expectation_correction(targets, decoys, ratio):\n \"\"\"Estimate a correction for the number of decoys at a given\n score threshold for small data size.\n\n Parameters\n ----------\n targets : int\n The number of targets retained\n decoys : int\n The number of decoys retained\n ratio : float\n The ratio of target database to decoy database\n\n Returns\n -------\n float\n The number of decoys to add for the correction\n\n References\n ----------\n Levitsky, L. I., Ivanov, M. V., Lobas, A. A., & Gorshkov, M. V. (2017).\n Unbiased False Discovery Rate Estimation for Shotgun Proteomics Based\n on the Target-Decoy Approach. Journal of Proteome Research, 16(2), 393–397.\n https://doi.org/10.1021/acs.jproteome.6b00144\n \"\"\"\n p = 1. / (1. 
+ ratio)\n tfalse = _expectation(decoys, targets, p)\n return tfalse\n\n\nclass TargetDecoyAnalyzer(object):\n \"\"\"Estimate the False Discovery Rate using the Target-Decoy method.\n\n Attributes\n ----------\n database_ratio : float\n The ratio of the size of the target database to the decoy database\n target_weight : float\n A weight (less than 1.0) to put on target matches to make them weaker\n than decoys in situations where there is little data.\n decoy_correction : Number\n A quantity to use to correct for decoys, and if non-zero,\n will indicate that the negative binomial correction for decoys should be\n used.\n decoy_pseudocount : Number\n The value to report when querying the decoy count for a score exceeding\n the maximum score of a decoy match. This is distinct from `decoy_correction`\n decoy_count : int\n The total number of decoys\n decoys : list\n The decoy matches to consider\n n_decoys_at : dict\n The number of decoy matches above each threshold\n n_targets_at : dict\n The number of target matches above each threshold\n target_count : TYPE\n The total number of targets\n targets : list\n The target matches to consider\n thresholds : list\n The distinct score thresholds\n with_pit : bool\n Whether or not to use the \"percent incorrect target\" adjustment\n \"\"\"\n\n def __init__(self, target_series, decoy_series, with_pit=False, decoy_correction=0, database_ratio=1.0,\n target_weight=1.0, decoy_pseudocount=1.0):\n self.targets = target_series\n self.decoys = decoy_series\n self.target_count = len(target_series)\n self.decoy_count = len(decoy_series)\n self.database_ratio = database_ratio\n self.target_weight = target_weight\n self.with_pit = with_pit\n self.decoy_correction = decoy_correction\n self.decoy_pseudocount = decoy_pseudocount\n\n self._calculate_thresholds()\n self._q_value_map = self.calculate_q_values()\n\n def pack(self):\n self.targets = []\n self.decoys = []\n\n def _calculate_thresholds(self):\n self.n_targets_at = {}\n self.n_decoys_at = {}\n\n target_series = self.targets\n decoy_series = self.decoys\n\n thresholds = np.array(sorted({case.score for case in target_series} |\n {case.score for case in decoy_series}), dtype=float)\n self.thresholds = thresholds\n if len(thresholds) > 0:\n self.n_targets_at = ScoreThresholdCounter(\n target_series, self.thresholds).counts_above_threshold\n self.n_decoys_at = ScoreThresholdCounter(\n decoy_series, self.thresholds).counts_above_threshold\n\n def n_decoys_above_threshold(self, threshold):\n try:\n if threshold > self.n_decoys_at.max_key():\n return self.decoy_pseudocount + self.decoy_correction\n return self.n_decoys_at[threshold] + self.decoy_correction\n except IndexError:\n if len(self.n_decoys_at) == 0:\n return self.decoy_correction\n else:\n raise\n\n def n_targets_above_threshold(self, threshold):\n try:\n return self.n_targets_at[threshold]\n except IndexError:\n if len(self.n_targets_at) == 0:\n return 0\n else:\n raise\n\n def expectation_correction(self, t, d):\n return expectation_correction(t, d, self.database_ratio)\n\n def target_decoy_ratio(self, cutoff):\n\n decoys_at = self.n_decoys_above_threshold(cutoff)\n targets_at = self.n_targets_above_threshold(cutoff)\n decoy_correction = 0\n if self.decoy_correction:\n try:\n decoy_correction = self.expectation_correction(targets_at, decoys_at)\n except Exception as ex:\n print(ex)\n try:\n ratio = (decoys_at + decoy_correction) / float(\n targets_at * self.database_ratio * self.target_weight)\n except ZeroDivisionError:\n ratio = (decoys_at + 
decoy_correction)\n return ratio, targets_at, decoys_at\n\n def estimate_percent_incorrect_targets(self, cutoff):\n target_cut = self.target_count - self.n_targets_above_threshold(cutoff)\n decoy_cut = self.decoy_count - self.n_decoys_above_threshold(cutoff)\n percent_incorrect_targets = target_cut / float(decoy_cut)\n\n return percent_incorrect_targets\n\n def estimate_fdr(self, cutoff):\n if self.with_pit:\n percent_incorrect_targets = self.estimate_percent_incorrect_targets(cutoff)\n else:\n percent_incorrect_targets = 1.0\n return percent_incorrect_targets * self.target_decoy_ratio(cutoff)[0]\n\n def calculate_q_values(self):\n thresholds = sorted(self.thresholds, reverse=False)\n mapping = {}\n last_score = float('inf')\n last_q_value = 0\n for threshold in thresholds:\n try:\n q_value = self.estimate_fdr(threshold)\n # If a worse score has a lower q-value than a better score, use that q-value\n # instead.\n if last_q_value < q_value and last_score < threshold:\n q_value = last_q_value\n last_q_value = q_value\n last_score = threshold\n mapping[threshold] = q_value\n except ZeroDivisionError:\n mapping[threshold] = 1.\n return NearestValueLookUp(mapping)\n\n def score_for_fdr(self, fdr_estimate):\n i = -1\n for score, fdr in self.q_value_map.items:\n i += 1\n if fdr_estimate >= fdr:\n cella = self.q_value_map.items[i]\n cellb = self.q_value_map.items[i - 1]\n cellc = self.q_value_map.items[i + 1]\n distance_a = abs(fdr_estimate - cella.value)\n distance_b = abs(fdr_estimate - cellb.value)\n distance_c = abs(fdr_estimate - cellc.value)\n min_distance = min(distance_a, distance_b, distance_c)\n if min_distance == distance_a:\n return cella.score\n elif min_distance == distance_b:\n return cellb.score\n else:\n return cellc.score\n return float('inf')\n\n def plot(self, ax=None):\n if ax is None:\n fig, ax = plt.subplots(1)\n thresholds = sorted(self.thresholds, reverse=False)\n target_counts = np.array(\n [self.n_targets_above_threshold(i) for i in thresholds])\n decoy_counts = np.array([self.n_decoys_above_threshold(i)\n for i in thresholds])\n fdr = np.array([self.q_value_map[i] for i in thresholds])\n try:\n at_5_percent = np.where(fdr < 0.05)[0][0]\n except IndexError:\n at_5_percent = -1\n try:\n at_1_percent = np.where(fdr < 0.01)[0][0]\n except IndexError:\n at_1_percent = -1\n line1 = ax.plot(thresholds, target_counts, label='Target', color='blue')\n line2 = ax.plot(thresholds, decoy_counts, label='Decoy', color='orange')\n tline5 = ax.vlines(\n thresholds[at_5_percent], 0, np.max(target_counts), linestyle='--', color='green',\n lw=0.75, label='5% FDR')\n tline1 = ax.vlines(\n thresholds[at_1_percent], 0, np.max(target_counts), linestyle='--', color='skyblue',\n lw=0.75, label='1% FDR')\n ax.set_ylabel(\"# Matches Retained\")\n ax2 = ax.twinx()\n line3 = ax2.plot(thresholds, fdr, label='FDR',\n color='grey', linestyle='--')\n ax2.set_ylabel(\"FDR\")\n ax.legend([line1[0], line2[0], line3[0], tline5, tline1],\n ['Target', 'Decoy', 'FDR', \"5% FDR\", \"1% FDR\"])\n\n lo, hi = ax.get_ylim()\n lo = max(lo, 0)\n ax.set_ylim(lo, hi)\n lo, hi = ax2.get_ylim()\n ax2.set_ylim(0, hi)\n\n lo, hi = ax.get_xlim()\n ax.set_xlim(-1, hi)\n lo, hi = ax2.get_xlim()\n ax2.set_xlim(-1, hi)\n return ax\n\n def q_values(self):\n q_map = self._q_value_map\n if len(q_map) == 0:\n import warnings\n warnings.warn(\"No FDR estimate what possible.\")\n for target in self.targets:\n target.q_value = 0.0\n for decoy in self.decoys:\n decoy.q_value = 0.0\n return\n for target in self.targets:\n try:\n 
target.q_value = q_map[target.score]\n except IndexError:\n target.q_value = 0.0\n for decoy in self.decoys:\n try:\n decoy.q_value = q_map[decoy.score]\n except IndexError:\n decoy.q_value = 0.0\n\n def score(self, spectrum_match):\n try:\n spectrum_match.q_value = self._q_value_map[spectrum_match.score]\n except IndexError:\n import warnings\n warnings.warn(\"Empty q-value mapping. q-value will be 0.\")\n spectrum_match.q_value = 0.0\n return spectrum_match\n\n def score_all(self, solution_set):\n for spectrum_match in solution_set:\n self.score(spectrum_match)\n solution_set.q_value = solution_set.best_solution().q_value\n return solution_set\n\n @property\n def q_value_map(self):\n return self._q_value_map\n\n @property\n def fdr_map(self):\n return self._q_value_map\n\n\nclass GroupwiseTargetDecoyAnalyzer(object):\n def __init__(self, target_series, decoy_series, with_pit=False, grouping_functions=None, decoy_correction=0,\n database_ratio=1.0, target_weight=1.0, decoy_pseudocount=1.0):\n if grouping_functions is None:\n grouping_functions = [lambda x: True]\n self.targets = target_series\n self.decoys = decoy_series\n self.with_pit = with_pit\n self.grouping_functions = []\n self.groups = []\n self.group_fits = []\n self.decoy_pseudocount = decoy_pseudocount\n self.decoy_correction = decoy_correction\n self.database_ratio = database_ratio\n self.target_weight = target_weight\n\n for fn in grouping_functions:\n self.add_group(fn)\n\n self.partition()\n\n def pack(self):\n self.targets = []\n self.decoys = []\n self.groups = [[] for g in self.groups]\n for fit in self.group_fits:\n fit.pack()\n\n def partition(self):\n for target in self.targets:\n i = self.find_group(target)\n self.groups[i][0].append(target)\n for decoy in self.decoys:\n i = self.find_group(decoy)\n self.groups[i][1].append(decoy)\n for group in self.groups:\n fit = TargetDecoyAnalyzer(\n *group, with_pit=self.with_pit,\n decoy_correction=self.decoy_correction,\n database_ratio=self.database_ratio,\n target_weight=self.target_weight,\n decoy_pseudocount=self.decoy_pseudocount)\n self.group_fits.append(fit)\n\n def add_group(self, fn):\n self.grouping_functions.append(fn)\n self.groups.append(([], []))\n return len(self.groups)\n\n def find_group(self, spectrum_match):\n for i, fn in enumerate(self.grouping_functions):\n if fn(spectrum_match):\n return i\n return None\n\n def q_values(self):\n for group in self.group_fits:\n group.q_values()\n\n def score(self, spectrum_match):\n i = self.find_group(spectrum_match)\n fit = self.group_fits[i]\n return fit.score(spectrum_match)\n" ]
[ [ "numpy.max", "numpy.array", "numpy.isnan", "numpy.empty", "numpy.log", "numpy.round", "matplotlib.pyplot.subplots", "numpy.where", "numpy.arange" ] ]
bnovate/bactoml
[ "66c89e6db876c5fddca3b8c00e64ae68dc9940e2" ]
[ "bactoml/df_pipeline.py" ]
[ "\"\"\"\nThis module implements the classes needed to integrate sk-learn Pipeline and\nFeatureUnion with pandas DataFrame and FCMeasurment instances (see \nFlowCytometryTools library).\n\n\"\"\"\nimport numpy as np\nimport pandas as pd\n\nfrom types import LambdaType\nfrom itertools import product, chain\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.pipeline import FeatureUnion, _transform_one\nfrom sklearn.externals.joblib import Parallel, delayed\nfrom FlowCytometryTools import FCMeasurement\n\nfrom bactoml.fcdataset import FCDataSet\nfrom bactoml.decision_tree_classifier import HistogramTransform\n\nclass DFLambdaFunction(BaseEstimator, TransformerMixin):\n \"\"\"Apply a lambda function to a pandas DataFrame. \n The implementation is compatible with the sk-learn API.\n\n \"\"\"\n\n def __init__(self, func, copy=False):\n \"\"\"\n Parameters:\n -----------\n\n func : lambda function,\n Takes a single pandas DataFrame instance as input.\n\n copy : boolean,\n Determine if the transform returns a copy of\n the pandas DataFrame instance or the instance itself.\n\n \"\"\"\n self.func = func\n self.copy = copy\n\n def fit(self, X, y=None):\n \"\"\"Fit all the transformers unsing X.\n\n Parameters:\n -----------\n\n X : pandas DataFrame instance.\n Input data.\n\n Returns:\n --------\n\n self : DFLambdaFunction.\n This estimator.\n\n \"\"\"\n if isinstance(self.func, LambdaType):\n return self\n\n elif isinstance(self.func, BaseEstimator):\n self.func.fit(X, y)\n return self\n\n def transform(self, X, y=None):\n \"\"\"\n Parameters:\n -----------\n\n X : pandas DataFrame instance.\n Input data.\n\n Returns:\n --------\n\n pandas Dataframe instance.\n Result of the call of self.func on the pandas \n DataFrame instance X.\n\n \"\"\"\n X_ = X if not self.copy else X.copy()\n\n if isinstance(self.func, LambdaType):\n try:\n #check for iterable input\n iter(X_)\n except TypeError:\n return self.func(X_) \n else:\n if isinstance(X_, FCMeasurement) or (isinstance(X_, pd.DataFrame) and X_.shape[0] == 1):\n return self.func(X_) \n else:\n out = []\n for i in range(len(X_)):\n out.append(self.func(X_[i]))\n return out\n \n elif isinstance(self.func, BaseEstimator):\n X_[X_.columns] = self.func.transform(X_.values)\n return X_\n\nclass DFInPlaceLambda(BaseEstimator, TransformerMixin):\n \"\"\"Perform in place modification on the DataFrame \n columns. 
\n\n \"\"\"\n\n def __init__(self, func, columns=None):\n \"\"\"\n Parameters:\n -----------\n\n func : lambda function,\n Takes two inputs, the pandas DataFrame\n and the colums:\n DFInPlaceLambda(['TCC'], lambda C, DF : C / DF['VOL'])\n\n columns : array of strings,\n Contains the name of the columns to \n which the function func will be \n applied.\n\n \"\"\"\n self.func = func\n self.columns = columns\n\n def fit(self, X, y=None):\n \"\"\"Fit all the transformers unsing X.\n\n Parameters:\n -----------\n\n X : pandas DataFrame instance.\n Input data.\n\n Returns:\n --------\n\n self : DFLambdaFunction.\n This estimator.\n\n \"\"\"\n return self\n\n def transform(self, X, y=None):\n \"\"\"Apply the transform to the pandas DataFrame in place.\n\n Parameters:\n -----------\n\n X : pandas DataFrame instance.\n Input data.\n\n Returns:\n --------\n\n pandas Dataframe instance.\n Result of the call of self.func on the columns of \n the pandas DataFrame instance X.\n\n \"\"\"\n X_ = X.copy()\n\n if self.columns:\n for c in self.columns:\n X_[c] = self.func(X_[c], X_)\n else:\n for c in X.columns:\n X_[c] = self.func(X_[c], X_)\n return X_\n\nclass DFFeatureUnion(FeatureUnion):\n \"\"\"Feature union that support dataframe as inputs and\n outputs.\n Inputs can be FCMeasurement but to be able to concatenate\n the results, the output will be dataframes.\n\n Note : nesting the DFFeatureUnion doesn't conserve the columns name of the deeper DFFeatureUnion.\n\n \"\"\"\n\n\n def transform(self, X):\n \"\"\"Transform X separately by each transformer or Pipeline then concatenate the results.\n\n Parameters:\n -----------\n\n X : FCMeasurment or pandas DataFrame.\n Input data to be transformed.\n\n Returns:\n --------\n \n X_t : pandas DataFrame, shape(n_samples, \n sum_n_components)\n hstack of results of transformers.\n sum_n_components is the sum of n_components\n (output dimension) over transformers.\n\n \"\"\"\n Xs = Parallel(n_jobs=self.n_jobs)(\n delayed(_transform_one)(trans, weight, X)\n for name, trans, weight in self._iter())\n\n #get the name of the branches of the feature union and the dimension of the results\n names = list(zip(*self._iter()))[0]\n dim = list(map(lambda X : np.atleast_1d(X).size, Xs)) #if not isinstance(X, int) else 1\n\n #generate the name of the columns\n columns = [list(map(lambda X : '{}_{}'.format(*X), product([n], range(d)))) if d > 1 else [n] for n, d in zip(names, dim)]\n columns = list(chain.from_iterable(columns)) #flatten the array\n\n #flatten the list of returns\n values = np.concatenate(list(map(lambda X : X.values.flatten() if isinstance(X, pd.DataFrame) else np.atleast_1d(X), Xs)))\n\n X_t = pd.DataFrame(data={col : [val] for col, val in zip(columns, values)})\n\n return X_t\n\n def fit_transform(self, X, y=None, **fit_params):\n \"\"\"Fit all transformers, transform the data and \n concatenate the results.\n\n Parameters:\n -----------\n\n X : FCMeasurement or pandas DataFrame.\n Input data to be transformed.\n\n y : array-like, shape(n_sample, ...) optional\n Targets for supervised learning.\n\n Returns:\n --------\n\n X_t : pandas DataFrame, shape (n_samples,\n sum_n_components)\n hstack of results of transformers.\n sum_n_components is the sum of n_components\n (output dimension) over transformers.\n\n \"\"\"\n return self.fit(X, y, **fit_params).transform(X)\n\nclass SampleWisePipeline(Pipeline):\n \"\"\"Apply the whole pipeline to each sample sequentially.\n\n At each steps Sklearn Pipeline applies the fit/transform\n function to the whole dataset. 
This object applies all the\n steps to each sample before moving to the next one. This\n is useful when dealing with preprocessing steps, when\n the samples have different dimensions and the pipeline \n implements a dimensionality reduction / feature selection,\n or when dealing with time series and the order of the \n sample is important.\n\n \"\"\"\n\n def __init__(self, steps, memory=None):\n \"\"\"\n Parameters:\n -----------\n\n See sklearn.pipeline.Pipeline documentation.\n Note : all the steps must be pre-fitted / initialized.\n \n \"\"\"\n super().__init__(steps, memory)\n\n def fit_transform(self, X, y=None):\n \"\"\"Fit the model and transform with the final estimator.\n\n Process the sample sequentially and for each fits all the \n transforms one after the other and transforms the sample, \n then uses fit_transform on the transformed data with the\n final estimator.\n\n Parameters:\n -----------\n\n X : iterable,\n Training data. Must fulfill input requirements of first\n step of the pipeline.\n \n Returns:\n --------\n \n Xt : pandas DataFrame, shape = [n_sample, n_transformed_features]\n Transformed samples.\n\n \"\"\"\n try:\n #apply the whole pipeline fit_transform sequentially to all the sample\n if isinstance(X, FCDataSet) or isinstance(X, list):\n output = pd.concat((super(SampleWisePipeline, self).fit_transform(sample) for sample in X), axis=0, join='outer')\n output = output.reset_index(drop=True)\n elif isinstance(X, pd.DataFrame):\n output = pd.concat((super(SampleWisePipeline, self).fit_transform(pd.DataFrame(data=[sample.values], columns=sample.index)) for _, sample in X.iterrows()), axis=0, join='outer')\n output = output.reset_index(drop=True)\n\n except AttributeError:\n print('One or multiple estimator in the pipeline are not pre-fitted / initialized.')\n raise\n\n return output\n\n\nclass AggregatedHist:\n \n \"\"\"Generates an aggregated histogram of all the FCMeasurements.\"\"\"\n\n def __init__(self, fcms, edges, pre_pipe = None):\n \n \"\"\"\n Parameters:\n ----------\n\n fcms: FCDataSet object\n edges: dct, shape (n_channels, )\n Dictionary with key:value as follow\n 'channel_id':edges with edges an array \n containing the edges of the bins along a \n particular channel.\n pre_pipe: Pipeline object\n scikit-learn pipeline object consisting of preprocessing steps \n (e.g. tlog step, gating)\n \n \"\"\"\n self.fcms = fcms\n self.edges = edges\n\n if isinstance(pre_pipe, Pipeline):\n self.preprocessing = True\n self.pipe = pre_pipe\n else:\n self.preprocessing = False\n\n def aggregate(self):\n\n \"\"\"Applies a finely spaced grid to every FCMeasurement and aggregates the resulting \n counts into a single histogram.\n \n Returns:\n --------\n \n super_hist \n aggregated histogram\n\n \"\"\"\n hist = HistogramTransform(self.edges)\n\n if self.preprocessing:\n fc_prep = self.pipe.transform(self.fcms[0])\n else:\n fc_prep = self.fcms[0]\n super_hist = hist.transform(fc_prep)\n\n for fc in self.fcms[1:]:\n if self.preprocessing:\n fc_prep = self.pipe.transform(fc)\n else:\n fc_prep = fc\n super_hist['counts'] += hist.transform(fc_prep)['counts']\n\n return super_hist\n" ]
[ [ "pandas.DataFrame", "sklearn.externals.joblib.Parallel", "numpy.atleast_1d", "sklearn.externals.joblib.delayed" ] ]
Turing311/Data-Efficient-Model-Compression
[ "1519dbf35beca8d2f538533c92f0c07cf6343ae0" ]
[ "pu_compress/model.py" ]
[ "# 2019.12.05-Changed output of forward function, adding attention based model.\n# Huawei Technologies Co., Ltd. <[email protected]>\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, in_planes, planes, stride=1):\n super(BasicBlock, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion*planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(self.expansion*planes)\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.bn2(self.conv2(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, in_planes, planes, stride=1):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(self.expansion*planes)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion*planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(self.expansion*planes)\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = F.relu(self.bn2(self.conv2(out)))\n out = self.bn3(self.conv3(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\nclass SELayer(nn.Module):\n def __init__(self, channel, reduction=16):\n super(SELayer, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.fc = nn.Sequential(\n nn.Linear(channel, channel // reduction, bias=False),\n nn.ReLU(inplace=True),\n nn.Linear(channel // reduction, channel, bias=False),\n nn.Sigmoid()\n )\n\n def forward(self, x):\n b, c, _, _ = x.size()\n y = self.avg_pool(x).view(b, c)\n y = self.fc(y).view(b, c, 1, 1)\n return x * y.expand_as(x)\n \nclass ResNet_PU(nn.Module):\n def __init__(self, block, num_blocks, num_classes=10):\n super(ResNet_PU, self).__init__()\n self.in_planes = 64\n\n self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)\n self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)\n self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)\n self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)\n self.fc = nn.Linear(512*block.expansion, num_classes)\n self.se = SELayer(960)\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1]*(num_blocks-1)\n layers = []\n for stride in strides:\n layers.append(block(self.in_planes, planes, stride))\n self.in_planes = planes * block.expansion\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out1 = self.layer1(out)\n out2 = self.layer2(out1)\n out3 = self.layer3(out2)\n out4 = self.layer4(out3)\n out1 = 
F.avg_pool2d(out1, 32)\n out2 = F.avg_pool2d(out2, 16)\n out3 = F.avg_pool2d(out3, 8)\n out4 = F.avg_pool2d(out4, 4)\n out = torch.cat((out1, out2, out3, out4),1)\n out = self.se(out)\n out = out.view(out.size(0), -1)\n out = self.fc(out)\n return out\n\nclass ResNet(nn.Module):\n def __init__(self, block, num_blocks, num_classes=10):\n super(ResNet, self).__init__()\n self.in_planes = 64\n\n self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)\n self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)\n self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)\n self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)\n self.linear = nn.Linear(512*block.expansion, num_classes)\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1]*(num_blocks-1)\n layers = []\n for stride in strides:\n layers.append(block(self.in_planes, planes, stride))\n self.in_planes = planes * block.expansion\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = F.avg_pool2d(out, 4)\n out = out.view(out.size(0), -1)\n out = self.linear(out)\n return out\n \ndef ResNet18_PU(num_classes=10):\n return ResNet_PU(BasicBlock, [2,2,2,2], num_classes=num_classes)\n\ndef ResNet34_PU(num_classes=10):\n return ResNet_PU(BasicBlock, [3,4,6,3], num_classes=num_classes)\n\ndef ResNet18(num_classes=10):\n return ResNet(BasicBlock, [2,2,2,2], num_classes=num_classes)\n\ndef ResNet34(num_classes=10):\n return ResNet(BasicBlock, [3,4,6,3], num_classes=num_classes)\n\n# def ResNet50():\n# return ResNet(Bottleneck, [3,4,6,3])\n\n# def ResNet101():\n# return ResNet(Bottleneck, [3,4,23,3])\n\n# def ResNet152():\n# return ResNet(Bottleneck, [3,8,36,3])" ]
[ [ "torch.nn.Linear", "torch.cat", "torch.nn.functional.avg_pool2d", "torch.nn.AdaptiveAvgPool2d", "torch.nn.Sigmoid", "torch.nn.Sequential", "torch.nn.BatchNorm2d", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.nn.functional.relu" ] ]
iamgmujtaba/gif_acoustic
[ "96fd6d87329609b939f09e0a2bae69663d6b555f" ]
[ "utils/feature_extraction.py" ]
[ "from keras.utils import to_categorical\nimport numpy as np\n\nimport os\nimport librosa\n\n\n####################################################################\n####################################################################\n# description: Method to split a song into multiple songs using overlapping windows\ndef splitsongs(X, y, window = 0.1, overlap = 0.5):\n # Empty lists to hold our results\n temp_X = []\n temp_y = []\n\n # Get the input song array size\n xshape = X.shape[0]\n chunk = int(xshape*window)\n offset = int(chunk*(1.-overlap))\n \n # Split the song and create new ones on windows\n spsong = [X[i:i+chunk] for i in range(0, xshape - chunk + offset, offset)]\n for s in spsong:\n temp_X.append(s)\n temp_y.append(y)\n\n return np.array(temp_X), np.array(temp_y)\n \n####################################################################\n####################################################################\n# Method to convert a list of songs to a np array of melspectrograms\ndef to_melspectrogram(songs, n_fft = 1024, hop_length = 512):\n # Transformation function\n melspec = lambda x: librosa.feature.melspectrogram(x, n_fft = n_fft,\n hop_length = hop_length)[:,:,np.newaxis]\n\n # map transformation of input songs to melspectrogram using log-scale\n tsongs = map(melspec, songs)\n return np.array(list(tsongs))\n\n####################################################################\n####################################################################\n# Read audio files from folder\ndef read_data(src_dir, genres, song_samples, \n n_fft = 1024, hop_length = 512, debug = True):\n # Empty array of dicts with the processed features from all files\n arr_specs = []\n arr_genres = []\n\n # Read files from the folders\n for x, _ in genres.items():\n folder = src_dir + x\n \n for root, subdirs, files in os.walk(folder):\n for file in files:\n # Read the audio file\n file_name = folder + \"/\" + file\n signal, sr = librosa.load(file_name)\n signal = signal[:song_samples]\n \n # Debug process\n if debug:\n print(\"Reading file: {}\".format(file_name))\n \n # Convert to dataset of spectograms/melspectograms\n signals, y = splitsongs(signal, genres[x])\n \n # Convert to \"spec\" representation\n specs = to_melspectrogram(signals, n_fft, hop_length)\n \n # Save files\n arr_genres.extend(y)\n arr_specs.extend(specs)\n \n return np.array(arr_specs), to_categorical(np.array(arr_genres))\n" ]
[ [ "numpy.array" ] ]
vavaroutsos/zipline
[ "f053bc56531b91c8936e6578f9d6ebab125c4328" ]
[ "tests/pipeline/test_column.py" ]
[ "\"\"\"\nTests BoundColumn attributes and methods.\n\"\"\"\nimport operator\nfrom unittest import skipIf\n\nfrom nose_parameterized import parameterized\nfrom pandas import Timestamp, DataFrame\nfrom pandas.util.testing import assert_frame_equal\n\nfrom zipline.lib.labelarray import LabelArray\nfrom zipline.pipeline import Pipeline\nfrom zipline.pipeline.data import USEquityPricing\nfrom zipline.pipeline.data.testing import TestingDataSet as TDS\nfrom zipline.pipeline.domain import US_EQUITIES\nfrom zipline.testing.fixtures import (\n WithSeededRandomPipelineEngine,\n WithTradingSessions,\n ZiplineTestCase\n)\nfrom zipline.utils.pandas_utils import ignore_pandas_nan_categorical_warning, \\\n new_pandas, skip_pipeline_new_pandas\n\n\nclass LatestTestCase(WithSeededRandomPipelineEngine,\n WithTradingSessions,\n ZiplineTestCase):\n\n START_DATE = Timestamp('2014-01-01')\n END_DATE = Timestamp('2015-12-31')\n SEEDED_RANDOM_PIPELINE_SEED = 100\n ASSET_FINDER_EQUITY_SIDS = list(range(5))\n ASSET_FINDER_COUNTRY_CODE = 'US'\n SEEDED_RANDOM_PIPELINE_DEFAULT_DOMAIN = US_EQUITIES\n\n @classmethod\n def init_class_fixtures(cls):\n super(LatestTestCase, cls).init_class_fixtures()\n cls.engine = cls.seeded_random_engine\n cls.sids = cls.ASSET_FINDER_EQUITY_SIDS\n cls.assets = cls.engine._finder.retrieve_all(\n cls.ASSET_FINDER_EQUITY_SIDS)\n\n def expected_latest(self, column, slice_):\n loader = self.seeded_random_loader\n index = self.trading_days[slice_]\n columns = self.assets\n values = loader.values(column.dtype, self.trading_days, self.sids)[\n slice_]\n\n if column.dtype.kind in ('O', 'S', 'U'):\n # For string columns, we expect a categorical in the output.\n return LabelArray(\n values,\n missing_value=column.missing_value,\n ).as_categorical_frame(\n index=index,\n columns=columns,\n )\n\n return DataFrame(\n loader.values(column.dtype, self.trading_days, self.sids)[slice_],\n index=self.trading_days[slice_],\n columns=self.assets,\n )\n\n @skipIf(new_pandas, skip_pipeline_new_pandas)\n def test_latest(self):\n columns = TDS.columns\n pipe = Pipeline(\n columns={c.name: c.latest for c in columns},\n )\n\n cal_slice = slice(20, 40)\n dates_to_test = self.trading_days[cal_slice]\n result = self.engine.run_pipeline(\n pipe,\n dates_to_test[0],\n dates_to_test[-1],\n )\n for column in columns:\n with ignore_pandas_nan_categorical_warning():\n col_result = result[column.name].unstack()\n\n expected_col_result = self.expected_latest(column, cal_slice)\n assert_frame_equal(col_result, expected_col_result)\n\n @parameterized.expand([\n (operator.gt,),\n (operator.ge,),\n (operator.lt,),\n (operator.le,),\n ])\n def test_comparison_errors(self, op):\n for column in TDS.columns:\n with self.assertRaises(TypeError):\n op(column, 1000)\n with self.assertRaises(TypeError):\n op(1000, column)\n with self.assertRaises(TypeError):\n op(column, 'test')\n with self.assertRaises(TypeError):\n op('test', column)\n\n def test_comparison_error_message(self):\n column = USEquityPricing.volume\n err_msg = (\n \"Can't compare 'EquityPricing<US>.volume' with 'int'.\"\n \" (Did you mean to use '.latest'?)\"\n )\n\n with self.assertRaises(TypeError) as e:\n column < 1000\n self.assertEqual(str(e.exception), err_msg)\n\n try:\n column.latest < 1000\n except TypeError:\n self.fail()\n" ]
[ [ "pandas.Timestamp", "pandas.util.testing.assert_frame_equal" ] ]
cleber-si/Buscando-Exoplanetas-com-IA
[ "56cccccaeb51982ee90ef51a22f3825a63503acc" ]
[ "treino/Floresta_Randomica.py" ]
[ "import numpy as np\nimport gerenciar_arqs as GA\nfrom joblib import dump\n\n\n# Carrega os dados para treino\ng_treino, l_treino = GA.carrega_treino()\nX_g_treino, X_l_treino, y_treino = GA.ajusta_dados(g_treino, l_treino)\n\nX_g_treino_np = np.array(X_g_treino)\nX_l_treino_np = np.array(X_l_treino)\ny_treino = np.array(y_treino)\n\n# Carrega os dados aumentados para treino\ng_treino_a, l_treino_a = GA.carrega_treino_aumentado()\nX_g_treino_a, X_l_treino_a, y_treino_a = GA.ajusta_dados(g_treino_a, l_treino_a)\n\nX_g_treino_a_np = np.array(X_g_treino_a)\nX_l_treino_a_np = np.array(X_l_treino_a)\ny_treino_a = np.array(y_treino_a)\n\n# Concatena X_g com X_l\nX_treino = [np.concatenate([X_g_treino_np[i],X_l_treino_np[i]]) for i in range(len(X_g_treino_np))]\nX_treino_a = [np.concatenate([X_g_treino_a_np[i],X_l_treino_a_np[i]]) for i in range(len(X_g_treino_a_np))]\n\n\nfrom sklearn.ensemble import RandomForestClassifier\n\n# Treina o modelo com os dados padrão\nforest_clf = RandomForestClassifier(n_estimators=100, random_state=23)\n\nforest_clf.fit(X_treino, y_treino)\n#y_pred = logistc_clf.pred(X_teste) # ---> Necessário carregar os dados de teste\n\n# Treina o modelo com os dados aumentados\nforest_clf_A = RandomForestClassifier(n_estimators=100, random_state=23)\n\nforest_clf_A.fit(X_treino_a, y_treino_a)\n\n\n# Local para salvar os modelos\nsalva_arq = \"/home/caminho/da/pasta/modelos\"\n\n# Cria a pasta para armazenar os arquivos salvos caso não exista.\nif not os.path.exists(salva_arq):\n os.makedirs(salva_arq)\n\n# Salva o primeiro modelo\nsalva_ML_arq = salva_arq + \"/forest_clf.joblib\"\ndump(forest_clf, salva_ML_arq)\n\n# Salva o segundo modelo\nsalva_ML_arq = salva_arq + \"/forest_clf_A.joblib\"\ndump(forest_clf_A, salva_ML_arq)\n" ]
[ [ "sklearn.ensemble.RandomForestClassifier", "numpy.array", "numpy.concatenate" ] ]
NathanDai5287/air-pollution-covid-19
[ "dbf030bba7df22efc53d2262cea469309c884791" ]
[ "Data Collection/Apparatus/top_counties.py" ]
[ "import numpy as np\nimport requests\nimport datetime\nimport pandas as pd\nfrom io import StringIO\nimport csv\nimport sys\nimport os\n\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\n\nimport zip_conversion\n\ndef most_infected(n: int, day: datetime.date, return_zip: bool) -> list:\n \"\"\"returns a list of the most infected counties\n\n Args:\n n (int): top n counties will be returned\n day (datetime.date): date to observe counties\n\n Returns:\n list: names of counties or zip codes\n \"\"\"\n \n day = day.strftime('%m-%d-%Y')\n url = f'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/{day}.csv'\n\n df = pd.read_csv(StringIO(requests.get(url).text))\n df = df.loc[df['Country_Region'] == 'US'].sort_values(by='Confirmed', ascending=False) # only US and sort by Confirmed\n df = df.loc[(df['Admin2'] != 'Unassigned') & (df['Province_State'] != 'Puerto Rico')] # remove Unassigned\n\n locations = list(df.head(n)[['Admin2', 'Province_State']].values)\n\n df = pd.read_csv(r'Data Collection\\Apparatus\\Docs\\zip_code_database.csv')[['county', 'state', 'zip']]\n df['county'] = df['county'].str.replace(' County','').str.replace(' City','')\n \n\n if (return_zip):\n state_abbreviation = pd.read_csv(r'Data Collection\\Apparatus\\Docs\\states_and_counties.csv')\n state_abbreviation['State Name'].str.title()\n\n result = []\n for county, state in locations:\n if (type(county) == str):\n county = county.replace(' County', '').replace(' City', '')\n state = zip_conversion.state_to_abbreviation(state, state_abbreviation)\n result.append([str(code).zfill(5) for code in zip_conversion.county_to_zip(county, state, df)])\n\n result = [codes for codes in result if codes != []]\n return [y for x in result for y in x]\n\n else:\n result = []\n for county, state in locations:\n if (type(county) == str):\n county = county.replace(' County', '').replace(' City', '')\n result.append((county, state))\n\n return result\n\n\ndef top_percent(n: float, day: datetime.date) -> int:\n \"\"\"how many counties make up n percent of cases\n\n Args:\n n (float): fraction of total cases\n day (datetime.date): day to check\n\n Returns:\n int: this many counties makes up n of the cases\n \"\"\"\n\n day = day.strftime('%m-%d-%Y')\n url = f'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/{day}.csv'\n\n df = pd.read_csv(StringIO(requests.get(url).text))\n df = df.loc[df['Country_Region'] == 'US'].sort_values(by='Confirmed', ascending=False)\n\n confirmed = list(df['Confirmed'])\n reach = sum(confirmed) * n\n top = list(np.cumsum(confirmed) >= reach).index(True)\n\n return top\n\n\nif __name__ == \"__main__\":\n date = datetime.date(2020, 4, 1)\n zip_code = False\n a = top_percent(0.77, date)\n # b = most_infected(a, date, zip_code)\n print(a)\n\n # print(a)\n exit(0)\n\n with open('counties.csv', 'w', newline='') as f:\n writer = csv.writer(f)\n # writer.writerows([[i] for i in b])\n\n if (zip_code):\n for code in b:\n f.write(code + '\\n')\n else:\n for location in b:\n writer.writerow(location)\n" ]
[ [ "pandas.read_csv", "numpy.cumsum" ] ]
seattleflu/id3c-customizations
[ "d1321d86777b6ecff9c6f502622c5d1bf9a2efda" ]
[ "lib/seattleflu/id3c/cli/command/clinical.py" ]
[ "\"\"\"\nParse and upload clinical data.\n\nClinical data will contain PII (personally identifiable information) and\nunnecessary information that does not need to be stored. This process will only\npull out specific columns of interest that will then be stored in the receiving\nschema of ID3C.\n\"\"\"\nimport click\nimport hashlib\nimport logging\nimport os\nimport re\nimport pandas as pd\nimport id3c.db as db\nfrom functools import partial\nfrom math import ceil\nfrom id3c.db.session import DatabaseSession\nfrom id3c.cli import cli\nfrom id3c.cli.io.pandas import dump_ndjson, load_file_as_dataframe, read_excel\nfrom . import (\n add_provenance,\n age_ceiling,\n barcode_quality_control,\n trim_whitespace,\n group_true_values_into_list,\n)\n\n\nLOG = logging.getLogger(__name__)\n\n\[email protected](\"clinical\", help = __doc__)\ndef clinical():\n pass\n\n# UW Clinical subcommand\[email protected](\"parse-uw\")\[email protected](\"uw_filename\", metavar = \"<UW Clinical Data filename>\")\[email protected](\"-o\", \"--output\", metavar=\"<output filename>\",\n help=\"The filename for the output of missing barcodes\")\n\n\ndef parse_uw(uw_filename, output):\n \"\"\"\n Process clinical data from UW.\n\n Given a <UW Clinical Data filename> of an Excel document, selects specific\n columns of interest and reformats the queried data into a stream of JSON\n documents suitable for the \"upload\" sibling command.\n\n <output filename> is the desired filepath of the output CSV of problematic\n barcodes encountered while parsing. If not provided, the problematic\n barcodes print to the log.\n\n All clinical records parsed are output to stdout as newline-delimited JSON\n records. You will likely want to redirect stdout to a file.\n \"\"\"\n if uw_filename.endswith('.csv'):\n read = pd.read_csv\n else:\n read = pd.read_excel\n\n read_uw = partial(\n read,\n dtype = {'tract_identifier': 'string'},\n parse_dates = ['Collection.Date', 'LabDtTm'],\n na_values = ['NA', '', 'Unknown', 'NULL'],\n )\n\n clinical_records = (\n read_uw(uw_filename)\n .pipe(trim_whitespace)\n .pipe(add_provenance, uw_filename)\n .pipe(coalesce_columns, \"encountered\", \"Collection.Date\", \"LabDtTm\")\n .pipe(create_unique_identifier))\n\n # Standardize names of columns that will be added to the database\n column_map = {\n 'Age': 'age',\n 'Collection_ID': 'barcode',\n 'EthnicGroup': 'HispanicLatino',\n 'Fac': 'site',\n 'encountered': 'encountered',\n 'PersonID': 'individual',\n 'Race': 'Race',\n 'Sex': 'AssignedSex',\n 'tract_identifier': 'census_tract',\n 'fluvaccine': 'FluShot',\n 'identifier': 'identifier',\n '_provenance': '_provenance',\n }\n\n clinical_records = clinical_records.rename(columns=column_map)\n\n # Normalize barcode to strings and lowercase\n clinical_records['barcode'] = clinical_records['barcode'].str.lower()\n clinical_records['individual'] = clinical_records['individual'].str.lower()\n\n barcode_quality_control(clinical_records, output)\n\n # Age must be converted to Int64 dtype because pandas does not support NaNs\n # with normal type 'int'\n clinical_records[\"age\"] = clinical_records[\"age\"].astype(pd.Int64Dtype())\n\n # Subset df to drop missing barcodes\n clinical_records = drop_missing_rows(clinical_records, 'barcode')\n\n # Drop columns we're not tracking\n clinical_records = clinical_records[column_map.values()]\n\n clinical_records = remove_pii(clinical_records)\n\n\n dump_ndjson(clinical_records)\n\n\ndef coalesce_columns(df: pd.DataFrame, new_column: str, column_a: str, column_b: str) -> 
pd.DataFrame:\n \"\"\"\n Coalesces values from *column_a* and *column_b* of *df* into *new_column*.\n \"\"\"\n return df.assign(**{new_column: df[column_a].combine_first(df[column_b])})\n\n\ndef create_unique_identifier(df: pd.DataFrame):\n \"\"\"Generate a unique identifier for each encounter and drop duplicates\"\"\"\n\n # This could theoretically use the EID (encounter id) column provided to\n # us, but sticking to this constructed identifier has two benefits I see:\n #\n # 1. We will continue to match existing data if we get updated records or\n # re-process old datasets. This is somewhat unlikely, but possible.\n #\n # 2. More importantly, a clinical encounter may span multiple days (unlike\n # those in ID3C) and so multiple samples may be collected on different\n # days from one encounter. We want to keep treating those as multiple\n # encounters on our end.\n #\n # -trs, 2 Dec 2019\n\n df['identifier'] = (df['labMRN'] + df['LabAccNum'] + \\\n df['encountered'].astype('string')\n ).str.lower()\n return df.drop_duplicates(subset=\"identifier\")\n\ndef remove_pii(df: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n Remove personally identifiable information from a given *df*.\n Return the new DataFrame.\n \"\"\"\n df['age'] = df['age'].apply(age_ceiling)\n df[\"individual\"] = df[\"individual\"].apply(generate_hash)\n df[\"identifier\"] = df[\"identifier\"].apply(generate_hash)\n\n return df\n\n\ndef generate_hash(identifier: str):\n \"\"\"\n Generate hash for *identifier* that is linked to identifiable records.\n Must provide a \"PARTICIPANT_DEIDENTIFIER_SECRET\" as an OS environment\n variable.\n \"\"\"\n secret = os.environ[\"PARTICIPANT_DEIDENTIFIER_SECRET\"]\n\n assert len(secret) > 0, \"Empty *secret* provided!\"\n assert len(identifier) > 0, \"Empty *identifier* provided!\"\n\n new_hash = hashlib.sha256()\n new_hash.update(identifier.encode(\"utf-8\"))\n new_hash.update(secret.encode(\"utf-8\"))\n return new_hash.hexdigest()\n\ndef drop_missing_rows(df: pd.DataFrame, column: str) -> pd.DataFrame:\n \"\"\"\n Returns a filtered version of the given *df* where rows with ``null`` values\n for the given *column* have been removed.\n \"\"\"\n return df.loc[df[column].notnull()]\n\[email protected](\"parse-sch\")\[email protected](\"sch_filename\", metavar = \"<SCH Clinical Data filename>\")\[email protected](\"-o\", \"--output\", metavar=\"<output filename>\",\n help=\"The filename for the output of missing barcodes\")\n\ndef parse_sch(sch_filename, output):\n \"\"\"\n Process clinical data from SCH.\n\n All clinical records parsed are output to stdout as newline-delimited JSON\n records. 
You will likely want to redirect stdout to a file.\n \"\"\"\n clinical_records = load_file_as_dataframe(sch_filename) \\\n .replace({\"\": None, \"NA\": None})\n clinical_records['age'] = clinical_records['age'].astype('float')\n\n clinical_records = trim_whitespace(clinical_records)\n clinical_records = add_provenance(clinical_records, sch_filename)\n clinical_records = add_insurance(clinical_records)\n\n # Standardize column names\n column_map = {\n \"pat_id2\": \"individual\",\n \"study_id\": \"barcode\",\n \"drawndate\": \"encountered\",\n \"age\": \"age\",\n \"sex\": \"AssignedSex\",\n \"ethnicity\": \"HispanicLatino\",\n \"race\": \"Race\",\n \"vaccine_given\": \"FluShot\",\n \"MedicalInsurance\": \"MedicalInsurance\",\n \"census_tract\": \"census_tract\",\n \"_provenance\": \"_provenance\",\n }\n clinical_records = clinical_records.rename(columns=column_map)\n\n barcode_quality_control(clinical_records, output)\n\n # Subset df to drop missing encountered date\n clinical_records = drop_missing_rows(clinical_records, 'encountered')\n\n # Drop unnecessary columns\n columns_to_keep = list(column_map.values()) + [ # Test result columns\n 'adeno', 'chlamydia', 'corona229e', 'corona_hku1', 'corona_nl63', 'corona_oc43',\n 'flu_a_h3', 'flu_a_h1_2009', 'flu_b', 'flu_a', 'flu_a_h1', 'hmpv', 'mycoplasma',\n 'paraflu_1_4', 'pertussis', 'rhino_ent', 'rsv'\n ]\n clinical_records = clinical_records[columns_to_keep]\n\n # Convert dtypes\n clinical_records[\"encountered\"] = pd.to_datetime(clinical_records[\"encountered\"])\n\n # Insert static value columns\n clinical_records[\"site\"] = \"SCH\"\n\n clinical_records = create_encounter_identifier(clinical_records)\n clinical_records = remove_pii(clinical_records)\n\n dump_ndjson(clinical_records)\n\n\ndef add_insurance(df: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n Adds a new column for insurance type to a given *df*. Returns the new\n DataFrame.\n \"\"\"\n def insurance(series: pd.Series) -> pd.Series:\n \"\"\" Returns an array of unique insurance types from a given *series*. \"\"\"\n insurance_columns = ['insurance_1', 'insurance_2']\n insurances = [ series[i] for i in insurance_columns if not pd.isna(series[i])]\n return list(set(insurances))\n\n df['MedicalInsurance'] = df.apply(insurance, axis='columns')\n return df\n\ndef create_encounter_identifier(df: pd.DataFrame) -> pd.DataFrame:\n \"\"\" Creates an encounter identifier column on a given *df*. Return the\n modified DataFrame.\n \"\"\"\n df[\"identifier\"] = (\n df[\"individual\"] + df[\"encountered\"].astype('string')\n ).str.lower()\n\n return df\n\n\[email protected](\"parse-kp\")\[email protected](\"kp_filename\", metavar = \"<KP Clinical Data filename>\")\[email protected](\"kp_specimen_manifest_filename\",\n metavar = \"<KP Specimen Manifest filename(s)>\",\n nargs = -1)\[email protected](\"--manifest-format\",\n metavar=\"<manifest format>\",\n default=\"year2\",\n type=click.Choice(['year1','year2']),\n help=\"The format of input manifest file; default is \\\"year2\\\"\")\[email protected](\"-o\", \"--output\", metavar=\"<output filename>\",\n help=\"The filename for the output of missing barcodes\")\n\ndef parse_kp(kp_filename, kp_specimen_manifest_filename, manifest_format, output):\n \"\"\"\n Process clinical data from KP.\n\n All clinical records parsed are output to stdout as newline-delimited JSON\n records. 
You will likely want to redirect stdout to a file.\n \"\"\"\n clinical_records = pd.read_csv(kp_filename)\n clinical_records.columns = clinical_records.columns.str.lower()\n\n clinical_records = trim_whitespace(clinical_records)\n clinical_records = add_provenance(clinical_records, kp_filename)\n clinical_records = add_kp_manifest_data(clinical_records, kp_specimen_manifest_filename, manifest_format)\n\n clinical_records = convert_numeric_columns_to_binary(clinical_records)\n clinical_records = rename_symptoms_columns(clinical_records)\n clinical_records = collapse_columns(clinical_records, 'symptom')\n clinical_records = collapse_columns(clinical_records, 'race')\n\n clinical_records['FluShot'] = clinical_records['fluvaxdt'].notna()\n\n column_map = {\n \"enrollid\": \"individual\",\n \"enrolldate\": \"encountered\",\n \"barcode\": \"barcode\",\n \"age\": \"age\",\n \"sex\": \"AssignedSex\",\n \"race\": \"Race\",\n \"hispanic\": \"HispanicLatino\",\n \"symptom\": \"Symptoms\",\n \"FluShot\": \"FluShot\",\n \"censustract\": \"census_tract\",\n \"_provenance\": \"_provenance\",\n }\n\n if manifest_format==\"year1\":\n del column_map[\"censustract\"]\n \n clinical_records = clinical_records.rename(columns=column_map)\n\n barcode_quality_control(clinical_records, output)\n\n # Drop unnecessary columns\n clinical_records = clinical_records[column_map.values()]\n\n # Convert dtypes\n clinical_records[\"encountered\"] = pd.to_datetime(clinical_records[\"encountered\"])\n\n # Insert static value columns\n clinical_records[\"site\"] = \"KP\"\n\n clinical_records = create_encounter_identifier(clinical_records)\n clinical_records = remove_pii(clinical_records)\n\n # Placeholder columns for future data.\n # See https://seattle-flu-study.slack.com/archives/CCAA9RBFS/p1568156642033700?thread_ts=1568145908.029300&cid=CCAA9RBFS\n clinical_records[\"MedicalInsurace\"] = None\n\n dump_ndjson(clinical_records)\n\n\ndef add_kp_manifest_data(df: pd.DataFrame, manifest_filenames: tuple, manifest_format: str) -> pd.DataFrame:\n \"\"\"\n Join the specimen manifest data from the given *manifest_filenames* with the\n given clinical records DataFrame *df*\n \"\"\"\n manifest_data = pd.DataFrame()\n\n if manifest_format==\"year1\":\n sheet_name = 'KP'\n rename_map = {\n 'Barcode ID (Sample ID)': 'barcode',\n 'kp_id': 'enrollid',\n }\n else:\n sheet_name = 'aliquoting'\n rename_map = {\n 'sample_id': 'barcode',\n 'kp_id': 'enrollid',\n }\n\n for filename in manifest_filenames:\n manifest = read_excel(filename, sheet_name = sheet_name)\n manifest_data = manifest_data.append(manifest)\n\n manifest_data.dropna(subset = ['kp_id'], inplace = True)\n \n regex = re.compile(r\"^KP-([0-9]{6,})-[0-9]$\", re.IGNORECASE)\n manifest_data.kp_id = manifest_data.kp_id.apply(lambda x: regex.sub('WA\\\\1', x))\n\n manifest_data = manifest_data.rename(columns=rename_map)\n manifest_data = trim_whitespace(manifest_data)\n\n return df.merge(manifest_data[['barcode', 'enrollid']], how='left')\n\n\ndef convert_numeric_columns_to_binary(df: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n In a given DataFrame *df* of clinical records, convert a hard-coded list of\n columns from numeric coding to binary.\n\n See Kaiser Permanente data dictionary for details\n \"\"\"\n numeric_columns = [\n 'runnynose',\n 'hispanic',\n 'racewhite',\n 'raceblack',\n 'raceasian',\n 'raceamerind',\n 'racenativehi',\n ]\n for col in numeric_columns:\n df.loc[df[col] > 1, col] = None\n\n return df\n\n\ndef rename_symptoms_columns(df: pd.DataFrame) -> pd.DataFrame:\n 
\"\"\" Renames the hard-coded symptoms columns in a given DataFrame *df* \"\"\"\n symptoms_columns = [\n 'fever',\n 'sorethroat',\n 'runnynose',\n 'cough'\n ]\n\n symptoms_map = {}\n for symptom in symptoms_columns:\n symptoms_map[symptom] = 'symptom' + symptom\n\n return df.rename(columns=symptoms_map)\n\n\ndef collapse_columns(df: pd.DataFrame, stub: str, pid='enrollid') -> pd.DataFrame:\n \"\"\"\n Given a pandas DataFrame *df* of clinical records, collapses the 0/1\n encoding of multiple race options into a single array in a resulting\n column called \"Race\". Removes the original \"Race*\" option columns. Returns\n the new DataFrame.\n \"\"\"\n stub_data = df.filter(regex=f'{pid}|{stub}*', axis='columns')\n stub_columns = list(stub_data)\n stub_columns.remove(pid)\n\n df = df.drop(columns=stub_columns)\n\n stub_data_long = pd.wide_to_long(stub_data, stub, i=pid, j=f\"new_{stub}\",\n suffix='\\\\w+').reset_index()\n\n stub_data_new = group_true_values_into_list(stub_data_long, stub, [pid])\n\n return df.merge(stub_data_new, how='left')\n\n\[email protected](\"upload\")\[email protected](\"clinical_file\",\n metavar = \"<clinical.ndjson>\",\n type = click.File(\"r\"))\n\ndef upload(clinical_file):\n \"\"\"\n Upload clinical records into the database receiving area.\n\n <clinical.ndjson> must be a newline-delimited JSON file produced by this\n command's sibling commands.\n\n Once records are uploaded, the clinical ETL routine will reconcile the\n clinical records with known sites, individuals, encounters and samples.\n \"\"\"\n db = DatabaseSession()\n\n try:\n LOG.info(f\"Copying clinical records from {clinical_file.name}\")\n\n row_count = db.copy_from_ndjson((\"receiving\", \"clinical\", \"document\"), clinical_file)\n\n LOG.info(f\"Received {row_count:,} clinical records\")\n LOG.info(\"Committing all changes\")\n db.commit()\n\n except:\n LOG.info(\"Rolling back all changes; the database will not be modified\")\n db.rollback()\n raise\n" ]
[ [ "pandas.to_datetime", "pandas.isna", "pandas.DataFrame", "pandas.wide_to_long", "pandas.Int64Dtype", "pandas.read_csv" ] ]
jxy/tensorflow
[ "8275aa702f787341451b91cbc76f98b8b307c562" ]
[ "tensorflow/python/autograph/utils/testing.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Testing utilities.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport re\nimport sys\nimport types\nimport unittest\n\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.framework import op_callbacks\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\n\n\nclass AutoGraphTestCase(test.TestCase):\n \"\"\"Tests specialized for AutoGraph, which run as tf.functions.\n\n These tests use a staged programming-like approach: most of the test code runs\n as-is inside a tf.function, but the assertions are lifted outside the\n function, and run with the corresponding function values instead.\n\n For example, the test:\n\n def test_foo(self):\n baz = bar();\n self.assertEqual(baz, value)\n\n is equivalent to writing:\n\n def test_foo(self):\n @tf.function\n def test_fn():\n baz = bar();\n return baz, value\n\n baz_actual, value_actual = test_fn()\n self.assertEqual(baz_actual, value_actual)\n\n Only assertions that require evaluation outside the function are lifted\n outside the function scope. 
The rest execute inline, at function creation\n time.\n \"\"\"\n\n def __new__(cls, *args):\n obj = super().__new__(cls)\n\n for name in cls.__dict__:\n if not name.startswith(unittest.TestLoader.testMethodPrefix):\n continue\n m = getattr(obj, name)\n if callable(m):\n wrapper = obj._run_as_tf_function(m)\n setattr(obj, name, types.MethodType(wrapper, obj))\n\n return obj\n\n def _op_callback(\n self, op_type, inputs, attrs, outputs, op_name=None, graph=None):\n self.trace_log.append(op_type)\n\n def _run_as_tf_function(self, fn):\n\n def wrapper(self):\n @def_function.function(autograph=False) # Testing autograph itself.\n def fn_wrapper():\n self.assertions = []\n self.raises_cm = None\n self.graph_assertions = []\n self.trace_log = []\n fn()\n targets = [args for _, args in self.assertions]\n return targets\n\n try:\n tensors = fn_wrapper()\n\n for assertion in self.graph_assertions:\n assertion(fn_wrapper.get_concrete_function().graph)\n\n actuals = self.evaluate(tensors)\n\n except: # pylint:disable=bare-except\n if self.raises_cm is not None:\n # Note: Yes, the Raises and function contexts cross.\n self.raises_cm.__exit__(*sys.exc_info())\n return\n else:\n raise\n\n for (assertion, _), values in zip(self.assertions, actuals):\n assertion(*values)\n\n return wrapper\n\n def variable(self, name, value, dtype):\n with ops.init_scope():\n if name not in self.variables:\n self.variables[name] = variables.Variable(value, dtype=dtype)\n self.evaluate(self.variables[name].initializer)\n return self.variables[name]\n\n def setUp(self):\n super().setUp()\n self.variables = {}\n self.trace_log = []\n self.raises_cm = None\n op_callbacks.add_op_callback(self._op_callback)\n\n def tearDown(self):\n op_callbacks.remove_op_callback(self._op_callback)\n self.trace_log = None\n self.variables = None\n super().tearDown()\n\n def assertGraphContains(self, op_regex, n):\n def assertion(graph):\n matches = []\n for node in graph.as_graph_def().node:\n if re.match(op_regex, node.name):\n matches.append(node)\n for fn in graph.as_graph_def().library.function:\n for node_def in fn.node_def:\n if re.match(op_regex, node_def.name):\n matches.append(node_def)\n self.assertLen(matches, n)\n\n self.graph_assertions.append(assertion)\n\n def assertOpCreated(self, op_type):\n self.assertIn(op_type, self.trace_log)\n\n def assertOpsNotCreated(self, op_types):\n self.assertEmpty(set(op_types) & set(self.trace_log))\n\n def assertNoOpsCreated(self):\n self.assertEmpty(self.trace_log)\n\n def assertEqual(self, *args):\n self.assertions.append((super().assertEqual, list(args)))\n\n def assertLess(self, *args):\n self.assertions.append((super().assertLess, list(args)))\n\n def assertGreaterEqual(self, *args):\n self.assertions.append((super().assertGreaterEqual, list(args)))\n\n def assertDictEqual(self, *args):\n self.assertions.append((super().assertDictEqual, list(args)))\n\n def assertRaisesRuntime(self, *args):\n if self.raises_cm is not None:\n raise ValueError('cannot use more than one assertRaisesRuntime in a test')\n self.raises_cm = self.assertRaisesRegex(*args)\n self.raises_cm.__enter__()\n" ]
[ [ "tensorflow.python.ops.variables.Variable", "tensorflow.python.eager.def_function.function", "tensorflow.python.framework.ops.init_scope", "tensorflow.python.framework.op_callbacks.add_op_callback", "tensorflow.python.framework.op_callbacks.remove_op_callback" ] ]
ahmedengu/h2o-3
[ "ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11" ]
[ "h2o-py/tests/testdir_jira/pyunit_pubdev_4723.py" ]
[ "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport pandas\nimport h2o\nfrom tests import pyunit_utils\nfrom pandas.util.testing import assert_frame_equal\n\nTEST_DATASET = pyunit_utils.locate('smalldata/logreg/prostate_missing.csv')\n\n\ndef test_4723():\n pandas_frame = pandas.read_csv(TEST_DATASET)\n frame = h2o.import_file(TEST_DATASET)\n\n # Ensure that the as_data_frame method does not modify the frame\n assert_frame_equal(pandas_frame, frame.as_data_frame())\n\n # Now insert some missing values\n expected_rows_count = frame['RACE'].shape[0]\n\n # Check that the shape of the data frames is not modified\n pandas_default_rows_count = frame['RACE'].as_data_frame(use_pandas=True).shape[0]\n assert pandas_default_rows_count == expected_rows_count, \"Result's rows count when using pandas with default na_value equal to expected_rows_count. Expected: %s, actual: %s\" % (\n expected_rows_count, pandas_default_rows_count)\n no_pandas_default_rows_count = len(frame['RACE'].as_data_frame(use_pandas=False, header=False))\n assert no_pandas_default_rows_count == expected_rows_count, \"Result's rows count when NOT using pandas must be equal to expected_rows_count. Expected: %s, actual: %s\" % (\n expected_rows_count, no_pandas_default_rows_count)\n\n\ndef test_npe_string_vec():\n f = h2o.create_frame(string_fraction = 1)\n f['C1'].insert_missing_values(1)\n print(f['C1'][0,0])\n\nif __name__ == \"__main__\":\n pyunit_utils.standalone_test(test_4723)\n pyunit_utils.standalone_test(test_npe_string_vec)\nelse:\n test_4723()\n test_npe_string_vec()\n" ]
[ [ "pandas.read_csv" ] ]
inuinana/MusicGenreClassifiaction
[ "0cdfcae8d296b0526fdefd8e9880b317361df99c" ]
[ "backend/src/feature_extraction/audio_feature_extraction.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 23 02:01:21 2018\n@author: Akihiro Inui\n\"\"\"\n# Import libraries/modules\nimport os\nimport time\nimport numpy as np\nfrom tqdm import tqdm\nfrom backend.src.common.config_reader import ConfigReader\nfrom backend.src.preprocess.audio_preprocess import AudioPreProcess\nfrom backend.src.feature_extraction.mel_spectrogram import mel_spectrogram\nfrom backend.src.feature_extraction.fft import FFT\nfrom backend.src.feature_extraction.zerocrossing import zerocrossing\nfrom backend.src.feature_extraction.mfcc import MFCC\nfrom backend.src.feature_extraction.centroid import centroid\nfrom backend.src.feature_extraction.rolloff import rolloff\nfrom backend.src.feature_extraction.rms import rms\nfrom backend.src.feature_extraction.flux import Flux\nfrom backend.src.feature_extraction.osc import OSC\nfrom backend.src.feature_extraction.low_energy import low_energy\nfrom backend.src.feature_extraction.modulation_spectrum_feature import MSF\nfrom backend.src.utils.stats_tool import get_mean, get_std\nfrom backend.src.utils.file_utils import FileUtil\nfrom backend.src.data_process.data_process import DataProcess\n\n\nclass AudioFeatureExtraction:\n \"\"\"\n Audio feature extraction to audio files\n Supported features: mfcc, spectral centroid,\n \"\"\"\n\n # Initialization\n def __init__(self, setting_file: str):\n \"\"\"\n Initialization for parameters and classes\n :param setting_file: config file\n \"\"\"\n # Load parameters from config file\n self.cfg = ConfigReader(setting_file)\n self.sampling_rate = self.cfg.sampling_rate\n self.frame_time = self.cfg.frame_time\n self.overlap_rate = self.cfg.overlap_rate\n self.window_type = self.cfg.window_type\n self.fft_size = self.cfg.fft_size\n self.mod_fft_size = self.cfg.mod_fft_size\n self.window_size = int(self.sampling_rate*0.001*self.frame_time)\n self.hop_size = int(self.window_size*self.cfg.overlap_rate)\n\n # Initialize pre-processing\n self.APP = AudioPreProcess(self.frame_time, self.overlap_rate, self.window_type)\n\n # Feature selection\n self.short_feature_selection_dict = self.cfg.section_reader(\"short_feature_selection\")\n self.long_feature_selection_dict = self.cfg.section_reader(\"long_feature_selection\")\n self.short_feature_list = self.__init_short_feature_select()\n self.long_feature_list = self.__init_long_feature_select()\n\n # Initialize feature extraction classes\n self.mfcc = MFCC(self.cfg.mfcc_coeff, self.sampling_rate, self.fft_size, self.cfg.mfcc_total_filters)\n self.flux = Flux(self.sampling_rate)\n self.osc = OSC(self.cfg.osc_param, self.sampling_rate, self.fft_size)\n self.msf = MSF(self.cfg.omsc_param, self.sampling_rate, self.fft_size, self.mod_fft_size)\n\n def __init_short_feature_select(self) -> list:\n \"\"\"\n Extract setting for short-term feature extraction from config file\n :return list of features to extract\n \"\"\"\n short_feature_list = []\n for short_feature, switch in self.short_feature_selection_dict.items():\n if switch == \"True\":\n short_feature_list.append(short_feature)\n return short_feature_list\n\n def __init_long_feature_select(self) -> list:\n \"\"\"\n Extract setting for long-term feature extraction from config file\n :return list of features to extract\n \"\"\"\n long_feature_list = []\n for short_feature, switch in self.long_feature_selection_dict.items():\n if switch == \"True\":\n long_feature_list.append(short_feature)\n return long_feature_list\n\n def pre_processing(self, audio_file: str) -> tuple:\n 
\"\"\"\n Pre-processing to audio file\n :param audio_file: name of audio file\n :return tuple of pre-processed audio signal\n \"\"\"\n return self.APP.apply(audio_file)\n\n # Feature extraction to one frame\n def extract_short_frame(self, framed_audio: tuple):\n \"\"\"\n Short-term feature extraction to one frame\n :param framed_audio: tuple of framed audio data from audio file\n :return power_spectrum: power_spectrum from short-term frame\n :return dictionary of extracted features from framed audio data\n {key: name of feature, value: tuple of features from all frames}\n \"\"\"\n # Apply FFT\n spectrum = FFT.fft(framed_audio, self.fft_size)\n power_spectrum = FFT.power_fft(framed_audio, self.fft_size)\n\n # Apply feature extraction to a framed audio and store into a dictionary\n feature_dict = {}\n for short_feature in self.short_feature_list:\n if short_feature == \"zcr\":\n feature_dict[short_feature] = zerocrossing(framed_audio)\n if short_feature == \"mfcc\":\n feature_dict[short_feature] = self.mfcc.main(spectrum)\n if short_feature == \"rms\":\n feature_dict[short_feature] = rms(framed_audio)\n if short_feature == \"centroid\":\n feature_dict[short_feature] = centroid(power_spectrum, self.fft_size, self.sampling_rate)\n if short_feature == \"rolloff\":\n feature_dict[short_feature] = rolloff(power_spectrum, self.cfg.rolloff_param)\n if short_feature == \"flux\":\n feature_dict[short_feature] = self.flux.main(power_spectrum)\n if short_feature == \"osc\":\n feature_dict[short_feature] = self.osc.main(power_spectrum)\n return power_spectrum, feature_dict\n\n def extract_long_frame(self, long_frame_audio: list, long_frame_spectrum: list) -> dict:\n \"\"\"\n Long-term feature extraction to one frame\n :param long_frame_audio: list of audio data from short-term frame\n :param long_frame_spectrum: list of spectrum from short-term frame\n :return dictionary of extracted features from framed audio data\n {key: name of feature, value: tuple of features from all frames}\n \"\"\"\n # Store extracted features into a dictionary (key:name of feature, value: list of extracted features in frames)\n feature_dict = {}\n\n # Apply feature extraction to a framed audio and store into a dictionary\n for long_feature in self.long_feature_list:\n if long_feature == \"low_energy\":\n feature_dict[long_feature] = low_energy(long_frame_audio)\n if long_feature == \"omsc\":\n feature_dict[long_feature] = self.msf.omsc(long_frame_spectrum, self.mod_fft_size)\n if long_feature == \"msfm\":\n feature_dict[long_feature] = self.msf.msfm(long_frame_spectrum, self.mod_fft_size)\n if long_feature == \"mscm\":\n feature_dict[long_feature] = self.msf.mscm(long_frame_spectrum, self.mod_fft_size)\n return feature_dict\n\n def extract_entire_audio(self, input_audio_file: str):\n \"\"\"\n Read audio file and extract Mel-spectrogram\n :param input_audio_file: Input audio file\n :return: Mel-spectrogram: Mel-spectrogram(currently) in numpy 2D array\n \"\"\"\n # Read audio file and extract mel-spectrogram from entire audio signal\n return mel_spectrogram(input_audio_file, self.fft_size, self.cfg.num_mels, normalize=True)\n\n def extract_file(self, input_audio_file: str):\n \"\"\"\n Feature extraction to one audio file\n :param input_audio_file: name of the audio file\n :return dictionary of extracted features from audio file\n {key: name of feature, value: list of array(number of frames)}\n \"\"\"\n # Prepare a dictionary to store extracted feature\n feature_dict = {}\n\n # Pre-processing to audio file\n processed_audio = 
self.pre_processing(input_audio_file)\n\n # Extract Mel-spectrogram from the entire audio\n feature_dict['mel_spectrogram'] = self.extract_entire_audio(input_audio_file)\n\n # Apply feature extraction to all frames and store into dictionary\n short_frame_number = 0\n long_frame_audio = []\n long_frame_power_spectrum = []\n\n # Store whole short-term features in list\n for short_frame_audio in processed_audio:\n # Extract short-term features\n short_frame_power_spectrum, short_feature_dict = self.extract_short_frame(short_frame_audio)\n\n # Store short-term features in dictionary\n for short_feature_type in self.short_feature_list:\n feature_dict.setdefault(short_feature_type, []).append(short_feature_dict[short_feature_type])\n\n # Extract long-term features when the number of short frames reach to a certain number\n if short_frame_number == self.cfg.long_frame_length:\n long_feature_dict = self.extract_long_frame(long_frame_audio, long_frame_power_spectrum)\n # Store long-term features in dictionary\n for long_feature in self.long_feature_list:\n feature_dict.setdefault(long_feature, []).append(long_feature_dict[long_feature])\n\n # Reset cached short-term feature\n short_frame_number = 0\n long_frame_audio = []\n long_frame_power_spectrum = []\n\n # Update short-term feature cache\n short_frame_number += 1\n long_frame_audio.append(short_frame_audio)\n long_frame_power_spectrum.append(short_frame_power_spectrum)\n\n return feature_dict\n\n def extract_directory(self, input_directory: str):\n \"\"\"\n Feature extraction to a folder which contains audio files\n :param input_directory: folder name which has audio files\n :return dictionary of extracted features from audio file\n {key: name of files, value: list of extracted features}\n \"\"\"\n # Extract file names in the input directory\n file_names = FileUtil.get_file_names(input_directory)\n\n # Extract features from audio files in a directory\n # file_feature_stat_dict = {}\n file_feature_dict = {}\n start = time.time()\n\n # Extract each audio file\n for count, audio_file in tqdm(enumerate(file_names)):\n # Extract features from one audio file\n file_feature_dict[audio_file] = self.extract_file(os.path.join(input_directory, audio_file))\n\n print(\"Extracted {0} with {1} \\n\".format(input_directory, time.time() - start))\n return file_feature_dict\n\n def extract_dataset(self, dataset_path: str):\n \"\"\"\n Feature extraction to dataset\n Extract time series feature as 2D pandas dataframe and 3D numpy array, as well as label vector as list\n :param dataset_path: path to dataset\n :return directory_files_feature_dict: dictionary of extracted features from all audio files in dataset folder\n {key: name of directory, value: list of file names {key: file name, value: list of extracted features}}\n :return label_list: list of numerical label vector\n \"\"\"\n # Make label\n label_list = self.make_label_from_directory(dataset_path)\n\n # Get file names and store them into a dictionary\n directory_files_dict = {}\n for directory in FileUtil.get_folder_names(dataset_path, sort=True):\n directory_files_dict[directory] = FileUtil.get_file_names(os.path.join(dataset_path, directory))\n\n # Extract all features and store them into list\n directory_files_feature_dict = {}\n for directory, audio_files in tqdm(directory_files_dict.items()):\n # Apply feature extraction to one directory\n directory_files_feature_dict[directory] = self.extract_directory(os.path.join(dataset_path, directory))\n\n return directory_files_feature_dict, label_list\n\n 
@staticmethod\n def dict2array(directory_files_feature_dict: dict):\n \"\"\"\n Convert extracted feature to\n :param directory_files_feature_dict: dictionary of extracted features from all audio files in dataset folder\n {key: name of directory, value: list of file names {key: file name, value: list of extracted features}}\n :return: expert_feature_2d_array: 2D Numpy array of extracted feature using expert system\n :return: mel_spectrogram_3d_array: 3D Numpy array of extracted mel-spectrogram\n \"\"\"\n # Initialization\n processed_file = 0\n expert_feature_vector = []\n\n # Process for each class\n for class_name, file_feature_dict in directory_files_feature_dict.items():\n # Process for each file\n for file_name, feature_value_dict in file_feature_dict.items():\n file_feature_vector = []\n # Process for each feature\n for feature_name, feature in feature_value_dict.items():\n # Take stats across frames for expert system and append to list\n if type(feature) is list:\n file_feature_array = np.array(feature[:])\n if file_feature_array.ndim == 1:\n file_feature_vector.append(np.mean(file_feature_array))\n else:\n file_feature_vector.extend(np.mean(file_feature_array, axis=0))\n # Append mel-spectrogram to 3D array\n else:\n if processed_file == 0:\n mel_spectrogram_3d_array = np.dstack((np.empty(np.shape(feature), int), feature))\n mel_spectrogram_3d_array = mel_spectrogram_3d_array[:, :, 1]\n else:\n mel_spectrogram_3d_array = np.dstack((mel_spectrogram_3d_array, feature))\n\n # Append expert system feature vector\n expert_feature_vector.append(file_feature_vector)\n processed_file += 1\n\n # Transpose 3D array\n mel_spectrogram_3d_array = mel_spectrogram_3d_array.T\n # Convert list to 2D numpy array\n expert_feature_2d_array = np.array(expert_feature_vector)\n\n return expert_feature_2d_array, mel_spectrogram_3d_array\n\n @staticmethod\n def make_label_from_directory(dataset_path: str):\n # Init parameter\n dir_num = 0\n label_list = []\n\n # Iterate over directories\n for directory in FileUtil.get_folder_names(dataset_path, sort=True):\n # Make label as list\n label_list.extend([dir_num] * len(FileUtil.get_file_names(os.path.join(dataset_path, directory))))\n dir_num += 1\n return label_list\n\n @staticmethod\n def get_feature_stats(feature_frame_dict: dict, stat_type: str) -> dict:\n \"\"\"\n # Store statistics from features into dictionary\n :param feature_frame_dict:dictionary of extracted features from audio file\n {key: name of feature, value: list of array(number of frames)}\n :param stat_type: type of statistics\n :return feature_stat_dict: features from one audio file with statistics\n {key: name of feature, value: array or single value}\n \"\"\"\n # For each feature, compute statistical operation\n feature_stat_dict = {}\n\n for feature_name, values in feature_frame_dict.items():\n print(feature_name)\n if type(values[0]) is not list and values[0].ndim >= 2:\n if stat_type == \"mean\":\n feature_frame_dict[feature_name] = np.mean(values[:], axis=0) + 1e-8\n elif stat_type == \"std\":\n feature_stat_dict[feature_name] = np.std(values[:], axis=0)\n else:\n if stat_type == \"mean\":\n feature_frame_dict[feature_name] -= np.mean(feature_frame_dict[feature_name], axis=0) + 1e-8\n elif stat_type == \"std\":\n feature_stat_dict[feature_name] = get_std(feature_frame_dict[feature_name], \"r\")\n return feature_frame_dict\n" ]
[ [ "numpy.array", "numpy.shape", "numpy.mean", "numpy.std", "numpy.dstack" ] ]
eaplatanios/nig
[ "c806e534ae3b79ddcdf4093383d73ecf59947044", "c806e534ae3b79ddcdf4093383d73ecf59947044" ]
[ "src/experiment/nig/rcv1v2.py", "src/nig/ops/variable_ops.py" ]
[ "import logging\nimport nig\nimport numpy as np\nimport os\nimport tensorflow as tf\n\nfrom collections import OrderedDict\nfrom functools import partial\nfrom nig.data import loaders\n\nfrom experiment.nig import experiments\n\n__author__ = 'eaplatanios'\n\nlogger = logging.getLogger(__name__)\n\n\nclass RCV1V2Experiment(experiments.ExperimentBase):\n def __init__(self, data_subsets, architectures, activation=tf.nn.relu,\n labeled_batch_size=100, unlabeled_batch_size=100,\n test_data_proportion=0.1, max_iter=1000, abs_loss_chg_tol=1e-6,\n rel_loss_chg_tol=1e-6, loss_chg_iter_below_tol=5,\n logging_frequency=10, summary_frequency=100,\n checkpoint_frequency=1000, evaluation_frequency=10,\n variable_statistics_frequency=-1, run_meta_data_frequency=-1,\n working_dir=os.path.join(os.getcwd(), 'working'),\n checkpoint_file_prefix='ckpt', restore_sequentially=False,\n save_trained=True, optimizer=lambda: tf.train.AdamOptimizer(),\n gradients_processor=None):\n if isinstance(data_subsets, int):\n data_subsets = [data_subsets]\n self.data_subsets = data_subsets\n self.architectures = architectures\n # self.loss = nig.L2Loss()\n self.loss = nig.BinaryCrossEntropy(\n logit_outputs=False, one_hot_train_outputs=True)\n optimizer_opts = {\n 'batch_size': labeled_batch_size,\n 'max_iter': max_iter,\n 'abs_loss_chg_tol': abs_loss_chg_tol,\n 'rel_loss_chg_tol': rel_loss_chg_tol,\n 'loss_chg_iter_below_tol': loss_chg_iter_below_tol,\n 'grads_processor': gradients_processor}\n dataset_info = loaders.mulan.dataset_info['rcv1v2']\n num_features = dataset_info['num_features']\n num_labels = dataset_info['num_labels']\n models = [nig.MultiLayerPerceptron(\n input_size=num_features, output_size=num_labels,\n hidden_layer_sizes=architecture, activation=activation,\n softmax_output=False, sigmoid_output=True, log_output=False,\n train_outputs_one_hot=True, loss=self.loss, loss_summary=False,\n optimizer=optimizer, optimizer_opts=optimizer_opts)\n for architecture in self.architectures]\n # eval_metric = nig.HammingLoss(log_predictions=False)\n eval_metrics = [\n nig.Accuracy(\n log_outputs=False, scaled_outputs=True,\n one_hot_train_outputs=True, thresholds=0.5, macro_average=True),\n nig.AreaUnderCurve(\n log_outputs=False, scaled_outputs=True,\n one_hot_train_outputs=True, curve='pr', num_thresholds=100,\n macro_average=True, name='auc'),\n nig.Precision(\n log_outputs=False, scaled_outputs=True,\n one_hot_train_outputs=True, thresholds=0.5, macro_average=True),\n nig.Recall(\n log_outputs=False, scaled_outputs=True,\n one_hot_train_outputs=True, thresholds=0.5, macro_average=True),\n nig.F1Score(\n log_outputs=False, scaled_outputs=True,\n one_hot_train_outputs=True, thresholds=0.5, macro_average=True)]\n super(RCV1V2Experiment, self).__init__(\n models=models, eval_metrics=eval_metrics,\n labeled_batch_size=labeled_batch_size,\n unlabeled_batch_size=unlabeled_batch_size,\n test_data_proportion=test_data_proportion,\n logging_frequency=logging_frequency,\n summary_frequency=summary_frequency,\n checkpoint_frequency=checkpoint_frequency,\n evaluation_frequency=evaluation_frequency,\n variable_statistics_frequency=variable_statistics_frequency,\n run_meta_data_frequency=run_meta_data_frequency,\n working_dir=working_dir,\n checkpoint_file_prefix=checkpoint_file_prefix,\n restore_sequentially=restore_sequentially,\n save_trained=save_trained)\n\n def __str__(self):\n return 'rcv1v2'\n\n def experiment_information(self):\n return {'architectures': str(self.architectures),\n 'loss': str(self.loss)}\n\n def 
load_data(self, test_proportion=None):\n train_data = []\n test_data = []\n for i in self.data_subsets:\n train_data_subset, test_data_subset = loaders.mulan.load(\n os.path.join(self.working_dir, 'data'),\n 'rcv1v2_subset_' + str(i+1))\n train_data.append(train_data_subset)\n test_data.append(test_data_subset)\n train_data = (np.concatenate([d[0] for d in train_data], axis=0),\n np.concatenate([d[1] for d in train_data], axis=0))\n test_data = (np.concatenate([d[0] for d in test_data], axis=0),\n np.concatenate([d[1] for d in test_data], axis=0))\n if test_proportion is None:\n return train_data, test_data\n data = (np.concatenate([train_data[0], test_data[0]], axis=0),\n np.concatenate([train_data[1], test_data[1]], axis=0))\n if isinstance(self.seed, np.random.RandomState):\n rng = self.seed\n else:\n rng = np.random.RandomState(self.seed)\n indices = rng.permutation(np.arange(data[0].shape[0]))\n num_samples = len(indices)\n num_test = int(num_samples * test_proportion)\n train_data = tuple(d[indices[:-num_test]] for d in data)\n test_data = tuple(d[indices[-num_test:]] for d in data)\n return train_data, test_data\n\n\nif __name__ == '__main__':\n seed = 9999\n data_subsets = [0, 1, 2, 3, 4]\n architectures = [[1], [8],\n [16, 8], [32, 16],\n [128, 64, 32, 16], [128, 32, 8], [256, 128]]\n use_one_hot_encoding = True\n activation = nig.leaky_relu(0.01)\n labeled_batch_size = 128\n unlabeled_batch_size = 128\n test_data_proportion = 0.95\n max_iter = 5000\n abs_loss_chg_tol = 1e-6\n rel_loss_chg_tol = 1e-6\n loss_chg_iter_below_tol = 5\n logging_frequency = 100\n summary_frequency = -1\n checkpoint_frequency = -1\n evaluation_frequency = 100\n variable_statistics_frequency = -1\n run_meta_data_frequency = -1\n working_dir = os.path.join(os.getcwd(), 'working', 'rcv1v2')\n checkpoint_file_prefix = 'ckpt'\n restore_sequentially = False\n save_trained = False\n optimizer = lambda: tf.train.AdamOptimizer() # nig.gradient_descent(1e-1, decay_rate=0.99)\n gradients_processor = None # lambda g: tf.clip_by_norm(g, 1e-1)\n\n # optimizer = tf.contrib.opt.ScipyOptimizerInterface\n # optimizer_opts = {'options': {'maxiter': 10000}}\n\n # def consensus_loss_metric(outputs, consensus):\n # with tf.name_scope('consensus_loss_metric'):\n # outputs = tf.exp(outputs)\n # metric = tf.square(tf.sub(outputs, consensus))\n # metric = tf.reduce_sum(metric)\n # return metric\n consensus_loss_metric = None\n consensus_configurations = experiments.get_consensus_configurations(\n consensus_loss_weights=[0.0, 1.0],\n multiplier=labeled_batch_size / unlabeled_batch_size)\n\n with nig.dummy(): # tf.device('/cpu:0'):\n experiment = RCV1V2Experiment(\n data_subsets=data_subsets, architectures=architectures,\n activation=activation, labeled_batch_size=labeled_batch_size,\n unlabeled_batch_size=unlabeled_batch_size,\n test_data_proportion=test_data_proportion, max_iter=max_iter,\n abs_loss_chg_tol=abs_loss_chg_tol,\n rel_loss_chg_tol=rel_loss_chg_tol,\n loss_chg_iter_below_tol=loss_chg_iter_below_tol,\n logging_frequency=logging_frequency,\n summary_frequency=summary_frequency,\n checkpoint_frequency=checkpoint_frequency,\n evaluation_frequency=evaluation_frequency,\n variable_statistics_frequency=variable_statistics_frequency,\n run_meta_data_frequency=run_meta_data_frequency,\n working_dir=working_dir,\n checkpoint_file_prefix=checkpoint_file_prefix,\n restore_sequentially=restore_sequentially,\n save_trained=save_trained, optimizer=optimizer,\n gradients_processor=gradients_processor)\n learners = []\n for name, 
configuration in consensus_configurations:\n learner = partial(nig.ConsensusLearner, **configuration)\n learners.append((name, learner))\n learners.append(('CV', nig.CrossValidationLearner))\n learners = OrderedDict(learners)\n results = experiment.run(learners)\n experiments.save_results(\n results, filename=os.path.join(working_dir, 'results.pk'),\n update=True, use_backup=True, delete_backup=False, yaml_format=False)\n # results = experiments.load_results(\n # filename=os.path.join(working_dir, 'results.pk'), yaml_format=False)\n experiments.plot_results(results)\n", "# Copyright 2016, The NIG Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n# use this file except in compliance with the License. You may obtain a copy of\n# the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations under\n# the License.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport tensorflow as tf\n\n__author__ = 'eaplatanios'\n\n__all__ = ['create_local']\n\n\ndef create_local(shape, name, trainable=False, collections=None,\n validate_shape=True, dtype=tf.float32):\n \"\"\"Creates a new local variable.\n\n Args:\n shape (tuple, list): Shape of the variable.\n name (str): Name of the new variable.\n trainable (bool, optional): Optional boolean value indicating whether\n the new variable is trainable or not. Defaults to `False`.\n collections (list(str), optional): Optional list of collection names to\n which the new variable will be added. Defaults to `None`.\n validate_shape (bool, optional): Optional boolean value indicating\n whether to validate the shape of the new variable. Defaults to\n `True`.\n dtype (tf.DType, optional): Optional data type of the new variable.\n Defaults to `tf.float32`.\n\n Returns:\n tf.Variable: The created variable.\n \"\"\"\n # Make sure local variables are added to the tf.GraphKeys.LOCAL_VARIABLES\n # collection.\n collections = list(collections or [])\n collections += [tf.GraphKeys.LOCAL_VARIABLES]\n return tf.Variable(\n initial_value=tf.zeros(shape, dtype=dtype), name=name,\n trainable=trainable, collections=collections,\n validate_shape=validate_shape)\n" ]
[ [ "numpy.concatenate", "numpy.arange", "tensorflow.train.AdamOptimizer", "numpy.random.RandomState" ], [ "tensorflow.zeros" ] ]
maxis1314/pyutils
[ "7e0666c650209155b3da186d09c54cf14825df1e" ]
[ "ml/svm/run.py" ]
[ "# -*- coding: utf-8 -*- \nimport numpy as np \nimport scipy as sp \nfrom sklearn import svm \nfrom sklearn.cross_validation import train_test_split \nimport matplotlib.pyplot as plt \n \ndata = [] \nlabels = [] \nwith open(\"1.txt\") as ifile: \n for line in ifile: \n tokens = line.strip().split(' ') \n data.append([float(tk) for tk in tokens[:-1]]) \n labels.append(tokens[-1]) \nx = np.array(data) \nlabels = np.array(labels) \ny = np.zeros(labels.shape) \ny[labels=='fat']=1 \nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.0) \n \nh = .02 \n# create a mesh to plot in \nx_min, x_max = x_train[:, 0].min() - 0.1, x_train[:, 0].max() + 0.1 \ny_min, y_max = x_train[:, 1].min() - 1, x_train[:, 1].max() + 1 \nxx, yy = np.meshgrid(np.arange(x_min, x_max, h), \n np.arange(y_min, y_max, h)) \n \n''''' SVM ''' \n# title for the plots \ntitles = ['LinearSVC (linear kernel)', \n 'SVC with polynomial (degree 3) kernel', \n 'SVC with RBF kernel', \n 'SVC with Sigmoid kernel'] \nclf_linear = svm.SVC(kernel='linear').fit(x, y) \n#clf_linear = svm.LinearSVC().fit(x, y) \nclf_poly = svm.SVC(kernel='poly', degree=3).fit(x, y) \nclf_rbf = svm.SVC().fit(x, y) \nclf_sigmoid = svm.SVC(kernel='sigmoid').fit(x, y) \n \nfor i, clf in enumerate((clf_linear, clf_poly, clf_rbf, clf_sigmoid)): \n answer = clf.predict(np.c_[xx.ravel(), yy.ravel()]) \n print(clf) \n print(np.mean( answer == y_train)) \n print(answer) \n print(y_train) \n \n plt.subplot(2, 2, i + 1) \n plt.subplots_adjust(wspace=0.4, hspace=0.4) \n \n # Put the result into a color plot \n z = answer.reshape(xx.shape) \n plt.contourf(xx, yy, z, cmap=plt.cm.Paired, alpha=0.8) \n \n # Plot also the training points \n plt.scatter(x_train[:, 0], x_train[:, 1], c=y_train, cmap=plt.cm.Paired) \n plt.xlabel(u'Height') \n plt.ylabel(u'Weight') \n plt.xlim(xx.min(), xx.max()) \n plt.ylim(yy.min(), yy.max()) \n plt.xticks(()) \n plt.yticks(()) \n plt.title(titles[i]) \n \nplt.show() " ]
[ [ "numpy.array", "matplotlib.pyplot.contourf", "numpy.zeros", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.title", "matplotlib.pyplot.yticks", "sklearn.svm.SVC", "numpy.mean", "numpy.arange", "matplotlib.pyplot.scatter", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show", "sklearn.cross_validation.train_test_split", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.xticks", "matplotlib.pyplot.subplot" ] ]
jklenzing/pysat
[ "e84002e57f8a808d35562f51ce957361d99c4070" ]
[ "pysat/tests/test_sw.py" ]
[ "import datetime as dt\nimport numpy as np\nimport os\n\nfrom nose.tools import assert_raises\nfrom nose.plugins import skip\nimport pandas as pds\n\nimport pysat\nfrom pysat.instruments import sw_kp, sw_f107\nfrom pysat.instruments.methods import sw as sw_meth\n\n\nclass TestSWKp():\n def setup(self):\n \"\"\"Runs before every method to create a clean testing setup\"\"\"\n # Load a test instrument\n self.testInst = pysat.Instrument()\n self.testInst.data = pds.DataFrame({'Kp': np.arange(0, 4, 1.0/3.0),\n 'ap_nan': np.full(shape=12, \\\n fill_value=np.nan),\n 'ap_inf': np.full(shape=12, \\\n fill_value=np.inf)},\n index=[pysat.datetime(2009, 1, 1)\n + pds.DateOffset(hours=3*i)\n for i in range(12)])\n self.testInst.meta = pysat.Meta()\n self.testInst.meta.__setitem__('Kp', {self.testInst.meta.fill_label:\n np.nan})\n self.testInst.meta.__setitem__('ap_nan', {self.testInst.meta.fill_label:\n np.nan})\n self.testInst.meta.__setitem__('ap_inv', {self.testInst.meta.fill_label:\n np.inf})\n\n # Load a test Metadata\n self.testMeta = pysat.Meta()\n\n def teardown(self):\n \"\"\"Runs after every method to clean up previous testing.\"\"\"\n del self.testInst, self.testMeta\n\n def test_convert_kp_to_ap(self):\n \"\"\" Test conversion of Kp to ap\"\"\"\n\n sw_kp.convert_3hr_kp_to_ap(self.testInst)\n\n assert '3hr_ap' in self.testInst.data.columns\n assert '3hr_ap' in self.testInst.meta.keys()\n assert(self.testInst['3hr_ap'].min() >=\n self.testInst.meta['3hr_ap'][self.testInst.meta.min_label])\n assert(self.testInst['3hr_ap'].max() <=\n self.testInst.meta['3hr_ap'][self.testInst.meta.max_label])\n\n def test_convert_kp_to_ap_fill_val(self):\n \"\"\" Test conversion of Kp to ap with fill values\"\"\"\n\n # Set the first value to a fill value, then calculate ap\n fill_label = self.testInst.meta.fill_label\n self.testInst['Kp'][0] = self.testInst.meta['Kp'][fill_label]\n sw_kp.convert_3hr_kp_to_ap(self.testInst)\n\n # Test non-fill ap values\n assert '3hr_ap' in self.testInst.data.columns\n assert '3hr_ap' in self.testInst.meta.keys()\n assert(self.testInst['3hr_ap'][1:].min() >=\n self.testInst.meta['3hr_ap'][self.testInst.meta.min_label])\n assert(self.testInst['3hr_ap'][1:].max() <=\n self.testInst.meta['3hr_ap'][self.testInst.meta.max_label])\n\n # Test the fill value in the data and metadata\n assert np.isnan(self.testInst['3hr_ap'][0])\n assert np.isnan(self.testInst.meta['3hr_ap'][fill_label])\n\n del fill_label\n\n def test_convert_kp_to_ap_bad_input(self):\n \"\"\" Test conversion of Kp to ap with bad input\"\"\"\n\n self.testInst.data.rename(columns={\"Kp\": \"bad\"}, inplace=True)\n\n assert_raises(ValueError, sw_kp.convert_3hr_kp_to_ap, self.testInst)\n\n def test_initialize_kp_metadata(self):\n \"\"\"Test default Kp metadata initialization\"\"\"\n sw_kp.initialize_kp_metadata(self.testInst.meta, 'Kp')\n\n assert self.testInst.meta['Kp'][self.testInst.meta.units_label] == ''\n assert self.testInst.meta['Kp'][self.testInst.meta.name_label] == 'Kp'\n assert(self.testInst.meta['Kp'][self.testInst.meta.desc_label] ==\n 'Planetary K-index')\n assert self.testInst.meta['Kp'][self.testInst.meta.plot_label] == 'Kp'\n assert self.testInst.meta['Kp'][self.testInst.meta.axis_label] == 'Kp'\n assert(self.testInst.meta['Kp'][self.testInst.meta.scale_label] ==\n 'linear')\n assert self.testInst.meta['Kp'][self.testInst.meta.min_label] == 0\n assert self.testInst.meta['Kp'][self.testInst.meta.max_label] == 9\n assert self.testInst.meta['Kp'][self.testInst.meta.fill_label] == -1\n\n def 
test_uninit_kp_metadata(self):\n \"\"\"Test Kp metadata initialization with uninitialized Metadata\"\"\"\n sw_kp.initialize_kp_metadata(self.testMeta, 'Kp')\n\n assert self.testMeta['Kp'][self.testMeta.units_label] == ''\n assert self.testMeta['Kp'][self.testMeta.name_label] == 'Kp'\n assert(self.testMeta['Kp'][self.testMeta.desc_label] ==\n 'Planetary K-index')\n assert self.testMeta['Kp'][self.testMeta.plot_label] == 'Kp'\n assert self.testMeta['Kp'][self.testMeta.axis_label] == 'Kp'\n assert self.testMeta['Kp'][self.testMeta.scale_label] == 'linear'\n assert self.testMeta['Kp'][self.testMeta.min_label] == 0\n assert self.testMeta['Kp'][self.testMeta.max_label] == 9\n assert self.testMeta['Kp'][self.testMeta.fill_label] == -1\n\n def test_fill_kp_metadata(self):\n \"\"\"Test Kp metadata initialization with user-specified fill value\"\"\"\n sw_kp.initialize_kp_metadata(self.testInst.meta, 'Kp', fill_val=666)\n\n assert self.testInst.meta['Kp'][self.testInst.meta.fill_label] == 666\n\n def test_long_name_kp_metadata(self):\n \"\"\"Test Kp metadata initialization with a long name\"\"\"\n dkey = 'high_lat_Kp'\n sw_kp.initialize_kp_metadata(self.testInst.meta, dkey)\n\n assert self.testInst.meta[dkey][self.testInst.meta.name_label] == dkey\n assert(self.testInst.meta[dkey][self.testInst.meta.desc_label] ==\n 'Planetary K-index')\n assert(self.testInst.meta[dkey][self.testInst.meta.plot_label] ==\n 'High lat Kp')\n assert(self.testInst.meta[dkey][self.testInst.meta.axis_label] ==\n 'High lat Kp')\n del dkey\n\n def test_convert_ap_to_kp(self):\n \"\"\" Test conversion of ap to Kp\"\"\"\n\n sw_kp.convert_3hr_kp_to_ap(self.testInst)\n kp_out, kp_meta = sw_meth.convert_ap_to_kp(self.testInst['3hr_ap'])\n\n # Assert original and coverted there and back Kp are equal\n assert all(abs(kp_out - self.testInst.data['Kp']) < 1.0e-4)\n\n # Assert the converted Kp meta data exists and is reasonable\n assert 'Kp' in kp_meta.keys()\n assert(kp_meta['Kp'][kp_meta.fill_label] == -1)\n\n del kp_out, kp_meta\n\n def test_convert_ap_to_kp_middle(self):\n \"\"\" Test conversion of ap to Kp where ap is not an exact Kp value\"\"\"\n\n sw_kp.convert_3hr_kp_to_ap(self.testInst)\n self.testInst['3hr_ap'][8] += 1\n kp_out, kp_meta = sw_meth.convert_ap_to_kp(self.testInst['3hr_ap'])\n\n # Assert original and coverted there and back Kp are equal\n assert all(abs(kp_out - self.testInst.data['Kp']) < 1.0e-4)\n\n # Assert the converted Kp meta data exists and is reasonable\n assert 'Kp' in kp_meta.keys()\n assert(kp_meta['Kp'][kp_meta.fill_label] == -1)\n\n del kp_out, kp_meta\n\n def test_convert_ap_to_kp_nan_input(self):\n \"\"\" Test conversion of ap to Kp where ap is NaN\"\"\"\n\n kp_out, kp_meta = sw_meth.convert_ap_to_kp(self.testInst['ap_nan'])\n\n # Assert original and coverted there and back Kp are equal\n assert all(kp_out == -1)\n\n # Assert the converted Kp meta data exists and is reasonable\n assert 'Kp' in kp_meta.keys()\n assert(kp_meta['Kp'][kp_meta.fill_label] == -1)\n\n del kp_out, kp_meta\n\n def test_convert_ap_to_kp_inf_input(self):\n \"\"\" Test conversion of ap to Kp where ap is Inf\"\"\"\n\n kp_out, kp_meta = sw_meth.convert_ap_to_kp(self.testInst['ap_inf'])\n\n # Assert original and coverted there and back Kp are equal\n assert all(kp_out[1:] == -1)\n\n # Assert the converted Kp meta data exists and is reasonable\n assert 'Kp' in kp_meta.keys()\n assert(kp_meta['Kp'][kp_meta.fill_label] == -1)\n\n del kp_out, kp_meta\n\n def test_convert_ap_to_kp_fill_val(self):\n \"\"\" Test conversion of ap to Kp 
with fill values\"\"\"\n\n # Set the first value to a fill value, then calculate ap\n fill_label = self.testInst.meta.fill_label\n self.testInst['Kp'][0] = self.testInst.meta['Kp'][fill_label]\n sw_kp.convert_3hr_kp_to_ap(self.testInst)\n kp_out, kp_meta = sw_meth.convert_ap_to_kp(self.testInst['3hr_ap'], \\\n fill_val=self.testInst.meta['Kp'][fill_label])\n\n # Test non-fill ap values\n assert all(abs(kp_out[1:] - self.testInst.data['Kp'][1:]) < 1.0e-4)\n\n # Test the fill value in the data and metadata\n assert np.isnan(kp_out[0])\n assert np.isnan(kp_meta['Kp'][fill_label])\n\n del fill_label, kp_out, kp_meta\n\n\nclass TestSwKpCombine():\n def setup(self):\n \"\"\"Runs before every method to create a clean testing setup\"\"\"\n # Switch to test_data directory\n self.saved_path = pysat.data_dir\n pysat.utils.set_data_dir(pysat.test_data_path, store=False)\n\n # Set combination testing input\n self.test_day = pysat.datetime(2019, 3, 18)\n self.combine = {\"standard_inst\": pysat.Instrument(\"sw\", \"kp\", \"\"),\n \"recent_inst\": pysat.Instrument(\"sw\", \"kp\", \"recent\"),\n \"forecast_inst\":\n pysat.Instrument(\"sw\", \"kp\", \"forecast\"),\n \"start\": self.test_day - dt.timedelta(days=30),\n \"stop\": self.test_day + dt.timedelta(days=3),\n \"fill_val\": -1}\n\n def teardown(self):\n \"\"\"Runs after every method to clean up previous testing.\"\"\"\n pysat.utils.set_data_dir(self.saved_path)\n del self.combine, self.test_day, self.saved_path\n\n def test_combine_kp_none(self):\n \"\"\" Test combine_kp failure when no input is provided\"\"\"\n\n assert_raises(ValueError, sw_meth.combine_kp)\n\n def test_combine_kp_one(self):\n \"\"\" Test combine_kp failure when only one instrument is provided\"\"\"\n\n # Load a test instrument\n testInst = pysat.Instrument()\n testInst.data = pds.DataFrame({'Kp': np.arange(0, 4, 1.0/3.0)},\n index=[pysat.datetime(2009, 1, 1)\n + pds.DateOffset(hours=3*i)\n for i in range(12)])\n testInst.meta = pysat.Meta()\n testInst.meta['Kp'] = {testInst.meta.fill_label: np.nan}\n\n combo_in = {\"standard_inst\": testInst}\n assert_raises(ValueError, sw_meth.combine_kp, combo_in)\n\n del combo_in, testInst\n\n def test_combine_kp_no_time(self):\n \"\"\"Test combine_kp failure when no times are provided\"\"\"\n\n combo_in = {kk: self.combine[kk] for kk in\n ['standard_inst', 'recent_inst', 'forecast_inst']}\n\n assert_raises(ValueError, sw_meth.combine_kp, combo_in)\n\n del combo_in\n\n def test_combine_kp_no_data(self):\n \"\"\"Test combine_kp when no data is present for specified times\"\"\"\n\n combo_in = {kk: self.combine['forecast_inst'] for kk in\n ['standard_inst', 'recent_inst', 'forecast_inst']}\n combo_in['start'] = pysat.datetime(2014, 2, 19)\n combo_in['stop'] = pysat.datetime(2014, 2, 24)\n kp_inst = sw_meth.combine_kp(**combo_in)\n\n assert kp_inst.data.isnull().all()[\"Kp\"]\n\n del combo_in, kp_inst\n\n def test_combine_kp_inst_time(self):\n \"\"\"Test combine_kp when times are provided through the instruments\"\"\"\n\n combo_in = {kk: self.combine[kk] for kk in\n ['standard_inst', 'recent_inst', 'forecast_inst']}\n\n combo_in['standard_inst'].load(date=self.combine['start'])\n combo_in['recent_inst'].load(date=self.test_day)\n combo_in['forecast_inst'].load(date=self.test_day)\n combo_in['stop'] = combo_in['forecast_inst'].index[-1]\n\n kp_inst = sw_meth.combine_kp(**combo_in)\n\n assert kp_inst.index[0] >= self.combine['start']\n # kp_inst contains times up to 21:00:00, coombine['stop'] is midnight\n assert kp_inst.index[-1].date() <= 
self.combine['stop'].date()\n assert len(kp_inst.data.columns) == 1\n assert kp_inst.data.columns[0] == 'Kp'\n\n assert np.isnan(kp_inst.meta['Kp'][kp_inst.meta.fill_label])\n assert len(kp_inst['Kp'][np.isnan(kp_inst['Kp'])]) == 0\n\n del combo_in, kp_inst\n\n def test_combine_kp_all(self):\n \"\"\"Test combine_kp when all input is provided\"\"\"\n\n kp_inst = sw_meth.combine_kp(**self.combine)\n\n assert kp_inst.index[0] >= self.combine['start']\n assert kp_inst.index[-1] < self.combine['stop']\n assert len(kp_inst.data.columns) == 1\n assert kp_inst.data.columns[0] == 'Kp'\n\n # Fill value is defined by combine\n assert(kp_inst.meta['Kp'][kp_inst.meta.fill_label] ==\n self.combine['fill_val'])\n assert (kp_inst['Kp'] != self.combine['fill_val']).all()\n\n del kp_inst\n\n def test_combine_kp_no_forecast(self):\n \"\"\"Test combine_kp when forecasted data is not provided\"\"\"\n\n combo_in = {kk: self.combine[kk] for kk in self.combine.keys()\n if kk != 'forecast_inst'}\n kp_inst = sw_meth.combine_kp(**combo_in)\n\n assert kp_inst.index[0] >= self.combine['start']\n assert kp_inst.index[-1] < self.combine['stop']\n assert len(kp_inst.data.columns) == 1\n assert kp_inst.data.columns[0] == 'Kp'\n assert(kp_inst.meta['Kp'][kp_inst.meta.fill_label] ==\n self.combine['fill_val'])\n assert len(kp_inst['Kp'][kp_inst['Kp']]\n == self.combine['fill_val']) > 0\n\n del kp_inst, combo_in\n\n def test_combine_kp_no_recent(self):\n \"\"\"Test combine_kp when recent data is not provided\"\"\"\n\n combo_in = {kk: self.combine[kk] for kk in self.combine.keys()\n if kk != 'recent_inst'}\n kp_inst = sw_meth.combine_kp(**combo_in)\n\n assert kp_inst.index[0] >= self.combine['start']\n assert kp_inst.index[-1] < self.combine['stop']\n assert len(kp_inst.data.columns) == 1\n assert kp_inst.data.columns[0] == 'Kp'\n assert (kp_inst.meta['Kp'][kp_inst.meta.fill_label] ==\n self.combine['fill_val'])\n assert len(kp_inst['Kp'][kp_inst['Kp']]\n == self.combine['fill_val']) > 0\n\n del kp_inst, combo_in\n\n def test_combine_kp_no_standard(self):\n \"\"\"Test combine_kp when standard data is not provided\"\"\"\n\n combo_in = {kk: self.combine[kk] for kk in self.combine.keys()\n if kk != 'standard_inst'}\n kp_inst = sw_meth.combine_kp(**combo_in)\n\n assert kp_inst.index[0] >= self.combine['start']\n assert kp_inst.index[-1] < self.combine['stop']\n assert len(kp_inst.data.columns) == 1\n assert kp_inst.data.columns[0] == 'Kp'\n assert(kp_inst.meta['Kp'][kp_inst.meta.fill_label] ==\n self.combine['fill_val'])\n assert len(kp_inst['Kp'][kp_inst['Kp']]\n == self.combine['fill_val']) > 0\n\n del kp_inst, combo_in\n\n\nclass TestSWF107():\n def setup(self):\n \"\"\"Runs before every method to create a clean testing setup\"\"\"\n # Load a test instrument\n self.testInst = pysat.Instrument()\n self.testInst.data = pds.DataFrame({'f107': np.linspace(70, 200, 160)},\n index=[pysat.datetime(2009, 1, 1)\n + pds.DateOffset(days=i)\n for i in range(160)])\n\n def teardown(self):\n \"\"\"Runs after every method to clean up previous testing.\"\"\"\n del self.testInst\n\n def test_calc_f107a_bad_inname(self):\n \"\"\" Test the calc_f107a with a bad input name \"\"\"\n\n assert_raises(ValueError, sw_f107.calc_f107a, self.testInst, 'bad')\n\n def test_calc_f107a_bad_outname(self):\n \"\"\" Test the calc_f107a with a bad output name \"\"\"\n\n assert_raises(ValueError, sw_f107.calc_f107a, self.testInst, 'f107',\n 'f107')\n\n def test_calc_f107a_daily(self):\n \"\"\" Test the calc_f107a routine with daily data\"\"\"\n\n 
sw_f107.calc_f107a(self.testInst, f107_name='f107', f107a_name='f107a')\n\n # Assert that new data and metadata exist\n assert 'f107a' in self.testInst.data.columns\n assert 'f107a' in self.testInst.meta.keys()\n\n # Assert the values are finite and realistic means\n assert np.all(np.isfinite(self.testInst['f107a']))\n assert self.testInst['f107a'].min() > self.testInst['f107'].min()\n assert self.testInst['f107a'].max() < self.testInst['f107'].max()\n\n def test_calc_f107a_high_rate(self):\n \"\"\" Test the calc_f107a routine with sub-daily data\"\"\"\n self.testInst.data = pds.DataFrame({'f107': np.linspace(70, 200,\n 3840)},\n index=[pysat.datetime(2009, 1, 1)\n + pds.DateOffset(hours=i)\n for i in range(3840)])\n sw_f107.calc_f107a(self.testInst, f107_name='f107', f107a_name='f107a')\n\n # Assert that new data and metadata exist\n assert 'f107a' in self.testInst.data.columns\n assert 'f107a' in self.testInst.meta.keys()\n\n # Assert the values are finite and realistic means\n assert np.all(np.isfinite(self.testInst['f107a']))\n assert self.testInst['f107a'].min() > self.testInst['f107'].min()\n assert self.testInst['f107a'].max() < self.testInst['f107'].max()\n\n # Assert the same mean value is used for a day\n assert len(np.unique(self.testInst['f107a'][:24])) == 1\n\n def test_calc_f107a_daily_missing(self):\n \"\"\" Test the calc_f107a routine with some daily data missing\"\"\"\n\n self.testInst.data = pds.DataFrame({'f107': np.linspace(70, 200, 160)},\n index=[pysat.datetime(2009, 1, 1)\n + pds.DateOffset(days=2*i+1)\n for i in range(160)])\n sw_f107.calc_f107a(self.testInst, f107_name='f107', f107a_name='f107a')\n\n # Assert that new data and metadata exist\n assert 'f107a' in self.testInst.data.columns\n assert 'f107a' in self.testInst.meta.keys()\n\n # Assert the finite values have realistic means\n assert(np.nanmin(self.testInst['f107a'])\n > np.nanmin(self.testInst['f107']))\n assert(np.nanmax(self.testInst['f107a'])\n < np.nanmax(self.testInst['f107']))\n\n # Assert the expected number of fill values\n assert(len(self.testInst['f107a'][np.isnan(self.testInst['f107a'])])\n == 40)\n\n\nclass TestSWF107Combine():\n def setup(self):\n \"\"\"Runs before every method to create a clean testing setup\"\"\"\n # Switch to test_data directory\n self.saved_path = pysat.data_dir\n pysat.utils.set_data_dir(pysat.test_data_path, store=False)\n\n # Set combination testing input\n self.test_day = pysat.datetime(2019, 3, 16)\n self.combineInst = {tag: pysat.Instrument(\"sw\", \"f107\", tag)\n for tag in sw_f107.tags.keys()}\n self.combineTimes = {\"start\": self.test_day - dt.timedelta(days=30),\n \"stop\": self.test_day + dt.timedelta(days=3)}\n\n def teardown(self):\n \"\"\"Runs after every method to clean up previous testing.\"\"\"\n pysat.utils.set_data_dir(self.saved_path)\n del self.combineInst, self.test_day, self.combineTimes\n\n def test_combine_f107_none(self):\n \"\"\" Test combine_f107 failure when no input is provided\"\"\"\n\n assert_raises(TypeError, sw_meth.combine_f107)\n\n def test_combine_f107_no_time(self):\n \"\"\"Test combine_f107 failure when no times are provided\"\"\"\n\n assert_raises(ValueError, sw_meth.combine_f107,\n self.combineInst[''], self.combineInst['forecast'])\n\n def test_combine_f107_inst_time(self):\n \"\"\"Test combine_f107 with times provided through datasets\"\"\"\n\n self.combineInst['all'].load(date=self.combineTimes['start'])\n self.combineInst['forecast'].load(date=self.test_day)\n\n f107_inst = sw_meth.combine_f107(self.combineInst['all'],\n 
self.combineInst['forecast'])\n\n assert f107_inst.index[0] == dt.datetime(1947, 2, 13)\n assert f107_inst.index[-1] <= self.combineTimes['stop']\n assert len(f107_inst.data.columns) == 1\n assert f107_inst.data.columns[0] == 'f107'\n\n del f107_inst\n\n def test_combine_f107_all(self):\n \"\"\"Test combine_f107 when all input is provided with '' and '45day'\"\"\"\n\n f107_inst = sw_meth.combine_f107(self.combineInst[''],\n self.combineInst['45day'],\n **self.combineTimes)\n\n assert f107_inst.index[0] >= self.combineTimes['start']\n assert f107_inst.index[-1] < self.combineTimes['stop']\n assert len(f107_inst.data.columns) == 1\n assert f107_inst.data.columns[0] == 'f107'\n\n del f107_inst\n\n\nclass TestSWAp():\n def setup(self):\n \"\"\"Runs before every method to create a clean testing setup\"\"\"\n # Load a test instrument with 3hr ap data\n self.testInst = pysat.Instrument()\n self.testInst.data = pds.DataFrame({'3hr_ap': [0, 2, 3, 4, 5, 6, 7, 9,\n 12, 15]},\n index=[pysat.datetime(2009, 1, 1)\n + pds.DateOffset(hours=3*i)\n for i in range(10)])\n self.testInst.meta = pysat.Meta()\n self.meta_dict = {self.testInst.meta.units_label: '',\n self.testInst.meta.name_label: 'ap',\n self.testInst.meta.desc_label:\n \"3-hour ap (equivalent range) index\",\n self.testInst.meta.plot_label: \"ap\",\n self.testInst.meta.axis_label: \"ap\",\n self.testInst.meta.scale_label: 'linear',\n self.testInst.meta.min_label: 0,\n self.testInst.meta.max_label: 400,\n self.testInst.meta.fill_label: np.nan,\n self.testInst.meta.notes_label: 'test ap'}\n self.testInst.meta.__setitem__('3hr_ap', self.meta_dict)\n\n def teardown(self):\n \"\"\"Runs after every method to clean up previous testing.\"\"\"\n del self.testInst, self.meta_dict\n\n def test_calc_daily_Ap(self):\n \"\"\" Test daily Ap calculation\"\"\"\n\n sw_meth.calc_daily_Ap(self.testInst)\n\n assert 'Ap' in self.testInst.data.columns\n assert 'Ap' in self.testInst.meta.keys()\n\n # Test unfilled values (full days)\n assert np.all(self.testInst['Ap'][:8].min() == 4.5)\n\n # Test fill values (partial days)\n assert np.all(np.isnan(self.testInst['Ap'][8:]))\n\n def test_calc_daily_Ap_bad_3hr(self):\n \"\"\" Test daily Ap calculation with bad input key\"\"\"\n\n assert_raises(ValueError, sw_meth.calc_daily_Ap, self.testInst,\n \"no\")\n\n def test_calc_daily_Ap_bad_daily(self):\n \"\"\" Test daily Ap calculation with bad output key\"\"\"\n\n assert_raises(ValueError, sw_meth.calc_daily_Ap, self.testInst,\n \"3hr_ap\", \"3hr_ap\")\n" ]
[ [ "numpy.full", "numpy.isnan", "numpy.nanmin", "pandas.DateOffset", "numpy.arange", "numpy.isfinite", "numpy.linspace", "numpy.nanmax", "numpy.unique" ] ]
jjc2718/generic-expression-patterns
[ "99961ac3647d2447268ca73a94cab8b09ee08237" ]
[ "new_experiment/nbconverted/find_specific_genes_in_new_experiment.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# # Application: new experiment\n# \n# This notebook allows users to find specific genes in their experiment of interest using an existing VAE model\n# \n# This notebook will generate a `generic_gene_summary_<experiment id>.tsv` file that contains a z-score per gene that indicates how specific a gene is the experiment in question.\n\n# In[1]:\n\n\nget_ipython().run_line_magic('load_ext', 'autoreload')\nget_ipython().run_line_magic('load_ext', 'rpy2.ipython')\nget_ipython().run_line_magic('autoreload', '2')\n\n\n# In[2]:\n\n\nimport os\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom ponyo import utils\nfrom generic_expression_patterns_modules import process, new_experiment_process, stats, ranking\n\n\n# ## User input\n# \n# User needs to define the following in the [config file](../configs/config_new_experiment.tsv):\n# \n# 1. Template experiment. This is the experiment you are interested in studying\n# 2. Training compendium used to train VAE, including unnormalized gene mapped version and normalized version\n# 3. Scaler transform used to normalize the training compendium\n# 4. Directory containing trained VAE model\n# 5. Experiment id to label newly create simulated experiments\n# \n# The user also needs to provide metadata files:\n# 1. `<experiment id>_process_samples.tsv` contains 2 columns (sample ids, label that indicates if the sample is kept or removed). See [example](data/metadata/cis-gem-par-KU1919_process_samples.tsv). **Note: This file is not required if the user wishes to use all the samples in the template experiment file.**\n# 2. `<experiment id>_groups.tsv` contains 2 columns: sample ids, group label to perform DE analysis. See [example](data/metadata/cis-gem-par-KU1919_groups.tsv)\n\n# In[3]:\n\n\n# Read in config variables\nbase_dir = os.path.abspath(os.path.join(os.getcwd(), \"../\"))\n\nconfig_filename = os.path.abspath(\n os.path.join(base_dir, \"configs\", \"config_new_experiment.tsv\")\n)\n\nparams = utils.read_config(config_filename)\n\n\n# In[4]:\n\n\n# Load config params\n\n# Local directory to store intermediate files\nlocal_dir = params['local_dir']\n\n# Number of simulated experiments to generate\nnum_runs = params['num_simulated']\n\n# Directory containing trained VAE model\nvae_model_dir = params['vae_model_dir']\n\n# Dimension of latent space used in VAE model\nlatent_dim = params['latent_dim']\n\n# ID for template experiment\n# This ID will be used to label new simulated experiments\nproject_id = params['project_id']\n\n# Template experiment filename\ntemplate_filename = params['raw_template_filename']\nmapped_template_filename = params['mapped_template_filename']\nnormalized_template_filename = params['normalized_template_filename']\nprocessed_template_filename = params['processed_template_filename']\n\n# Training dataset used for existing VAE model\nmapped_compendium_filename = params['mapped_compendium_filename']\n\n# Normalized compendium filename\nnormalized_compendium_filename = params['normalized_compendium_filename']\n\n# Scaler transform used to scale compendium data into 0-1 range for training\nscaler_filename = params['scaler_filename']\n\n# Test statistic used to rank genes by\ncol_to_rank_genes = params['rank_genes_by']\n\n# Minimum mean count per gene\ncount_threshold = params['count_threshold']\n\n# Column headers to use to make summary statistic table\nlogFC_name = params['DE_logFC_name']\npvalue_name = params['DE_pvalue_name']\n\n\n# 
In[5]:\n\n\n# Load metadata files\n\n# Load metadata file with processing information\nsample_id_metadata_filename = os.path.join(\n \"data\",\n \"metadata\",\n f\"{project_id}_process_samples.tsv\"\n)\n\n# Load metadata file with grouping assignments for samples\nmetadata_filename = os.path.join(\n \"data\",\n \"metadata\",\n f\"{project_id}_groups.tsv\"\n)\n\n\n# In[6]:\n\n\n# Output filename\ngene_summary_filename = f\"generic_gene_summary_{project_id}.tsv\"\n\n\n# ## Map template experiment to same feature space as training compendium\n# \n# In order to simulate a new gene expression experiment, we will need to encode this experiment into the learned latent space. This requires that the feature space (i.e. genes) in the template experiment match the features in the compendium used to train the VAE model. These cells process the template experiment to be of the expected input format:\n# * Template data is expected to be a matrix that is sample x gene\n# * Template experiment is expected to have the same genes as the compendium experiment. Genes that are in the template experiment but not in the compendium are removed. Genes that are in the compendium but missing in the template experiment are added and the gene expression value is set to the median gene expression value of that gene across the samples in the compendium.\n\n# In[7]:\n\n\n# Template experiment needs to be of the form sample x gene\ntemplate_filename_only = template_filename.split(\"/\")[-1].split(\".\")[0]\ntransposed_template_filename = os.path.join(local_dir, template_filename_only+\"_transposed.txt\")\n\nnew_experiment_process.transpose_save(template_filename, transposed_template_filename)\n\n\n# In[8]:\n\n\nnew_experiment_process.process_template_experiment(\n transposed_template_filename,\n mapped_compendium_filename,\n scaler_filename,\n mapped_template_filename,\n normalized_template_filename,\n)\n\n\n# ## Simulate experiments based on template experiment\n# \n# Embed template experiment into learned latent space and linearly shift template experiment to different locations of the latent space to create new experiments\n\n# In[9]:\n\n\n# Simulate experiments based on template experiment\nnormalized_compendium_data = pd.read_csv(normalized_compendium_filename, sep=\"\\t\", index_col=0, header=0)\nnormalized_template_data = pd.read_csv(normalized_template_filename, sep=\"\\t\", index_col=0, header=0)\n\nfor run_id in range(num_runs):\n new_experiment_process.embed_shift_template_experiment(\n normalized_compendium_data,\n normalized_template_data,\n vae_model_dir,\n project_id,\n scaler_filename,\n local_dir,\n latent_dim,\n run_id\n )\n\n\n# ## Process template and simulated experiments\n# \n# * Remove samples not required for comparison\n# * Make sure ordering of samples matches metadata for proper comparison\n# * Make sure values are cast as integers if using DESeq\n# * Filter lowly expressed genes if using DESeq\n\n# In[10]:\n\n\nif \"human_general_analysis\" in vae_model_dir:\n method = \"deseq\"\nelse:\n method = \"limma\"\n\n\n# In[11]:\n\n\nif not os.path.exists(sample_id_metadata_filename):\n sample_id_metadata_filename = None\n \nif method == \"deseq\":\n stats.process_samples_for_DESeq(\n mapped_template_filename,\n metadata_filename,\n processed_template_filename,\n count_threshold,\n sample_id_metadata_filename,\n )\n\n for i in range(num_runs):\n simulated_filename = os.path.join(\n local_dir,\n \"pseudo_experiment\",\n f\"selected_simulated_data_{project_id}_{i}.txt\"\n )\n out_simulated_filename = 
os.path.join(\n local_dir,\n \"pseudo_experiment\",\n f\"selected_simulated_data_{project_id}_{i}_processed.txt\"\n )\n stats.process_samples_for_DESeq(\n simulated_filename,\n metadata_filename,\n out_simulated_filename,\n count_threshold,\n sample_id_metadata_filename,\n )\nelse:\n stats.process_samples_for_limma(\n mapped_template_filename,\n metadata_filename,\n processed_template_filename,\n sample_id_metadata_filename,\n )\n\n for i in range(num_runs):\n simulated_filename = os.path.join(\n local_dir,\n \"pseudo_experiment\",\n f\"selected_simulated_data_{project_id}_{i}.txt\"\n )\n stats.process_samples_for_limma(\n simulated_filename,\n metadata_filename,\n None,\n sample_id_metadata_filename,\n )\n\n\n# ## Differential expression analysis\n# \n# * If data is RNA-seq then use DESeq2 (using human_general_analysis model)\n# * If data is microarray then use Limma (using human_cancer_analysis, pseudomonas_analysis models)\n\n# In[12]:\n\n\n# Create subdirectory: \"<local_dir>/DE_stats/\"\nos.makedirs(os.path.join(local_dir, \"DE_stats\"), exist_ok=True)\n\n\n# In[13]:\n\n\nget_ipython().run_cell_magic('R', '-i metadata_filename -i project_id -i processed_template_filename -i local_dir -i base_dir -i method', '\\nsource(paste0(base_dir, \\'/generic_expression_patterns_modules/DE_analysis.R\\'))\\n\\n# File created: \"<local_dir>/DE_stats/DE_stats_template_data_<project_id>_real.txt\"\\nif (method == \"deseq\"){\\n get_DE_stats_DESeq(\\n metadata_filename,\\n project_id, \\n processed_template_filename,\\n \"template\",\\n local_dir,\\n \"real\"\\n )\\n}\\nelse{\\n get_DE_stats_limma(\\n metadata_filename,\\n project_id, \\n processed_template_filename,\\n \"template\",\\n local_dir,\\n \"real\"\\n ) \\n}')\n\n\n# In[14]:\n\n\nget_ipython().run_cell_magic('R', '-i metadata_filename -i project_id -i base_dir -i local_dir -i num_runs -i method', '\\nsource(paste0(base_dir, \\'/generic_expression_patterns_modules/DE_analysis.R\\'))\\n\\n# Files created: \"<local_dir>/DE_stats/DE_stats_simulated_data_<project_id>_<n>.txt\"\\nfor (i in 0:(num_runs-1)){\\n simulated_data_filename <- paste(\\n local_dir, \\n \"pseudo_experiment/selected_simulated_data_\",\\n project_id,\\n \"_\", \\n i,\\n \"_processed.txt\",\\n sep = \"\"\\n )\\n if (method == \"deseq\"){\\n get_DE_stats_DESeq(\\n metadata_filename,\\n project_id, \\n simulated_data_filename,\\n \"simulated\",\\n local_dir,\\n i\\n )\\n }\\n else {\\n get_DE_stats_limma(\\n metadata_filename,\\n project_id, \\n simulated_data_filename,\\n \"simulated\",\\n local_dir,\\n i\\n )\\n }\\n }')\n\n\n# ## Rank genes\n# \n# Genes are ranked by their \"generic-ness\" - how frequently these genes are changed across the simulated experiments using user-specific test statistic (i.e. log2 fold change).\n\n# In[15]:\n\n\nanalysis_type = \"DE\"\ntemplate_DE_stats_filename = os.path.join(\n local_dir,\n \"DE_stats\",\n f\"DE_stats_template_data_{project_id}_real.txt\"\n)\n\ntemplate_DE_stats, simulated_DE_summary_stats = ranking.process_and_rank_genes_pathways(\n template_DE_stats_filename,\n local_dir,\n num_runs,\n project_id,\n analysis_type,\n col_to_rank_genes,\n logFC_name,\n pvalue_name,\n)\n\n\n# ## Summary table\n# \n# * Gene ID: Gene identifier (hgnc symbols for human data or PA number for *P. 
aeruginosa* data)\n# * (Real): Statistics for template experiment\n# * (Simulated): Statistics across simulated experiments\n# * Number of experiments: Number of simulated experiments\n# * Z-score: High z-score indicates that gene is more changed in template compared to the null set of simulated experiments (high z-score = highly specific to template experiment)\n# \n# \n# **Note:** \n# * If using DESeq, genes with NaN in only the `Adj P-value (Real)` column are those genes flagged because of the `cooksCutoff` parameter. The cook's distance as a diagnostic to tell if a single sample has a count which has a disproportionate impact on the log fold change and p-values. These genes are flagged with an NA in the pvalue and padj columns of the result table. \n# \n# * If using DESeq with count threshold, some genes may not be present in all simulated experiments (i.e. the `Number of experiments (simulated)` will not equal the number of simulated experiments you specified in the beginning. This pre-filtering will lead to some genes found in few simulated experiments and so the background/null set for that gene is not robust. Thus, the user should sort by both z-score and number of experiments to identify specific expressed genes.\n# \n# * If using DESeq without count threshold, some genes may still not be present in all simulated experiments (i.e. the `Number of experiments (simulated)` will not equal the number of simulated experiments you specified in the beginning. If the gene is 0 expressed across all samples and thus automatically given an NA in `log fold change, adjusted p-value` columns. Thus, the user should sort by both z-score and number of experiments to identify specific expressed genes.\n# \n# For more information you can read [DESeq FAQs](https://bioconductor.org/packages/release/bioc/vignettes/DESeq2/inst/doc/DESeq2.html#pvaluesNA)\n\n# In[16]:\n\n\n# Get summary table\nsummary_gene_ranks = ranking.generate_summary_table(\n template_DE_stats_filename,\n template_DE_stats,\n simulated_DE_summary_stats,\n col_to_rank_genes,\n local_dir,\n 'gene',\n params\n)\n\nsummary_gene_ranks.sort_values(by=\"Z score\", ascending=False).head(10)\n\n\n# In[17]:\n\n\nsummary_gene_ranks.isna().any()\n\n\n# In[18]:\n\n\nsummary_gene_ranks[summary_gene_ranks.isna().any(axis=1)]\n\n\n# In[19]:\n\n\n# Save\nsummary_gene_ranks.to_csv(gene_summary_filename, sep='\\t')\n\n\n# In[ ]:\n\n\n\n\n" ]
[ [ "pandas.read_csv" ] ]
VanekPetr/investment-funnel
[ "5de73d92efb785fb3f38509af5d22fade0c18333" ]
[ "models/CVaRmodel.py" ]
[ "import pulp\nimport pandas as pd\n\n\n# ----------------------------------------------------------------------\n# MODEL FOR THE SECOND AND ONGOING PERIODS \n# ----------------------------------------------------------------------\ndef rebalancingModel(mu,scen,CVaR_target,cvar_alpha,x_old,trans_cost, max_weight):\n \n \"\"\" This function finds the optimal enhanced index portfolio according to some benchmark.\n The portfolio corresponds to the tangency portfolio where risk is evaluated according to \n the CVaR of the tracking error. The model is formulated using fractional programming.\n \n Parameters\n ----------\n mu : pandas.Series with float values\n asset point forecast\n mu_b : pandas.Series with float values\n Benchmark point forecast\n scen : pandas.DataFrame with float values\n Asset scenarios\n scen_b : pandas.Series with float values\n Benchmark scenarios\n max_weight : float\n Maximum allowed weight \n cvar_alpha : float\n Alpha value used to evaluate Value-at-Risk one \n \n Returns\n -------\n float\n Asset weights in an optimal portfolio\n \n \"\"\"\n \n # define index\n i_idx = scen.columns\n j_idx = scen.index\n \n # number of scenarios\n N = scen.shape[0] \n # variable costs\n c = trans_cost\n \n # define variables\n x = pulp.LpVariable.dicts(\"x\", ((i) for i in i_idx), lowBound=0, cat='Continuous')\n \n # define variables for buying\n buy = pulp.LpVariable.dicts(\"buy\", ((i) for i in i_idx), lowBound=0, cat='Continuous')\n # define variables for selling\n sell = pulp.LpVariable.dicts(\"sell\", ((i) for i in i_idx), lowBound=0, cat='Continuous')\n \n # define cost variable\n cost = pulp.LpVariable(\"cost\", lowBound=0, cat='Continuous') \n \n # loss deviation\n VarDev = pulp.LpVariable.dicts(\"VarDev\", ((t) for t in j_idx), lowBound=0, cat='Continuous')\n \n # value at risk\n VaR = pulp.LpVariable(\"VaR\", lowBound=0, cat='Continuous')\n CVaR = pulp.LpVariable(\"CVaR\", lowBound=0, cat='Continuous')\n\n # *** define model ***\n model = pulp.LpProblem(\"Mean-CVaR Optimization\", pulp.LpMaximize)\n\n # *** Objective Function, maximize expected return of the portfolio ***\n \n model += pulp.lpSum([mu[i] * x[i] for i in i_idx] )\n\n # *** constraints ***\n \n # calculate VaR deviation\n for t in j_idx:\n model += -pulp.lpSum([scen.loc[t, i] * x[i] for i in i_idx]) - VaR <= VarDev[t]\n \n # calculate CVaR\n model += VaR + 1/(N*cvar_alpha)*pulp.lpSum([VarDev[t] for t in j_idx]) == CVaR\n \n # CVaR target\n model += CVaR <= CVaR_target \n \n # re-balancing\n for t in i_idx:\n model += x_old[t] - sell[t] + buy[t] == x[t]\n \n # cost of re-balancing\n model += c * (pulp.lpSum([ buy[i] for i in i_idx]) + pulp.lpSum([sell[i] for i in i_idx])) == cost\n \n # new budget constrain\n model += pulp.lpSum([buy[i] for i in i_idx]) == pulp.lpSum([sell[i] for i in i_idx]) - cost\n \n # *** Concentration limits ***\n # set max limits, so it cannot not be larger than a fixed value\n ###\n for i in i_idx:\n model += x[i] <= max_weight*(x_old.sum()-cost)\n\n # *** solve model ***\n model.solve()\n \n # print an error if the model is not optimal\n if pulp.LpStatus[model.status] != 'Optimal':\n print(\"Whoops! There is an error! 
The model has error status:\" + pulp.LpStatus[model.status] )\n\n # *** Get positions ***\n if pulp.LpStatus[model.status] == 'Optimal':\n \n # print variables\n var_model = dict()\n for variable in model.variables():\n var_model[variable.name] = variable.varValue\n \n # solution with variable names \n var_model = pd.Series(var_model,index=var_model.keys())\n\n long_pos = [i for i in var_model.keys() if i.startswith(\"x\")]\n \n # total portfolio with negative values as short positions\n port_total = pd.Series(var_model[long_pos].values, index=[t[2:] for t in var_model[long_pos].index])\n \n opt_port = port_total\n \n # set floating data points to zero and normalize\n opt_port[opt_port < 0.000001] = 0\n CVaR_result_p = var_model[\"CVaR\"]/sum(opt_port)\n port_val = sum(opt_port)\n opt_port = opt_port/sum(opt_port)\n \n # return portfolio, CVaR, and alpha\n return opt_port, CVaR_result_p, port_val\n\n\n# ----------------------------------------------------------------------\n# MODEL FOR THE FIRST PERIOD \n# ----------------------------------------------------------------------\ndef firstPeriodModel(mu, scen, CVaR_target, cvar_alpha, budget, trans_cost, max_weight):\n \n \"\"\" This function finds the optimal enhanced index portfolio according to some benchmark.\n The portfolio corresponds to the tangency portfolio where risk is evaluated according to \n the CVaR of the tracking error. The model is formulated using fractional programming.\n \n Parameters\n ----------\n mu : pandas.Series with float values\n asset point forecast\n mu_b : pandas.Series with float values\n Benchmark point forecast\n scen : pandas.DataFrame with float values\n Asset scenarios\n scen_b : pandas.Series with float values\n Benchmark scenarios\n max_weight : float\n Maximum allowed weight \n cvar_alpha : float\n Alpha value used to evaluate Value-at-Risk one \n \n Returns\n -------\n float\n Asset weights in an optimal portfolio\n \n \"\"\"\n \n # define index\n i_idx = scen.columns\n j_idx = scen.index\n \n # number of scenarios\n N = scen.shape[0] \n # variable transaction costs\n c = trans_cost\n \n # define variables\n x = pulp.LpVariable.dicts(\"x\", ((i) for i in i_idx), lowBound=0, cat='Continuous')\n \n # loss deviation\n VarDev = pulp.LpVariable.dicts(\"VarDev\", ((t) for t in j_idx), lowBound=0, cat='Continuous')\n \n # value at risk\n VaR = pulp.LpVariable(\"VaR\", lowBound=0, cat='Continuous')\n CVaR = pulp.LpVariable(\"CVaR\", lowBound=0, cat='Continuous')\n\n # *** define model ***\n model = pulp.LpProblem(\"Mean-CVaR Optimization\", pulp.LpMaximize)\n\n # *** Objective Function, maximize expected return of the portfolio ***\n \n model += pulp.lpSum([mu[i] * x[i] for i in i_idx])\n\n # *** constraints ***\n # budget constrain\n model += pulp.lpSum([x[i] for i in i_idx]) == (1-c) * budget\n \n # calculate VaR deviation\n for t in j_idx:\n model += -pulp.lpSum([scen.loc[t, i] * x[i] for i in i_idx]) - VaR <= VarDev[t]\n \n # calculate CVaR\n model += VaR + 1/(N * cvar_alpha) * pulp.lpSum([VarDev[t] for t in j_idx]) == CVaR\n \n # CVaR target\n model += CVaR <= CVaR_target \n \n # *** Concentration limits ***\n # set max limits, so it cannot not be larger than a fixed value\n for i in i_idx:\n model += x[i] <= max_weight*(1-c)*budget\n\n # *** solve model ***\n model.solve()\n \n # print an error if the model is not optimal\n if pulp.LpStatus[model.status] != 'Optimal':\n print(\"Whoops! There is an error! 
The model has error status:\" + pulp.LpStatus[model.status])\n\n # *** Get positions ***\n if pulp.LpStatus[model.status] == 'Optimal':\n \n # print variables\n var_model = dict()\n for variable in model.variables():\n var_model[variable.name] = variable.varValue\n \n # solution with variable names \n var_model = pd.Series(var_model,index=var_model.keys())\n\n long_pos = [i for i in var_model.keys() if i.startswith(\"x\")]\n \n # total portfolio with negative values as short positions\n port_total = pd.Series(var_model[long_pos].values, index=[t[2:] for t in var_model[long_pos].index])\n \n opt_port = port_total\n \n # *** set floating data points to zero and normalize ***\n opt_port[opt_port < 0.000001] = 0\n CVaR_result_p = var_model[\"CVaR\"]/sum(opt_port)\n port_val = sum(opt_port)\n opt_port = opt_port/sum(opt_port)\n \n # return portfolio, CVaR, and alpha\n return opt_port, CVaR_result_p, port_val\n\n\n\"\"\"\n ----------------------------------------------------------------------\n Mathematical Optimization: RUN THE CVAR MODEL\n ----------------------------------------------------------------------\n\"\"\"\ndef modelCVaR(testRet, scen, targets, budget, cvar_alpha, trans_cost, max_weight):\n \"\"\"\n Method to run the CVaR model over given periods\n \"\"\"\n p_points = len(scen[:, 0, 0]) # number of periods\n s_points = len(scen[0, :, 0]) # number of scenarios\n prob = 1/s_points # probability of each scenario\n\n assets = testRet.columns # names of all assets\n\n # DATA FRAME TO STORE CVaR TARGETS\n portCVaR = pd.DataFrame(columns=[\"CVaR\"], index=list(range(p_points)))\n # DATA FRAME TO STORE VALUE OF THE PORTFOLIO\n portValue = pd.DataFrame(columns=[\"Portfolio_Value\"], index=testRet.index)\n # DATA FRAME TO STORE PORTFOLIO ALLOCATION\n portAllocation = pd.DataFrame(columns=assets, index=list(range(p_points)))\n\n # *** THE FIRST INVESTMENT PERIOD ***\n # ----------------------------------------------------------------------\n # create data frame with scenarios for a period p=0\n scenDf = pd.DataFrame(scen[0, :, :],\n columns=testRet.columns,\n index=list(range(s_points)))\n\n # compute expected returns of all assets\n EP = sum(prob*scenDf.loc[i, :] for i in scenDf.index)\n\n # run CVaR model\n p_alloc, CVaR_val, port_val = firstPeriodModel(mu=EP,\n scen=scenDf,\n CVaR_target=targets.loc[0, \"CVaR_Target\"] * budget,\n cvar_alpha=cvar_alpha,\n budget=budget,\n trans_cost=trans_cost,\n max_weight=max_weight)\n\n # save the result\n portCVaR.loc[0, \"CVaR\"] = CVaR_val\n # save allocation\n portAllocation.loc[0, assets] = p_alloc\n portValueW = port_val\n\n # COMPUTE PORTFOLIO VALUE\n for w in testRet.index[0:4]:\n portValue.loc[w,\"Portfolio_Value\"] = sum(portAllocation.loc[0, assets] * portValueW\n * (1+testRet.loc[w, assets]))\n portValueW = portValue.loc[w, \"Portfolio_Value\"]\n\n # *** THE SECOND AND ONGOING INVESTMENT PERIODS ***\n # ----------------------------------------------------------------------\n for p in range(1, p_points):\n # create data frame with scenarios for a given period p\n scenDf = pd.DataFrame(scen[p, :, :],\n columns=testRet.columns,\n index=list(range(s_points))) \n \n # compute expected returns of all assets\n EP = sum(prob*scenDf.loc[i, :] for i in scenDf.index)\n\n # run CVaR model\n p_alloc, CVaR_val, port_val = rebalancingModel(mu=EP,\n scen=scenDf,\n CVaR_target=targets.loc[p, \"CVaR_Target\"] * portValueW,\n cvar_alpha=cvar_alpha,\n x_old=portAllocation.loc[p-1, assets] * portValueW,\n trans_cost=trans_cost,\n max_weight=max_weight)\n # save 
the result\n portCVaR.loc[p, \"CVaR\"] = CVaR_val\n # save allocation\n portAllocation.loc[p, assets] = p_alloc\n\n portValueW = port_val\n # COMPUTE PORTFOLIO VALUE\n for w in testRet.index[(p * 4): (4 + p * 4)]:\n portValue.loc[w, \"Portfolio_Value\"] = sum(portAllocation.loc[p, assets]\n * portValueW*(1+testRet.loc[w, assets]))\n portValueW = portValue.loc[w, \"Portfolio_Value\"]\n \n return portAllocation, portValue, portCVaR\n" ]
[ [ "pandas.DataFrame", "pandas.Series" ] ]
samuelwestlake/Multi-Tier-Robot-System
[ "93664413e68ac2080958527149729bd6b63429b5" ]
[ "catkin/src/distributed_robot_system/src/nodes/scripts/camera.py" ]
[ "#!/usr/bin/env python\n\nimport cv2\nimport rospy\nimport numpy as np\nfrom sensor_msgs.msg import CompressedImage\n\nfrom get_message import GetMessage\n\n\nclass Camera(object):\n\n def __init__(self, nb, buggy_nb):\n self.get_image = GetMessage()\n topic = \"buggy\" + str(buggy_nb) + \"/camera\" + str(nb)\n rospy.Subscriber(topic, CompressedImage, self.get_image, queue_size=1)\n\n def get_frame(self):\n frame = self.get_image.get_msg().data\n frame = np.fromstring(frame, np.uint8)\n frame = cv2.imdecode(frame, cv2.IMREAD_COLOR)\n frame = np.stack((frame[:, :, 2], frame[:, :, 1], frame[:, :, 0]), axis=2)\n return frame\n" ]
[ [ "numpy.fromstring", "numpy.stack" ] ]
hanrui1sensetime/mmdeploy
[ "f2594c624b67910e55e24418832bd96685425b2f" ]
[ "mmdeploy/backend/onnxruntime/wrapper.py" ]
[ "# Copyright (c) OpenMMLab. All rights reserved.\nimport logging\nimport os.path as osp\nfrom typing import Dict, Optional, Sequence\n\nimport numpy as np\nimport onnxruntime as ort\nimport torch\n\nfrom mmdeploy.utils import Backend, parse_device_id\nfrom mmdeploy.utils.timer import TimeCounter\nfrom ..base import BACKEND_WRAPPER, BaseWrapper\nfrom .init_plugins import get_ops_path\n\n\n@BACKEND_WRAPPER.register_module(Backend.ONNXRUNTIME.value)\nclass ORTWrapper(BaseWrapper):\n \"\"\"ONNXRuntime wrapper for inference.\n\n Args:\n onnx_file (str): Input onnx model file.\n device (str): The device to input model.\n output_names (Sequence[str] | None): Names of model outputs in order.\n Defaults to `None` and the wrapper will load the output names from\n model.\n\n Examples:\n >>> from mmdeploy.backend.onnxruntime import ORTWrapper\n >>> import torch\n >>>\n >>> onnx_file = 'model.onnx'\n >>> model = ORTWrapper(onnx_file, -1)\n >>> inputs = dict(input=torch.randn(1, 3, 224, 224, device='cpu'))\n >>> outputs = model(inputs)\n >>> print(outputs)\n \"\"\"\n\n def __init__(self,\n onnx_file: str,\n device: str,\n output_names: Optional[Sequence[str]] = None):\n # get the custom op path\n ort_custom_op_path = get_ops_path()\n session_options = ort.SessionOptions()\n # register custom op for onnxruntime\n if osp.exists(ort_custom_op_path):\n session_options.register_custom_ops_library(ort_custom_op_path)\n logging.info(f'Successfully loaded onnxruntime custom ops from \\\n {ort_custom_op_path}')\n else:\n logging.warning(f'The library of onnxruntime custom ops does \\\n not exist: {ort_custom_op_path}')\n\n sess = ort.InferenceSession(onnx_file, session_options)\n\n device_id = parse_device_id(device)\n\n providers = ['CPUExecutionProvider']\n options = [{}]\n is_cuda_available = ort.get_device() == 'GPU'\n if is_cuda_available:\n providers.insert(0, 'CUDAExecutionProvider')\n options.insert(0, {'device_id': device_id})\n sess.set_providers(providers, options)\n if output_names is None:\n output_names = [_.name for _ in sess.get_outputs()]\n self.sess = sess\n self.io_binding = sess.io_binding()\n self.device_id = device_id\n self.is_cuda_available = is_cuda_available\n self.device_type = 'cuda' if is_cuda_available else 'cpu'\n\n super().__init__(output_names)\n\n def forward(self, inputs: Dict[str,\n torch.Tensor]) -> Dict[str, torch.Tensor]:\n \"\"\"Run forward inference.\n\n Args:\n inputs (Dict[str, torch.Tensor]): The input name and tensor pairs.\n\n Returns:\n Dict[str, torch.Tensor]: The output name and tensor pairs.\n \"\"\"\n for name, input_tensor in inputs.items():\n # set io binding for inputs/outputs\n input_tensor = input_tensor.contiguous()\n if not self.is_cuda_available:\n input_tensor = input_tensor.cpu()\n self.io_binding.bind_input(\n name=name,\n device_type=self.device_type,\n device_id=self.device_id,\n element_type=np.float32,\n shape=input_tensor.shape,\n buffer_ptr=input_tensor.data_ptr())\n\n for name in self._output_names:\n self.io_binding.bind_output(name)\n # run session to get outputs\n self.__ort_execute(self.io_binding)\n output_list = self.io_binding.copy_outputs_to_cpu()\n outputs = {}\n for output_name, numpy_tensor in zip(self._output_names, output_list):\n outputs[output_name] = torch.from_numpy(numpy_tensor)\n\n return outputs\n\n @TimeCounter.count_time()\n def __ort_execute(self, io_binding: ort.IOBinding):\n \"\"\"Run inference with ONNXRuntime session.\n\n Args:\n io_binding (ort.IOBinding): To bind input/output to a specified\n device, e.g. 
GPU.\n \"\"\"\n self.sess.run_with_iobinding(io_binding)\n" ]
[ [ "torch.from_numpy" ] ]
vramesh/factor-graph-compute
[ "ea8b94b63a44c99aa279333444e4852ba7dca728" ]
[ "hmslearn/main.py" ]
[ "from factor_graph import FactorGraph\nfrom attrdict import AttrDict\nfrom collections import OrderedDict\nfrom functools import reduce\nimport numpy as np\nimport pdb\n\n\ndef normalize_message(message):\n return message/message.sum() if message.sum() > 0 else np.array([0.5, 0.5])\n\n\ndef sum_product_update_var(state, messages, sender_id, recipient_id,\n from_node_id):\n if recipient_id == from_node_id:\n return\n variable_index = sender_id[1:]\n factor_index = recipient_id[1:]\n message_product = np.array(state)\n for _, message in messages.items():\n if message is not None:\n message_product *= np.array(message)\n if messages[recipient_id] is not None:\n outgoing_message = normalize_message(message_product/np.array(messages[recipient_id]))\n else:\n outgoing_message = normalize_message(message_product)\n return outgoing_message\n\n\n\ndef sum_product_update_fac(state, messages, sender_id, recipient_id,\n from_node_id):\n if recipient_id == from_node_id:\n return \n\n state_numpy = np.array(state)\n dimension = len(state_numpy.shape)\n if dimension == 2:\n outgoing_message = np.dot(np.array(state),np.array(messages[from_node_id])) # not always correct\n\n elif dimension == 3:\n if recipient_id > from_node_id:\n outgoing_message = np.dot(np.array(state[0]),np.array(messages[from_node_id]))\n else:\n outgoing_message = np.dot(np.array(state[1]),np.array(messages[from_node_id]))\n\n return outgoing_message\n\nfunction_list = [sum_product_update_fac, sum_product_update_var, normalize_message]\n\nconfig = {\n \"algorithm\": \"sum_product\",\n \"pubsub_choice\": \"redis\",\n \"synchronous\": \"asynchronous\",\n \"number_of_iter\": 20,\n \"time_till_stop\": 20,\n \"verbose\": True\n}\n\npath_to_input_file = \"examples/hmm_simple_factor_graph_ver_7_new_ui.txt\"\n\nfg = FactorGraph(path_to_input_file, config, function_list)\nfg.run()\n\nfg.print_solution()" ]
[ [ "numpy.array" ] ]
mikemhenry/openfe
[ "d4c78af62a7ae05b99eb95d173661ac134b7e7b9" ]
[ "openfe/setup/_rbfe_utils/relative.py" ]
[ "# This code is a slightly modified version of the HybridTopologyFactory code\n# from https://github.com/choderalab/perses\n# The eventual goal is to move a version of this towards openmmtools\n# LICENSE: MIT\n\nimport logging\nimport openmm\nfrom openmm import unit\nimport numpy as np\nimport copy\nimport itertools\n# OpenMM constant for Coulomb interactions (implicitly in md_unit_system units)\nfrom openmmtools.constants import ONE_4PI_EPS0\nimport mdtraj as mdt\n\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\nclass HybridTopologyFactory:\n \"\"\"\n This class generates a hybrid topology based on two input systems and an\n atom mapping. For convenience the states are called \"old\" and \"new\"\n respectively, defining the starting and end states along the alchemical\n transformation.\n\n The input systems are assumed to have:\n 1. The total number of molecules\n 2. The same coordinates for equivalent atoms\n\n Atoms in the resulting hybrid system are treated as being from one\n of four possible types:\n\n unique_old_atom : These atoms are not mapped and only present in the old\n system. Their interactions will be on for lambda=0, off for lambda=1\n unique_new_atom : These atoms are not mapped and only present in the new\n system. Their interactions will be off for lambda=0, on for lambda=1\n core_atom : These atoms are mapped between the two end states, and are\n part of a residue that is changing alchemically. Their interactions\n will be those corresponding to the old system at lambda=0, and those\n corresponding to the new system at lambda=1\n environment_atom : These atoms are mapped between the two end states, and\n are not part of a residue undergoing an alchemical change. Their\n interactions are always on and are alchemically unmodified.\n\n Properties\n ----------\n hybrid_system : openmm.System\n The hybrid system for simulation\n new_to_hybrid_atom_map : dict of int : int\n The mapping of new system atoms to hybrid atoms\n old_to_hybrid_atom_map : dict of int : int\n The mapping of old system atoms to hybrid atoms\n hybrid_positions : [n, 3] np.ndarray\n The positions of the hybrid system\n hybrid_topology : mdtraj.Topology\n The topology of the hybrid system\n omm_hybrid_topology : openmm.app.Topology\n The OpenMM topology object corresponding to the hybrid system\n\n .. 
warning :: This API is experimental and subject to change.\n\n Notes\n -----\n * Logging has been removed and will be revamped at a later date.\n * The ability to define custom functions has been removed for now.\n * Neglected angle terms have been removed for now.\n * RMSD restraint option has been removed for now.\n * Endstate support has been removed for now.\n * Bond softening has been removed for now.\n * Unused InteractionGroup code paths have been removed.\n\n TODO\n ----\n * Document how positions for hybrid system are constructed.\n * Allow support for annealing in omitted terms.\n * Implement omitted terms (this was not available in the original class).\n\n \"\"\"\n\n def __init__(self,\n old_system, old_positions, old_topology,\n new_system, new_positions, new_topology,\n old_to_new_atom_map, old_to_new_core_atom_map,\n use_dispersion_correction=False,\n softcore_alpha=0.5,\n softcore_LJ_v2=True,\n softcore_LJ_v2_alpha=0.85,\n softcore_electrostatics=True,\n softcore_electrostatics_alpha=0.3,\n softcore_sigma_Q=1.0,\n interpolate_old_and_new_14s=False,\n flatten_torsions=False,\n **kwargs):\n \"\"\"\n Initialize the Hybrid topology factory.\n\n Parameters\n ----------\n old_system : openmm.System\n OpenMM system defining the \"old\" (i.e. starting) state.\n old_positions : [n,3] np.ndarray of float\n The positions of the \"old system\".\n old_topology : openmm.Topology\n OpenMM topology defining the \"old\" state.\n new_system: opemm.System\n OpenMM system defining the \"new\" (i.e. end) state.\n new_positions : [m,3] np.ndarray of float\n The positions of the \"new system\"\n new_topology : openmm.Topology\n OpenMM topology defining the \"new\" state.\n old_to_new_atom_map : dict of int : int\n Dictionary of corresponding atoms between the old and new systems.\n Unique atoms are not included in this atom map.\n old_to_new_core_atom_map : dict of int : int\n Dictionary of corresponding atoms between the alchemical \"core\n atoms\" (i.e. residues which are changing) between the old and\n new systems.\n use_dispersion_correction : bool, default False\n Whether to use the long range correction in the custom sterics\n force. This can be very expensive for NCMC.\n softcore_alpha: float, default None\n \"alpha\" parameter of softcore sterics, default 0.5.\n softcore_LJ_v2 : bool, default True\n Implement the softcore LJ as defined by Gapsys et al. JCTC 2012.\n softcore_LJ_v2_alpha : float, default 0.85\n Softcore alpha parameter for LJ v2\n softcore_electrostatics : bool, default True\n Use softcore electrostatics as defined by Gapsys et al. JCTC 2021.\n softcore_electrostatics_alpha : float, default 0.3\n Softcore alpha parameter for softcore electrostatics.\n softcore_sigma_Q : float, default 1.0\n Softcore sigma parameter for softcore electrostatics.\n interpolate_old_and_new_14s : bool, default False\n Whether to turn off interactions for new exceptions (not just\n 1,4s) at lambda = 0 and old exceptions at lambda = 1; if False,\n they are present in the nonbonded force.\n flatten_torsions : bool, default False\n If True, torsion terms involving `unique_new_atoms` will be\n scaled such that at lambda=0,1, the torsion term is turned off/on\n respectively. 
The opposite is true for `unique_old_atoms`.\n \"\"\"\n\n # Assign system positions and force\n # IA - Are deep copies really needed here?\n self._old_system = copy.deepcopy(old_system)\n self._old_positions = old_positions\n self._old_topology = old_topology\n self._new_system = copy.deepcopy(new_system)\n self._new_positions = new_positions\n self._new_topology = new_topology\n self._hybrid_system_forces = dict()\n\n # Set mappings (full, core, and env maps)\n self._set_mappings(old_to_new_atom_map, old_to_new_core_atom_map)\n\n # Other options\n self._use_dispersion_correction = use_dispersion_correction\n self._interpolate_14s = interpolate_old_and_new_14s\n self._flatten_torsions = flatten_torsions\n\n # Sofcore options\n self._softcore_alpha = softcore_alpha\n self._check_bounds(softcore_alpha, \"softcore_alpha\") # [0,1] check\n\n self._softcore_LJ_v2 = softcore_LJ_v2\n if self._softcore_LJ_v2:\n self._check_bounds(softcore_LJ_v2_alpha, \"softcore_LJ_v2_alpha\")\n self._softcore_LJ_v2_alpha = softcore_LJ_v2_alpha\n\n self._softcore_electrostatics = softcore_electrostatics\n if self._softcore_electrostatics:\n self._softcore_electrostatics_alpha = softcore_electrostatics_alpha\n self._check_bounds(softcore_electrostatics_alpha,\n \"softcore_electrostatics_alpha\")\n self._softcore_sigma_Q = softcore_sigma_Q\n self._check_bounds(softcore_sigma_Q, \"softcore_sigma_Q\")\n\n # TODO: end __init__ here and move everything else to\n # create_hybrid_system() or equivalent\n\n self._check_and_store_system_forces()\n\n logger.info(\"creating hybrid system\")\n # Create empty system that will become the hybrid system\n self._hybrid_system = openmm.System()\n\n # Add particles to system\n self._add_particles()\n\n # Add box + barostat\n self._handle_box()\n\n # Assign atoms to one of the classes described in the class docstring\n # Renamed from original _determine_atom_classes\n self._set_atom_classes()\n\n # Construct dictionary of exceptions in old and new systems\n self._old_system_exceptions = self._generate_dict_from_exceptions(\n self._old_system_forces['NonbondedForce'])\n self._new_system_exceptions = self._generate_dict_from_exceptions(\n self._new_system_forces['NonbondedForce'])\n\n # check for exceptions clashes between unique and env atoms\n self._validate_disjoint_sets()\n\n logger.info(\"setting force field terms\")\n # Copy constraints, checking to make sure they are not changing\n self._handle_constraints()\n\n # Copy over relevant virtual sites - pick up refactor from here\n self._handle_virtual_sites()\n\n # TODO - move to a single method call? 
Would be good to group these\n # Call each of the force methods to add the corresponding force terms\n # and prepare the forces:\n self._add_bond_force_terms()\n\n self._add_angle_force_terms()\n\n self._add_torsion_force_terms()\n\n has_nonbonded_force = ('NonbondedForce' in self._old_system_forces or\n 'NonbondedForce' in self._new_system_forces)\n\n if has_nonbonded_force:\n self._add_nonbonded_force_terms()\n\n # Call each force preparation method to generate the actual\n # interactions that we need:\n logger.info(\"adding forces\")\n self._handle_harmonic_bonds()\n\n self._handle_harmonic_angles()\n\n self._handle_periodic_torsion_force()\n\n if has_nonbonded_force:\n self._handle_nonbonded()\n if not (len(self._old_system_exceptions.keys()) == 0 and\n len(self._new_system_exceptions.keys()) == 0):\n self._handle_old_new_exceptions()\n\n # Get positions for the hybrid\n self._hybrid_positions = self._compute_hybrid_positions()\n\n # Get an MDTraj topology for writing\n self._hybrid_topology = self._create_mdtraj_topology()\n logger.info(\"DONE\")\n\n @staticmethod\n def _check_bounds(value, varname, minmax=(0, 1)):\n \"\"\"\n Convenience method to check the bounds of a value.\n\n Parameters\n ----------\n value : float\n Value to evaluate.\n varname : str\n Name of value to raise in error message\n minmax : tuple\n Two element tuple with the lower and upper bounds to check.\n\n Raises\n ------\n AssertionError\n If value is lower or greater than bounds.\n \"\"\"\n if value < minmax[0] or value > minmax[1]:\n raise AssertionError(f\"{varname} is not in {minmax}\")\n\n @staticmethod\n def _invert_dict(dictionary):\n \"\"\"\n Convenience method to invert a dictionary (since we do it so often).\n\n Paramters:\n ----------\n dictionary : dict\n Dictionary you want to invert\n \"\"\"\n return {v: k for k, v in dictionary.items()}\n\n def _set_mappings(self, old_to_new_map, core_old_to_new_map):\n \"\"\"\n Parameters\n ----------\n old_to_new_map : dict of int : int\n Dictionary mapping atoms between the old and new systems.\n\n Notes\n -----\n * For now this directly sets the system, core and env old_to_new_map,\n new_to_old_map, an empty new_to_hybrid_map and an empty\n old_to_hybrid_map. In the future this will be moved to the one\n dictionary to make things a lot less confusing.\n \"\"\"\n self._old_to_new_map = old_to_new_map\n self._core_old_to_new_map = core_old_to_new_map\n self._new_to_old_map = self._invert_dict(old_to_new_map)\n self._core_new_to_old_map = self._invert_dict(core_old_to_new_map)\n self._old_to_hybrid_map = {}\n self._new_to_hybrid_map = {}\n\n # Get unique atoms\n # old system first\n self._unique_old_atoms = []\n for particle_idx in range(self._old_system.getNumParticles()):\n if particle_idx not in self._old_to_new_map.keys():\n self._unique_old_atoms.append(particle_idx)\n\n self._unique_new_atoms = []\n for particle_idx in range(self._new_system.getNumParticles()):\n if particle_idx not in self._new_to_old_map.keys():\n self._unique_new_atoms.append(particle_idx)\n\n # Get env atoms (i.e. 
atoms mapped not in core)\n self._env_old_to_new_map = {}\n for key, value in old_to_new_map.items():\n if key not in self._core_old_to_new_map.keys():\n self._env_old_to_new_map[key] = value\n\n self._env_new_to_old_map = self._invert_dict(self._env_old_to_new_map)\n\n # IA - Internal check for now (move to test later)\n num_env = len(self._env_old_to_new_map.keys())\n num_core = len(self._core_old_to_new_map.keys())\n num_total = len(self._old_to_new_map.keys())\n assert num_env + num_core == num_total\n\n def _check_and_store_system_forces(self):\n \"\"\"\n Conveniently stores the system forces and checks that no unknown\n forces exist.\n \"\"\"\n\n def _check_unknown_forces(forces, system_name):\n # TODO: double check that CMMotionRemover is ok being here\n known_forces = {'HarmonicBondForce', 'HarmonicAngleForce',\n 'PeriodicTorsionForce', 'NonbondedForce',\n 'MonteCarloBarostat', 'CMMotionRemover'}\n\n force_names = forces.keys()\n unknown_forces = set(force_names) - set(known_forces)\n if unknown_forces:\n errmsg = (f\"Unknown forces {unknown_forces} encountered in \"\n f\"{system_name} system\")\n raise ValueError(errmsg)\n\n # Prepare dicts of forces, which will be useful later\n # TODO: Store this as self._system_forces[name], name in ('old',\n # 'new', 'hybrid') for compactness\n self._old_system_forces = {type(force).__name__: force for force in\n self._old_system.getForces()}\n _check_unknown_forces(self._old_system_forces, 'old')\n self._new_system_forces = {type(force).__name__: force for force in\n self._new_system.getForces()}\n _check_unknown_forces(self._new_system_forces, 'new')\n\n # TODO: check if this is actually used much, otherwise ditch it\n # Get and store the nonbonded method from the system:\n self._nonbonded_method = self._old_system_forces['NonbondedForce'].getNonbondedMethod()\n\n def _add_particles(self):\n \"\"\"\n Adds particles to the hybrid system.\n\n This does not copy over interactions, but does copy over the masses.\n\n Note\n ----\n * If there is a difference in masses between the old and new systems\n the average mass of the two is used.\n\n TODO\n ----\n * Verify if we should just not allow elemental changes, current\n behaviour reflects original perses code.\n \"\"\"\n # Begin by copying all particles in the old system\n for particle_idx in range(self._old_system.getNumParticles()):\n mass_old = self._old_system.getParticleMass(particle_idx)\n\n if particle_idx in self._old_to_new_map.keys():\n particle_idx_new_system = self._old_to_new_map[particle_idx]\n mass_new = self._new_system.getParticleMass(\n particle_idx_new_system)\n # Take the average of the masses if the atom is mapped\n particle_mass = (mass_old + mass_new) / 2\n else:\n particle_mass = mass_old\n\n hybrid_idx = self._hybrid_system.addParticle(particle_mass)\n self._old_to_hybrid_map[particle_idx] = hybrid_idx\n\n # If the particle index in question is mapped, make sure to add it\n # to the new to hybrid map as well.\n if particle_idx in self._old_to_new_map.keys():\n self._new_to_hybrid_map[particle_idx_new_system] = hybrid_idx\n\n # Next, add the remaining unique atoms from the new system to the\n # hybrid system and map accordingly.\n for particle_idx in self._unique_new_atoms:\n particle_mass = self._new_system.getParticleMass(particle_idx)\n hybrid_idx = self._hybrid_system.addParticle(particle_mass)\n self._new_to_hybrid_map[particle_idx] = hybrid_idx\n\n # Create the opposite atom maps for later use (nonbonded processing)\n self._hybrid_to_old_map = 
self._invert_dict(self._old_to_hybrid_map)\n self._hybrid_to_new_map = self._invert_dict(self._new_to_hybrid_map)\n\n def _handle_box(self):\n \"\"\"\n Copies over the barostat and box vectors as necessary.\n \"\"\"\n # Check that if there is a barostat in the old system,\n # it is added to the hybrid system\n if \"MonteCarloBarostat\" in self._old_system_forces.keys():\n barostat = copy.deepcopy(\n self._old_system_forces[\"MonteCarloBarostat\"])\n self._hybrid_system.addForce(barostat)\n\n # Copy over the box vectors from the old system\n box_vectors = self._old_system.getDefaultPeriodicBoxVectors()\n self._hybrid_system.setDefaultPeriodicBoxVectors(*box_vectors)\n\n def _set_atom_classes(self):\n \"\"\"\n This method determines whether each atom belongs to unique old,\n unique new, core, or environment, as defined in the class docstring.\n All indices are indices in the hybrid system.\n \"\"\"\n self._atom_classes = {'unique_old_atoms': set(),\n 'unique_new_atoms': set(),\n 'core_atoms': set(),\n 'environment_atoms': set()}\n\n # First, find the unique old atoms\n for atom_idx in self._unique_old_atoms:\n hybrid_idx = self._old_to_hybrid_map[atom_idx]\n self._atom_classes['unique_old_atoms'].add(hybrid_idx)\n\n # Then the unique new atoms\n for atom_idx in self._unique_new_atoms:\n hybrid_idx = self._new_to_hybrid_map[atom_idx]\n self._atom_classes['unique_new_atoms'].add(hybrid_idx)\n\n # The core atoms:\n core_atoms = []\n for new_idx, old_idx in self._core_new_to_old_map.items():\n new_to_hybrid_idx = self._new_to_hybrid_map[new_idx]\n old_to_hybrid_idx = self._old_to_hybrid_map[old_idx]\n if new_to_hybrid_idx != old_to_hybrid_idx:\n errmsg = (f\"there is an index collision in hybrid indices of \"\n f\"the core atom map: {self._core_new_to_old_map}\")\n raise AssertionError(errmsg)\n core_atoms.append(new_to_hybrid_idx)\n\n # The environment atoms:\n env_atoms = []\n for new_idx, old_idx in self._env_new_to_old_map.items():\n new_to_hybrid_idx = self._new_to_hybrid_map[new_idx]\n old_to_hybrid_idx = self._old_to_hybrid_map[old_idx]\n if new_to_hybrid_idx != old_to_hybrid_idx:\n errmsg = (f\"there is an index collion in hybrid indices of \"\n f\"the environment atom map: \"\n f\"{self._env_new_to_old_map}\")\n raise AssertionError(errmsg)\n env_atoms.append(new_to_hybrid_idx)\n\n # TODO - this is weirdly done and double assignments - fix\n self._atom_classes['core_atoms'] = set(core_atoms)\n self._atom_classes['environment_atoms'] = set(env_atoms)\n\n @staticmethod\n def _generate_dict_from_exceptions(force):\n \"\"\"\n This is a utility function to generate a dictionary of the form\n (particle1_idx, particle2_idx) : [exception parameters].\n This will facilitate access and search of exceptions.\n\n Parameters\n ----------\n force : openmm.NonbondedForce object\n a force containing exceptions\n\n Returns\n -------\n exceptions_dict : dict\n Dictionary of exceptions\n \"\"\"\n exceptions_dict = {}\n\n for exception_index in range(force.getNumExceptions()):\n [index1, index2, chargeProd, sigma, epsilon] = force.getExceptionParameters(exception_index)\n exceptions_dict[(index1, index2)] = [chargeProd, sigma, epsilon]\n\n return exceptions_dict\n\n def _validate_disjoint_sets(self):\n \"\"\"\n Conduct a sanity check to make sure that the hybrid maps of the old\n and new system exception dict keys do not contain both environment\n and unique_old/new atoms.\n\n TODO: repeated code - condense\n \"\"\"\n for old_indices in self._old_system_exceptions.keys():\n hybrid_indices = 
(self._old_to_hybrid_map[old_indices[0]],\n self._old_to_hybrid_map[old_indices[1]])\n old_env_intersection = set(old_indices).intersection(\n self._atom_classes['environment_atoms'])\n if old_env_intersection:\n if set(old_indices).intersection(\n self._atom_classes['unique_old_atoms']\n ):\n errmsg = (f\"old index exceptions {old_indices} include \"\n \"unique old and environment atoms, which is \"\n \"disallowed\")\n raise AssertionError(errmsg)\n\n for new_indices in self._new_system_exceptions.keys():\n hybrid_indices = (self._new_to_hybrid_map[new_indices[0]],\n self._new_to_hybrid_map[new_indices[1]])\n new_env_intersection = set(hybrid_indices).intersection(\n self._atom_classes['environment_atoms'])\n if new_env_intersection:\n if set(hybrid_indices).intersection(\n self._atom_classes['unique_new_atoms']\n ):\n errmsg = (f\"new index exceptions {new_indices} include \"\n \"unique new and environment atoms, which is \"\n \"dissallowed\")\n raise AssertionError\n\n def _handle_constraints(self):\n \"\"\"\n This method adds relevant constraints from the old and new systems.\n\n First, all constraints from the old systenm are added.\n Then, constraints to atoms unique to the new system are added.\n\n TODO: condense duplicated code\n \"\"\"\n # lengths of constraints already added\n constraint_lengths = dict()\n\n # old system\n hybrid_map = self._old_to_hybrid_map\n for const_idx in range(self._old_system.getNumConstraints()):\n at1, at2, length = self._old_system.getConstraintParameters(\n const_idx)\n hybrid_atoms = tuple(sorted([hybrid_map[at1], hybrid_map[at2]]))\n if hybrid_atoms not in constraint_lengths.keys():\n self._hybrid_system.addConstraint(hybrid_atoms[0],\n hybrid_atoms[1], length)\n constraint_lengths[hybrid_atoms] = length\n else:\n\n if constraint_lengths[hybrid_atoms] != length:\n raise AssertionError('constraint length is changing')\n\n # new system\n hybrid_map = self._new_to_hybrid_map\n for const_idx in range(self._new_system.getNumConstraints()):\n at1, at2, length = self._new_system.getConstraintParameters(\n const_idx)\n hybrid_atoms = tuple(sorted([hybrid_map[at1], hybrid_map[at2]]))\n if hybrid_atoms not in constraint_lengths.keys():\n self._hybrid_system.addConstraint(hybrid_atoms[0],\n hybrid_atoms[1], length)\n constraint_lengths[hybrid_atoms] = length\n else:\n if constraint_lengths[hybrid_atoms] != length:\n raise AssertionError('constraint length is changing')\n\n def _handle_virtual_sites(self):\n \"\"\"\n Ensure that all virtual sites in old and new system are copied over to\n the hybrid system. 
Note that we do not support virtual sites in the\n changing region.\n\n TODO - remerge into a single loop\n TODO - check that it's fine to double count here (even so, there's\n an optimisation that could be done here...)\n \"\"\"\n # old system\n # Loop through virtual sites\n for particle_idx in range(self._old_system.getNumParticles()):\n if self._old_system.isVirtualSite(particle_idx):\n # If it's a virtual site, make sure it is not in the unique or\n # core atoms, since this is currently unsupported\n hybrid_idx = self._old_to_hybrid_map[particle_idx]\n if hybrid_idx not in self._atom_classes['environment_atoms']:\n errmsg = (\"Virtual sites in changing residue are \"\n \"unsupported.\")\n raise ValueError(errmsg)\n else:\n virtual_site = self._old_system.getVirtualSite(\n particle_idx)\n self._hybrid_system.setVirtualSite(hybrid_idx,\n virtual_site)\n\n # new system\n # Loop through virtual sites\n for particle_idx in range(self._new_system.getNumParticles()):\n if self._new_system.isVirtualSite(particle_idx):\n # If it's a virtual site, make sure it is not in the unique or\n # core atoms, since this is currently unsupported\n hybrid_idx = self._new_to_hybrid_map[particle_idx]\n if hybrid_idx not in self._atom_classes['environment_atoms']:\n errmsg = (\"Virtual sites in changing residue are \"\n \"unsupported.\")\n raise ValueError(errmsg)\n else:\n virtual_site = self._new_system.getVirtualSite(\n particle_idx)\n self._hybrid_system.setVirtualSite(hybrid_idx,\n virtual_site)\n\n def _add_bond_force_terms(self):\n \"\"\"\n This function adds the appropriate bond forces to the system\n (according to groups defined in the main class docstring). Note that\n it does _not_ add the particles to the force. It only adds the force\n to facilitate another method adding the particles to the force.\n\n Notes\n -----\n * User defined functions have been removed for now.\n \"\"\"\n core_energy_expression = '(K/2)*(r-length)^2;'\n # linearly interpolate spring constant\n core_energy_expression += 'K = (1-lambda_bonds)*K1 + lambda_bonds*K2;'\n # linearly interpolate bond length\n core_energy_expression += 'length = (1-lambda_bonds)*length1 + lambda_bonds*length2;'\n\n # Create the force and add the relevant parameters\n custom_core_force = openmm.CustomBondForce(core_energy_expression)\n custom_core_force.addPerBondParameter('length1') # old bond length\n custom_core_force.addPerBondParameter('K1') # old spring constant\n custom_core_force.addPerBondParameter('length2') # new bond length\n custom_core_force.addPerBondParameter('K2') # new spring constant\n\n custom_core_force.addGlobalParameter('lambda_bonds', 0.0)\n\n self._hybrid_system.addForce(custom_core_force)\n self._hybrid_system_forces['core_bond_force'] = custom_core_force\n\n # Add a bond force for environment and unique atoms (bonds are never\n # scaled for these):\n standard_bond_force = openmm.HarmonicBondForce()\n self._hybrid_system.addForce(standard_bond_force)\n self._hybrid_system_forces['standard_bond_force'] = standard_bond_force\n\n def _add_angle_force_terms(self):\n \"\"\"\n This function adds the appropriate angle force terms to the hybrid\n system. 
It does not add particles or parameters to the force; this is\n done elsewhere.\n\n Notes\n -----\n * User defined functions have been removed for now.\n * Neglected angle terms have been removed for now.\n \"\"\"\n energy_expression = '(K/2)*(theta-theta0)^2;'\n # linearly interpolate spring constant\n energy_expression += 'K = (1.0-lambda_angles)*K_1 + lambda_angles*K_2;'\n # linearly interpolate equilibrium angle\n energy_expression += 'theta0 = (1.0-lambda_angles)*theta0_1 + lambda_angles*theta0_2;'\n\n # Create the force and add relevant parameters\n custom_core_force = openmm.CustomAngleForce(energy_expression)\n # molecule1 equilibrium angle\n custom_core_force.addPerAngleParameter('theta0_1')\n # molecule1 spring constant\n custom_core_force.addPerAngleParameter('K_1')\n # molecule2 equilibrium angle\n custom_core_force.addPerAngleParameter('theta0_2')\n # molecule2 spring constant\n custom_core_force.addPerAngleParameter('K_2')\n\n custom_core_force.addGlobalParameter('lambda_angles', 0.0)\n\n # Add the force to the system and the force dict.\n self._hybrid_system.addForce(custom_core_force)\n self._hybrid_system_forces['core_angle_force'] = custom_core_force\n\n # Add an angle term for environment/unique interactions -- these are\n # never scaled\n standard_angle_force = openmm.HarmonicAngleForce()\n self._hybrid_system.addForce(standard_angle_force)\n self._hybrid_system_forces['standard_angle_force'] = standard_angle_force\n\n def _add_torsion_force_terms(self):\n \"\"\"\n This function adds the appropriate PeriodicTorsionForce terms to the\n system. Core torsions are interpolated, while environment and unique\n torsions are always on.\n\n Notes\n -----\n * User defined functions have been removed for now.\n * Options for add_custom_core_force (default True) and\n add_unique_atom_torsion_force (default True) have been removed for\n now.\n \"\"\"\n energy_expression = '(1-lambda_torsions)*U1 + lambda_torsions*U2;'\n energy_expression += 'U1 = K1*(1+cos(periodicity1*theta-phase1));'\n energy_expression += 'U2 = K2*(1+cos(periodicity2*theta-phase2));'\n\n # Create the force and add the relevant parameters\n custom_core_force = openmm.CustomTorsionForce(energy_expression)\n # molecule1 periodicity\n custom_core_force.addPerTorsionParameter('periodicity1')\n # molecule1 phase\n custom_core_force.addPerTorsionParameter('phase1')\n # molecule1 spring constant\n custom_core_force.addPerTorsionParameter('K1')\n # molecule2 periodicity\n custom_core_force.addPerTorsionParameter('periodicity2')\n # molecule2 phase\n custom_core_force.addPerTorsionParameter('phase2')\n # molecule2 spring constant\n custom_core_force.addPerTorsionParameter('K2')\n\n custom_core_force.addGlobalParameter('lambda_torsions', 0.0)\n\n # Add the force to the system\n self._hybrid_system.addForce(custom_core_force)\n self._hybrid_system_forces['custom_torsion_force'] = custom_core_force\n\n # Create and add the torsion term for unique/environment atoms\n unique_atom_torsion_force = openmm.PeriodicTorsionForce()\n self._hybrid_system.addForce(unique_atom_torsion_force)\n self._hybrid_system_forces['unique_atom_torsion_force'] = unique_atom_torsion_force\n\n @staticmethod\n def _nonbonded_custom(v2):\n \"\"\"\n Get a part of the nonbonded energy expression when there is no cutoff.\n\n Parameters\n ----------\n v2 : bool\n Whether to use the softcore methods as defined by Gapsys et al.\n JCTC 2012.\n\n Returns\n -------\n sterics_energy_expression : str\n The energy expression for U_sterics\n 
electrostatics_energy_expression : str\n The energy expression for electrostatics\n\n TODO\n ----\n * Move to a dictionary or equivalent.\n \"\"\"\n # Soft-core Lennard-Jones\n if v2:\n sterics_energy_expression = \"U_sterics = select(step(r - r_LJ), 4*epsilon*x*(x-1.0), U_sterics_quad);\"\n sterics_energy_expression += \"U_sterics_quad = Force*(((r - r_LJ)^2)/2 - (r - r_LJ)) + U_sterics_cut;\"\n sterics_energy_expression += \"U_sterics_cut = 4*epsilon*((sigma/r_LJ)^6)*(((sigma/r_LJ)^6) - 1.0);\"\n sterics_energy_expression += \"Force = -4*epsilon*((-12*sigma^12)/(r_LJ^13) + (6*sigma^6)/(r_LJ^7));\"\n sterics_energy_expression += \"x = (sigma/r)^6;\"\n sterics_energy_expression += \"r_LJ = softcore_alpha*((26/7)*(sigma^6)*lambda_sterics_deprecated)^(1/6);\"\n sterics_energy_expression += \"lambda_sterics_deprecated = new_interaction*(1.0 - lambda_sterics_insert) + old_interaction*lambda_sterics_delete;\"\n else:\n sterics_energy_expression = \"U_sterics = 4*epsilon*x*(x-1.0); x = (sigma/reff_sterics)^6;\"\n\n return sterics_energy_expression\n\n @staticmethod\n def _nonbonded_custom_sterics_common():\n \"\"\"\n Get a custom sterics expression using amber softcore expression\n\n Returns\n -------\n sterics_addition : str\n The common softcore sterics energy expression\n\n TODO\n ----\n * Move to a dictionary or equivalent.\n \"\"\"\n # interpolation\n sterics_addition = \"epsilon = (1-lambda_sterics)*epsilonA + lambda_sterics*epsilonB;\"\n # effective softcore distance for sterics\n sterics_addition += \"reff_sterics = sigma*((softcore_alpha*lambda_alpha + (r/sigma)^6))^(1/6);\"\n sterics_addition += \"sigma = (1-lambda_sterics)*sigmaA + lambda_sterics*sigmaB;\"\n\n sterics_addition += \"lambda_alpha = new_interaction*(1-lambda_sterics_insert) + old_interaction*lambda_sterics_delete;\"\n sterics_addition += \"lambda_sterics = core_interaction*lambda_sterics_core + new_interaction*lambda_sterics_insert + old_interaction*lambda_sterics_delete;\"\n sterics_addition += \"core_interaction = delta(unique_old1+unique_old2+unique_new1+unique_new2);new_interaction = max(unique_new1, unique_new2);old_interaction = max(unique_old1, unique_old2);\"\n\n return sterics_addition\n\n @staticmethod\n def _nonbonded_custom_mixing_rules():\n \"\"\"\n Mixing rules for the custom nonbonded force.\n\n Returns\n -------\n sterics_mixing_rules : str\n The mixing expression for sterics\n electrostatics_mixing_rules : str\n The mixiing rules for electrostatics\n\n TODO\n ----\n * Move to a dictionary or equivalent.\n \"\"\"\n # Define mixing rules.\n # mixing rule for epsilon\n sterics_mixing_rules = \"epsilonA = sqrt(epsilonA1*epsilonA2);\"\n # mixing rule for epsilon\n sterics_mixing_rules += \"epsilonB = sqrt(epsilonB1*epsilonB2);\"\n # mixing rule for sigma\n sterics_mixing_rules += \"sigmaA = 0.5*(sigmaA1 + sigmaA2);\"\n # mixing rule for sigma\n sterics_mixing_rules += \"sigmaB = 0.5*(sigmaB1 + sigmaB2);\"\n return sterics_mixing_rules\n\n @staticmethod\n def _translate_nonbonded_method_to_custom(standard_nonbonded_method):\n \"\"\"\n Utility function to translate the nonbonded method enum from the\n standard nonbonded force to the custom version\n `CutoffPeriodic`, `PME`, and `Ewald` all become `CutoffPeriodic`;\n `NoCutoff` becomes `NoCutoff`; `CutoffNonPeriodic` becomes\n `CutoffNonPeriodic`\n\n Parameters\n ----------\n standard_nonbonded_method : openmm.NonbondedForce.NonbondedMethod\n the nonbonded method of the standard force\n\n Returns\n -------\n custom_nonbonded_method : 
openmm.CustomNonbondedForce.NonbondedMethod\n the nonbonded method for the equivalent customnonbonded force\n \"\"\"\n if standard_nonbonded_method in [openmm.NonbondedForce.CutoffPeriodic,\n openmm.NonbondedForce.PME,\n openmm.NonbondedForce.Ewald]:\n return openmm.CustomNonbondedForce.CutoffPeriodic\n elif standard_nonbonded_method == openmm.NonbondedForce.NoCutoff:\n return openmm.CustomNonbondedForce.NoCutoff\n elif standard_nonbonded_method == openmm.NonbondedForce.CutoffNonPeriodic:\n return openmm.CustomNonbondedForce.CutoffNonPeriodic\n else:\n errmsg = \"This nonbonded method is not supported.\"\n raise NotImplementedError(errmsg)\n\n def _add_nonbonded_force_terms(self):\n \"\"\"\n Add the nonbonded force terms to the hybrid system. Note that as with\n the other forces, this method does not add any interactions. It only\n sets up the forces.\n\n Notes\n -----\n * User defined functions have been removed for now.\n * Argument `add_custom_sterics_force` (default True) has been removed\n for now.\n\n TODO\n ----\n * Move nonbonded_method defn here to avoid just setting it globally\n and polluting `self`.\n \"\"\"\n # Add a regular nonbonded force for all interactions that are not\n # changing.\n standard_nonbonded_force = openmm.NonbondedForce()\n self._hybrid_system.addForce(standard_nonbonded_force)\n self._hybrid_system_forces['standard_nonbonded_force'] = standard_nonbonded_force\n\n # Create a CustomNonbondedForce to handle alchemically interpolated\n # nonbonded parameters.\n # Select functional form based on nonbonded method.\n # TODO: check _nonbonded_custom_ewald and _nonbonded_custom_cutoff\n # since they take arguments that are never used...\n if self._nonbonded_method in [openmm.NonbondedForce.NoCutoff]:\n sterics_energy_expression = self._nonbonded_custom(\n self._softcore_LJ_v2)\n elif self._nonbonded_method in [openmm.NonbondedForce.CutoffPeriodic,\n openmm.NonbondedForce.CutoffNonPeriodic]:\n epsilon_solvent = self._old_system_forces['NonbondedForce'].getReactionFieldDielectric()\n r_cutoff = self._old_system_forces['NonbondedForce'].getCutoffDistance()\n sterics_energy_expression = self._nonbonded_custom(\n self._softcore_LJ_v2)\n standard_nonbonded_force.setReactionFieldDielectric(\n epsilon_solvent)\n standard_nonbonded_force.setCutoffDistance(r_cutoff)\n elif self._nonbonded_method in [openmm.NonbondedForce.PME,\n openmm.NonbondedForce.Ewald]:\n [alpha_ewald, nx, ny, nz] = self._old_system_forces['NonbondedForce'].getPMEParameters()\n delta = self._old_system_forces['NonbondedForce'].getEwaldErrorTolerance()\n r_cutoff = self._old_system_forces['NonbondedForce'].getCutoffDistance()\n sterics_energy_expression = self._nonbonded_custom(\n self._softcore_LJ_v2)\n standard_nonbonded_force.setPMEParameters(alpha_ewald, nx, ny, nz)\n standard_nonbonded_force.setEwaldErrorTolerance(delta)\n standard_nonbonded_force.setCutoffDistance(r_cutoff)\n else:\n errmsg = f\"Nonbonded method {self._nonbonded_method} not supported\"\n raise ValueError(errmsg)\n\n standard_nonbonded_force.setNonbondedMethod(self._nonbonded_method)\n\n sterics_energy_expression += self._nonbonded_custom_sterics_common()\n\n sterics_mixing_rules = self._nonbonded_custom_mixing_rules()\n\n custom_nonbonded_method = self._translate_nonbonded_method_to_custom(\n self._nonbonded_method)\n\n total_sterics_energy = \"U_sterics;\" + sterics_energy_expression + sterics_mixing_rules\n\n sterics_custom_nonbonded_force = openmm.CustomNonbondedForce(\n total_sterics_energy)\n\n if self._softcore_LJ_v2:\n 
sterics_custom_nonbonded_force.addGlobalParameter(\n \"softcore_alpha\", self._softcore_LJ_v2_alpha)\n else:\n sterics_custom_nonbonded_force.addGlobalParameter(\n \"softcore_alpha\", self._softcore_alpha)\n\n # Lennard-Jones sigma initial\n sterics_custom_nonbonded_force.addPerParticleParameter(\"sigmaA\")\n # Lennard-Jones epsilon initial\n sterics_custom_nonbonded_force.addPerParticleParameter(\"epsilonA\")\n # Lennard-Jones sigma final\n sterics_custom_nonbonded_force.addPerParticleParameter(\"sigmaB\")\n # Lennard-Jones epsilon final\n sterics_custom_nonbonded_force.addPerParticleParameter(\"epsilonB\")\n # 1 = hybrid old atom, 0 otherwise\n sterics_custom_nonbonded_force.addPerParticleParameter(\"unique_old\")\n # 1 = hybrid new atom, 0 otherwise\n sterics_custom_nonbonded_force.addPerParticleParameter(\"unique_new\")\n\n sterics_custom_nonbonded_force.addGlobalParameter(\n \"lambda_sterics_core\", 0.0)\n sterics_custom_nonbonded_force.addGlobalParameter(\n \"lambda_electrostatics_core\", 0.0)\n sterics_custom_nonbonded_force.addGlobalParameter(\n \"lambda_sterics_insert\", 0.0)\n sterics_custom_nonbonded_force.addGlobalParameter(\n \"lambda_sterics_delete\", 0.0)\n\n sterics_custom_nonbonded_force.setNonbondedMethod(\n custom_nonbonded_method)\n\n self._hybrid_system.addForce(sterics_custom_nonbonded_force)\n self._hybrid_system_forces['core_sterics_force'] = sterics_custom_nonbonded_force\n\n # Set the use of dispersion correction to be the same between the new\n # nonbonded force and the old one:\n if self._old_system_forces['NonbondedForce'].getUseDispersionCorrection():\n self._hybrid_system_forces['standard_nonbonded_force'].setUseDispersionCorrection(True)\n if self._use_dispersion_correction:\n sterics_custom_nonbonded_force.setUseLongRangeCorrection(True)\n else:\n self._hybrid_system_forces['standard_nonbonded_force'].setUseDispersionCorrection(False)\n\n if self._old_system_forces['NonbondedForce'].getUseSwitchingFunction():\n switching_distance = self._old_system_forces['NonbondedForce'].getSwitchingDistance()\n standard_nonbonded_force.setUseSwitchingFunction(True)\n standard_nonbonded_force.setSwitchingDistance(switching_distance)\n sterics_custom_nonbonded_force.setUseSwitchingFunction(True)\n sterics_custom_nonbonded_force.setSwitchingDistance(switching_distance)\n else:\n standard_nonbonded_force.setUseSwitchingFunction(False)\n sterics_custom_nonbonded_force.setUseSwitchingFunction(False)\n\n @staticmethod\n def _find_bond_parameters(bond_force, index1, index2):\n \"\"\"\n This is a convenience function to find bond parameters in another\n system given the two indices.\n\n Parameters\n ----------\n bond_force : openmm.HarmonicBondForce\n The bond force where the parameters should be found\n index1 : int\n Index1 (order does not matter) of the bond atoms\n index2 : int\n Index2 (order does not matter) of the bond atoms\n\n Returns\n -------\n bond_parameters : list\n List of relevant bond parameters\n \"\"\"\n index_set = {index1, index2}\n # Loop through all the bonds:\n for bond_index in range(bond_force.getNumBonds()):\n parms = bond_force.getBondParameters(bond_index)\n if index_set == {parms[0], parms[1]}:\n return parms\n\n return []\n\n def _handle_harmonic_bonds(self):\n \"\"\"\n This method adds the appropriate interaction for all bonds in the\n hybrid system. 
The scheme used is:\n\n 1) If the two atoms are both in the core, then we add to the\n CustomBondForce and interpolate between the two parameters\n 2) If one of the atoms is in core and the other is environment, we\n have to assert that the bond parameters do not change between the\n old and the new system; then, the parameters are added to the\n regular bond force\n 3) Otherwise, we add the bond to a regular bond force.\n\n Notes\n -----\n * Bond softening logic has been removed for now.\n \"\"\"\n old_system_bond_force = self._old_system_forces['HarmonicBondForce']\n new_system_bond_force = self._new_system_forces['HarmonicBondForce']\n\n # First, loop through the old system bond forces and add relevant terms\n for bond_index in range(old_system_bond_force.getNumBonds()):\n # Get each set of bond parameters\n [index1_old, index2_old, r0_old, k_old] = old_system_bond_force.getBondParameters(bond_index)\n\n # Map the indices to the hybrid system, for which our atom classes\n # are defined.\n index1_hybrid = self._old_to_hybrid_map[index1_old]\n index2_hybrid = self._old_to_hybrid_map[index2_old]\n index_set = {index1_hybrid, index2_hybrid}\n\n # Now check if it is a subset of the core atoms (that is, both\n # atoms are in the core)\n # If it is, we need to find the parameters in the old system so\n # that we can interpolate\n if index_set.issubset(self._atom_classes['core_atoms']):\n index1_new = self._old_to_new_map[index1_old]\n index2_new = self._old_to_new_map[index2_old]\n new_bond_parameters = self._find_bond_parameters(\n new_system_bond_force, index1_new, index2_new)\n if not new_bond_parameters:\n r0_new = r0_old\n k_new = 0.0*unit.kilojoule_per_mole/unit.angstrom**2\n else:\n # TODO - why is this being recalculated?\n [index1, index2, r0_new, k_new] = self._find_bond_parameters(\n new_system_bond_force, index1_new, index2_new)\n self._hybrid_system_forces['core_bond_force'].addBond(\n index1_hybrid, index2_hybrid,\n [r0_old, k_old, r0_new, k_new])\n\n # Check if the index set is a subset of anything besides\n # environment (in the case of environment, we just add the bond to\n # the regular bond force)\n # that would mean that this bond is core-unique_old or\n # unique_old-unique_old\n # NOTE - These are currently all the same because we don't soften\n # TODO - work these out somewhere else, this is terribly difficult\n # to understand logic.\n elif (index_set.issubset(self._atom_classes['unique_old_atoms']) or\n (len(index_set.intersection(self._atom_classes['unique_old_atoms'])) == 1\n and len(index_set.intersection(self._atom_classes['core_atoms'])) == 1)):\n\n # We can just add it to the regular bond force.\n self._hybrid_system_forces['standard_bond_force'].addBond(\n index1_hybrid, index2_hybrid, r0_old, k_old)\n\n elif (len(index_set.intersection(self._atom_classes['environment_atoms'])) == 1 and\n len(index_set.intersection(self._atom_classes['core_atoms'])) == 1):\n self._hybrid_system_forces['standard_bond_force'].addBond(\n index1_hybrid, index2_hybrid, r0_old, k_old)\n\n # Otherwise, we just add the same parameters as those in the old\n # system (these are environment atoms, and the parameters are the\n # same)\n elif index_set.issubset(self._atom_classes['environment_atoms']):\n self._hybrid_system_forces['standard_bond_force'].addBond(\n index1_hybrid, index2_hybrid, r0_old, k_old)\n else:\n errmsg = (f\"hybrid index set {index_set} does not fit into a \"\n \"canonical atom type\")\n raise ValueError(errmsg)\n\n # Now loop through the new system to get the 
interactions that are\n # unique to it.\n for bond_index in range(new_system_bond_force.getNumBonds()):\n # Get each set of bond parameters\n [index1_new, index2_new, r0_new, k_new] = new_system_bond_force.getBondParameters(bond_index)\n\n # Convert indices to hybrid, since that is how we represent atom classes:\n index1_hybrid = self._new_to_hybrid_map[index1_new]\n index2_hybrid = self._new_to_hybrid_map[index2_new]\n index_set = {index1_hybrid, index2_hybrid}\n\n # If the intersection of this set and unique new atoms contains\n # anything, the bond is unique to the new system and must be added\n # all other bonds in the new system have been accounted for already\n # NOTE - These are mostly all the same because we don't soften\n if (len(index_set.intersection(self._atom_classes['unique_new_atoms'])) == 2 or\n (len(index_set.intersection(self._atom_classes['unique_new_atoms'])) == 1 and\n len(index_set.intersection(self._atom_classes['core_atoms'])) == 1)):\n\n # If we aren't softening bonds, then just add it to the standard bond force\n self._hybrid_system_forces['standard_bond_force'].addBond(\n index1_hybrid, index2_hybrid, r0_new, k_new)\n\n # If the bond is in the core, it has probably already been added\n # in the above loop. However, there are some circumstances\n # where it was not (closing a ring). In that case, the bond has\n # not been added and should be added here.\n # This has some peculiarities to be discussed...\n # TODO - Work out what the above peculiarities are...\n elif index_set.issubset(self._atom_classes['core_atoms']):\n if not self._find_bond_parameters(\n self._hybrid_system_forces['core_bond_force'],\n index1_hybrid, index2_hybrid):\n r0_old = r0_new\n k_old = 0.0*unit.kilojoule_per_mole/unit.angstrom**2\n self._hybrid_system_forces['core_bond_force'].addBond(\n index1_hybrid, index2_hybrid,\n [r0_old, k_old, r0_new, k_new])\n elif index_set.issubset(self._atom_classes['environment_atoms']):\n # Already been added\n pass\n\n elif (len(index_set.intersection(self._atom_classes['environment_atoms'])) == 1 and\n len(index_set.intersection(self._atom_classes['core_atoms'])) == 1):\n pass\n\n else:\n errmsg = (f\"hybrid index set {index_set} does not fit into a \"\n \"canonical atom type\")\n raise ValueError(errmsg)\n\n @staticmethod\n def _find_angle_parameters(angle_force, indices):\n \"\"\"\n Convenience function to find the angle parameters corresponding to a\n particular set of indices\n\n Parameters\n ----------\n angle_force : openmm.HarmonicAngleForce\n The force where the angle of interest may be found.\n indices : list of int\n The indices (any order) of the angle atoms\n\n Returns\n -------\n angle_params : list\n list of angle parameters\n \"\"\"\n indices_reversed = indices[::-1]\n\n # Now loop through and try to find the angle:\n for angle_index in range(angle_force.getNumAngles()):\n angle_params = angle_force.getAngleParameters(angle_index)\n\n # Get a set representing the angle indices\n angle_param_indices = angle_params[:3]\n\n if (indices == angle_param_indices or\n indices_reversed == angle_param_indices):\n return angle_params\n return [] # Return empty if no matching angle found\n\n def _handle_harmonic_angles(self):\n \"\"\"\n This method adds the appropriate interaction for all angles in the\n hybrid system. 
The scheme used, as with bonds, is:\n\n 1) If the three atoms are all in the core, then we add to the\n CustomAngleForce and interpolate between the two parameters\n 2) If the three atoms contain at least one unique new, check if the\n angle is in the neglected new list, and if so, interpolate from\n K_1 = 0; else, if the three atoms contain at least one unique old,\n check if the angle is in the neglected old list, and if so,\n interpolate from K_2 = 0.\n 3) If the angle contains at least one environment and at least one\n core atom, assert there are no unique new atoms and that the angle\n terms are preserved between the new and the old system. Then add to\n the standard angle force.\n 4) Otherwise, we add the angle to a regular angle force since it is\n environment.\n\n Notes\n -----\n * Removed softening and neglected angle functionality\n \"\"\"\n old_system_angle_force = self._old_system_forces['HarmonicAngleForce']\n new_system_angle_force = self._new_system_forces['HarmonicAngleForce']\n\n # First, loop through all the angles in the old system to determine\n # what to do with them. We will only use the\n # custom angle force if all atoms are part of \"core.\" Otherwise, they\n # are either unique to one system or never change.\n for angle_index in range(old_system_angle_force.getNumAngles()):\n\n old_angle_parameters = old_system_angle_force.getAngleParameters(\n angle_index)\n\n # Get the indices in the hybrid system\n hybrid_index_list = [\n self._old_to_hybrid_map[old_atomid] for old_atomid in old_angle_parameters[:3]\n ]\n hybrid_index_set = set(hybrid_index_list)\n\n # If all atoms are in the core, we'll need to find the\n # corresponding parameters in the old system and interpolate\n if hybrid_index_set.issubset(self._atom_classes['core_atoms']):\n # Get the new indices so we can get the new angle parameters\n new_indices = [\n self._old_to_new_map[old_atomid] for old_atomid in old_angle_parameters[:3]\n ]\n new_angle_parameters = self._find_angle_parameters(\n new_system_angle_force, new_indices\n )\n if not new_angle_parameters:\n new_angle_parameters = [\n 0, 0, 0, old_angle_parameters[3],\n 0.0*unit.kilojoule_per_mole/unit.radian**2\n ]\n\n # Add to the hybrid force:\n # the parameters at indices 3 and 4 represent theta0 and k,\n # respectively.\n hybrid_force_parameters = [\n old_angle_parameters[3], old_angle_parameters[4],\n new_angle_parameters[3], new_angle_parameters[4]\n ]\n self._hybrid_system_forces['core_angle_force'].addAngle(\n hybrid_index_list[0], hybrid_index_list[1],\n hybrid_index_list[2], hybrid_force_parameters\n )\n\n # Check if the atoms are neither all core nor all environment,\n # which would mean they involve unique old interactions\n elif not hybrid_index_set.issubset(\n self._atom_classes['environment_atoms']):\n # if there is an environment atom\n if hybrid_index_set.intersection(\n self._atom_classes['environment_atoms']):\n if hybrid_index_set.intersection(\n self._atom_classes['unique_old_atoms']):\n errmsg = \"we disallow unique-environment terms\"\n raise ValueError(errmsg)\n\n self._hybrid_system_forces['standard_angle_force'].addAngle(\n hybrid_index_list[0], hybrid_index_list[1],\n hybrid_index_list[2], old_angle_parameters[3],\n old_angle_parameters[4]\n )\n else:\n # There are no env atoms, so we can treat this term\n # appropriately\n\n # We don't soften so just add this to the standard angle\n # force\n self._hybrid_system_forces['standard_angle_force'].addAngle(\n hybrid_index_list[0], hybrid_index_list[1],\n hybrid_index_list[2], 
old_angle_parameters[3],\n old_angle_parameters[4]\n )\n\n # Otherwise, only environment atoms are in this interaction, so\n # add it to the standard angle force\n elif hybrid_index_set.issubset(\n self._atom_classes['environment_atoms']):\n self._hybrid_system_forces['standard_angle_force'].addAngle(\n hybrid_index_list[0], hybrid_index_list[1],\n hybrid_index_list[2], old_angle_parameters[3],\n old_angle_parameters[4]\n )\n else:\n errmsg = (f\"handle_harmonic_angles: angle_index {angle_index} \"\n \"does not fit a canonical form.\")\n raise ValueError(errmsg)\n\n # Finally, loop through the new system force to add any unique new\n # angles\n for angle_index in range(new_system_angle_force.getNumAngles()):\n\n new_angle_parameters = new_system_angle_force.getAngleParameters(\n angle_index)\n\n # Get the indices in the hybrid system\n hybrid_index_list = [\n self._new_to_hybrid_map[new_atomid] for new_atomid in new_angle_parameters[:3]\n ]\n hybrid_index_set = set(hybrid_index_list)\n\n # If the intersection of this hybrid set with the unique new atoms\n # is nonempty, it must be added:\n # TODO - there's a ton of len > 0 on sets, empty sets == False,\n # so we can simplify this logic.\n if len(hybrid_index_set.intersection(\n self._atom_classes['unique_new_atoms'])) > 0:\n if hybrid_index_set.intersection(\n self._atom_classes['environment_atoms']):\n errmsg = (\"we disallow angle terms with unique new and \"\n \"environment atoms\")\n raise ValueError(errmsg)\n\n # Not softening just add to the nonalchemical force\n self._hybrid_system_forces['standard_angle_force'].addAngle(\n hybrid_index_list[0], hybrid_index_list[1],\n hybrid_index_list[2], new_angle_parameters[3],\n new_angle_parameters[4]\n )\n\n elif hybrid_index_set.issubset(self._atom_classes['core_atoms']):\n if not self._find_angle_parameters(self._hybrid_system_forces['core_angle_force'],\n hybrid_index_list):\n hybrid_force_parameters = [\n new_angle_parameters[3],\n 0.0*unit.kilojoule_per_mole/unit.radian**2,\n new_angle_parameters[3], new_angle_parameters[4]\n ]\n self._hybrid_system_forces['core_angle_force'].addAngle(\n hybrid_index_list[0], hybrid_index_list[1],\n hybrid_index_list[2], hybrid_force_parameters\n )\n elif hybrid_index_set.issubset(self._atom_classes['environment_atoms']):\n # We have already added the appropriate environmental atom\n # terms\n pass\n elif hybrid_index_set.intersection(self._atom_classes['environment_atoms']):\n if hybrid_index_set.intersection(self._atom_classes['unique_new_atoms']):\n errmsg = (\"we disallow angle terms with unique new and \"\n \"environment atoms\")\n raise ValueError(errmsg)\n else:\n errmsg = (f\"hybrid index list {hybrid_index_list} does not \"\n \"fit into a canonical atom set\")\n raise ValueError(errmsg)\n\n @staticmethod\n def _find_torsion_parameters(torsion_force, indices):\n \"\"\"\n Convenience function to find the torsion parameters corresponding to a\n particular set of indices.\n\n Parameters\n ----------\n torsion_force : openmm.PeriodicTorsionForce\n torsion force where the torsion of interest may be found\n indices : list of int\n The indices of the atoms of the torsion\n\n Returns\n -------\n torsion_parameters : list\n torsion parameters\n \"\"\"\n indices_reversed = indices[::-1]\n\n torsion_params_list = list()\n\n # Now loop through and try to find the torsion:\n for torsion_idx in range(torsion_force.getNumTorsions()):\n torsion_params = torsion_force.getTorsionParameters(torsion_idx)\n\n # Get a set representing the torsion indices:\n 
torsion_param_indices = torsion_params[:4]\n\n if (indices == torsion_param_indices or\n indices_reversed == torsion_param_indices):\n torsion_params_list.append(torsion_params)\n\n return torsion_params_list\n\n def _handle_periodic_torsion_force(self):\n \"\"\"\n Handle the torsions defined in the new and old systems as such:\n\n 1. old system torsions will enter the ``custom_torsion_force`` if they\n do not contain ``unique_old_atoms`` and will interpolate from ``on``\n to ``off`` from ``lambda_torsions`` = 0 to 1, respectively.\n 2. new system torsions will enter the ``custom_torsion_force`` if they\n do not contain ``unique_new_atoms`` and will interpolate from\n ``off`` to ``on`` from ``lambda_torsions`` = 0 to 1, respectively.\n 3. old *and* new system torsions will enter the\n ``unique_atom_torsion_force`` (``standard_torsion_force``) and will\n *not* be interpolated.\n\n Notes\n -----\n * Torsion flattening logic has been removed for now.\n \"\"\"\n old_system_torsion_force = self._old_system_forces['PeriodicTorsionForce']\n new_system_torsion_force = self._new_system_forces['PeriodicTorsionForce']\n\n auxiliary_custom_torsion_force = []\n old_custom_torsions_to_standard = []\n\n # We need to keep track of what torsions we added so that we do not\n # double count\n # added_torsions = []\n # TODO: Commented out since this actually isn't being done anywhere?\n # Is it necessary? Should we add this logic back in?\n for torsion_index in range(old_system_torsion_force.getNumTorsions()):\n\n torsion_parameters = old_system_torsion_force.getTorsionParameters(\n torsion_index)\n\n # Get the indices in the hybrid system\n hybrid_index_list = [\n self._old_to_hybrid_map[old_index] for old_index in torsion_parameters[:4]\n ]\n hybrid_index_set = set(hybrid_index_list)\n\n # If all atoms are in the core, we'll need to find the\n # corresponding parameters in the old system and interpolate\n if hybrid_index_set.intersection(self._atom_classes['unique_old_atoms']):\n # Then it goes to a standard force...\n self._hybrid_system_forces['unique_atom_torsion_force'].addTorsion(\n hybrid_index_list[0], hybrid_index_list[1],\n hybrid_index_list[2], hybrid_index_list[3],\n torsion_parameters[4], torsion_parameters[5],\n torsion_parameters[6]\n )\n else:\n # It is a core-only term, an environment-only term, or a\n # core/env term; in any case, it goes to the core torsion_force\n # TODO - why are we even adding the 0.0, 0.0, 0.0 section?\n hybrid_force_parameters = [\n torsion_parameters[4], torsion_parameters[5],\n torsion_parameters[6], 0.0, 0.0, 0.0\n ]\n auxiliary_custom_torsion_force.append(\n [hybrid_index_list[0], hybrid_index_list[1],\n hybrid_index_list[2], hybrid_index_list[3],\n hybrid_force_parameters[:3]]\n )\n\n for torsion_index in range(new_system_torsion_force.getNumTorsions()):\n torsion_parameters = new_system_torsion_force.getTorsionParameters(torsion_index)\n\n # Get the indices in the hybrid system:\n hybrid_index_list = [\n self._new_to_hybrid_map[new_index] for new_index in torsion_parameters[:4]]\n hybrid_index_set = set(hybrid_index_list)\n\n if hybrid_index_set.intersection(self._atom_classes['unique_new_atoms']):\n # Then it goes to the custom torsion force (scaled to zero)\n self._hybrid_system_forces['unique_atom_torsion_force'].addTorsion(\n hybrid_index_list[0], hybrid_index_list[1],\n hybrid_index_list[2], hybrid_index_list[3],\n torsion_parameters[4], torsion_parameters[5],\n torsion_parameters[6]\n )\n else:\n hybrid_force_parameters = [\n 0.0, 0.0, 0.0, 
torsion_parameters[4],\n torsion_parameters[5], torsion_parameters[6]]\n\n # Check to see if this term is in the olds...\n term = [hybrid_index_list[0], hybrid_index_list[1],\n hybrid_index_list[2], hybrid_index_list[3],\n hybrid_force_parameters[3:]]\n if term in auxiliary_custom_torsion_force:\n # Then this terms has to go to standard and be deleted...\n old_index = auxiliary_custom_torsion_force.index(term)\n old_custom_torsions_to_standard.append(old_index)\n self._hybrid_system_forces['unique_atom_torsion_force'].addTorsion(\n hybrid_index_list[0], hybrid_index_list[1],\n hybrid_index_list[2], hybrid_index_list[3],\n torsion_parameters[4], torsion_parameters[5],\n torsion_parameters[6]\n )\n else:\n # Then this term has to go to the core force...\n self._hybrid_system_forces['custom_torsion_force'].addTorsion(\n hybrid_index_list[0], hybrid_index_list[1],\n hybrid_index_list[2], hybrid_index_list[3],\n hybrid_force_parameters\n )\n\n # Now we have to loop through the aux custom torsion force\n for index in [q for q in range(len(auxiliary_custom_torsion_force))\n if q not in old_custom_torsions_to_standard]:\n terms = auxiliary_custom_torsion_force[index]\n hybrid_index_list = terms[:4]\n hybrid_force_parameters = terms[4] + [0., 0., 0.]\n self._hybrid_system_forces['custom_torsion_force'].addTorsion(\n hybrid_index_list[0], hybrid_index_list[1],\n hybrid_index_list[2], hybrid_index_list[3],\n hybrid_force_parameters\n )\n\n def _handle_nonbonded(self):\n \"\"\"\n Handle the nonbonded interactions defined in the new and old systems.\n\n TODO\n ----\n * Expand this docstring to explain the logic.\n * A lot of this logic is duplicated, probably turn it into a couple of\n functions.\n \"\"\"\n def _check_indices(idx1, idx2):\n if idx1 != idx2:\n errmsg = (\"Attempting to add incorrect particle to hybrid \"\n \"system\")\n raise ValueError(errmsg)\n\n old_system_nonbonded_force = self._old_system_forces['NonbondedForce']\n new_system_nonbonded_force = self._new_system_forces['NonbondedForce']\n hybrid_to_old_map = self._hybrid_to_old_map\n hybrid_to_new_map = self._hybrid_to_new_map\n\n # Define new global parameters for NonbondedForce\n self._hybrid_system_forces['standard_nonbonded_force'].addGlobalParameter('lambda_electrostatics_core', 0.0)\n self._hybrid_system_forces['standard_nonbonded_force'].addGlobalParameter('lambda_sterics_core', 0.0)\n self._hybrid_system_forces['standard_nonbonded_force'].addGlobalParameter(\"lambda_electrostatics_delete\", 0.0)\n self._hybrid_system_forces['standard_nonbonded_force'].addGlobalParameter(\"lambda_electrostatics_insert\", 0.0)\n\n # We have to loop through the particles in the system, because\n # nonbonded force does not accept index\n for particle_index in range(self._hybrid_system.getNumParticles()):\n\n if particle_index in self._atom_classes['unique_old_atoms']:\n # Get the parameters in the old system\n old_index = hybrid_to_old_map[particle_index]\n [charge, sigma, epsilon] = old_system_nonbonded_force.getParticleParameters(old_index)\n\n # Add the particle to the hybrid custom sterics and\n # electrostatics.\n # turning off sterics in forward direction\n check_index = self._hybrid_system_forces['core_sterics_force'].addParticle(\n [sigma, epsilon, sigma, 0.0*epsilon, 1, 0]\n )\n _check_indices(particle_index, check_index)\n\n # Add particle to the regular nonbonded force, but\n # Lennard-Jones will be handled by CustomNonbondedForce\n check_index = self._hybrid_system_forces['standard_nonbonded_force'].addParticle(\n charge, sigma, 
0.0*epsilon\n )\n _check_indices(particle_index, check_index)\n\n # Charge will be turned off at\n # lambda_electrostatics_delete = 0, on at\n # lambda_electrostatics_delete = 1; kill charge with\n # lambda_electrostatics_delete = 0 --> 1\n self._hybrid_system_forces['standard_nonbonded_force'].addParticleParameterOffset(\n 'lambda_electrostatics_delete', particle_index,\n -charge, 0*sigma, 0*epsilon\n )\n\n elif particle_index in self._atom_classes['unique_new_atoms']:\n # Get the parameters in the new system\n new_index = hybrid_to_new_map[particle_index]\n [charge, sigma, epsilon] = new_system_nonbonded_force.getParticleParameters(new_index)\n\n # Add the particle to the hybrid custom sterics and electrostatics\n # turning on sterics in forward direction\n check_index = self._hybrid_system_forces['core_sterics_force'].addParticle(\n [sigma, 0.0*epsilon, sigma, epsilon, 0, 1]\n )\n _check_indices(particle_index, check_index)\n\n # Add particle to the regular nonbonded force, but\n # Lennard-Jones will be handled by CustomNonbondedForce\n check_index = self._hybrid_system_forces['standard_nonbonded_force'].addParticle(\n 0.0, sigma, 0.0\n ) # charge starts at zero\n _check_indices(particle_index, check_index)\n\n # Charge will be turned off at lambda_electrostatics_insert = 0\n # on at lambda_electrostatics_insert = 1;\n # add charge with lambda_electrostatics_insert = 0 --> 1\n self._hybrid_system_forces['standard_nonbonded_force'].addParticleParameterOffset(\n 'lambda_electrostatics_insert', particle_index,\n +charge, 0, 0\n )\n\n elif particle_index in self._atom_classes['core_atoms']:\n # Get the parameters in the new and old systems:\n old_index = hybrid_to_old_map[particle_index]\n [charge_old, sigma_old, epsilon_old] = old_system_nonbonded_force.getParticleParameters(old_index)\n new_index = hybrid_to_new_map[particle_index]\n [charge_new, sigma_new, epsilon_new] = new_system_nonbonded_force.getParticleParameters(new_index)\n\n # Add the particle to the custom forces, interpolating between\n # the two parameters; add steric params and zero electrostatics\n # to core_sterics per usual\n check_index = self._hybrid_system_forces['core_sterics_force'].addParticle(\n [sigma_old, epsilon_old, sigma_new, epsilon_new, 0, 0])\n _check_indices(particle_index, check_index)\n\n # Still add the particle to the regular nonbonded force, but\n # with zeroed out parameters; add old charge to\n # standard_nonbonded and zero sterics\n check_index = self._hybrid_system_forces['standard_nonbonded_force'].addParticle(\n charge_old, 0.5*(sigma_old+sigma_new), 0.0)\n _check_indices(particle_index, check_index)\n\n # Charge is charge_old at lambda_electrostatics = 0,\n # charge_new at lambda_electrostatics = 1\n # TODO: We could also interpolate the Lennard-Jones here\n # instead of core_sterics force so that core_sterics_force\n # could just be softcore.\n\n # Interpolate between old and new charge with\n # lambda_electrostatics core make sure to keep sterics off\n self._hybrid_system_forces['standard_nonbonded_force'].addParticleParameterOffset(\n 'lambda_electrostatics_core', particle_index,\n (charge_new - charge_old), 0, 0\n )\n\n # Otherwise, the particle is in the environment\n else:\n # The parameters will be the same in new and old system, so\n # just take the old parameters\n old_index = hybrid_to_old_map[particle_index]\n [charge, sigma, epsilon] = old_system_nonbonded_force.getParticleParameters(old_index)\n\n # Add the particle to the hybrid custom sterics, but they dont\n # change; 
electrostatics are ignored\n self._hybrid_system_forces['core_sterics_force'].addParticle(\n [sigma, epsilon, sigma, epsilon, 0, 0]\n )\n\n # Add the environment atoms to the regular nonbonded force as\n # well: should we be adding steric terms here, too?\n self._hybrid_system_forces['standard_nonbonded_force'].addParticle(\n charge, sigma, epsilon\n )\n\n # Now loop pairwise through (unique_old, unique_new) and add exceptions\n # so that they never interact electrostatically\n # (place into Nonbonded Force)\n unique_old_atoms = self._atom_classes['unique_old_atoms']\n unique_new_atoms = self._atom_classes['unique_new_atoms']\n\n for old in unique_old_atoms:\n for new in unique_new_atoms:\n self._hybrid_system_forces['standard_nonbonded_force'].addException(\n old, new, 0.0*unit.elementary_charge**2,\n 1.0*unit.nanometers, 0.0*unit.kilojoules_per_mole)\n # This is only necessary to avoid the 'All forces must have\n # identical exclusions' rule\n self._hybrid_system_forces['core_sterics_force'].addExclusion(old, new)\n\n self._handle_interaction_groups()\n\n self._handle_hybrid_exceptions()\n\n self._handle_original_exceptions()\n\n def _handle_interaction_groups(self):\n \"\"\"\n Create the appropriate interaction groups for the custom nonbonded\n forces. The groups are:\n\n 1) Unique-old - core\n 2) Unique-old - environment\n 3) Unique-new - core\n 4) Unique-new - environment\n 5) Core - environment\n 6) Core - core\n\n Unique-old and Unique new are prevented from interacting this way,\n and intra-unique interactions occur in an unmodified nonbonded force.\n\n Must be called after particles are added to the Nonbonded forces\n TODO: we should also be adding the following interaction groups...\n 7) Unique-new - Unique-new\n 8) Unique-old - Unique-old\n \"\"\"\n # Get the force objects for convenience:\n sterics_custom_force = self._hybrid_system_forces['core_sterics_force']\n\n # Also prepare the atom classes\n core_atoms = self._atom_classes['core_atoms']\n unique_old_atoms = self._atom_classes['unique_old_atoms']\n unique_new_atoms = self._atom_classes['unique_new_atoms']\n environment_atoms = self._atom_classes['environment_atoms']\n\n sterics_custom_force.addInteractionGroup(unique_old_atoms, core_atoms)\n\n sterics_custom_force.addInteractionGroup(unique_old_atoms,\n environment_atoms)\n\n sterics_custom_force.addInteractionGroup(unique_new_atoms,\n core_atoms)\n\n sterics_custom_force.addInteractionGroup(unique_new_atoms,\n environment_atoms)\n\n sterics_custom_force.addInteractionGroup(core_atoms, environment_atoms)\n\n sterics_custom_force.addInteractionGroup(core_atoms, core_atoms)\n\n sterics_custom_force.addInteractionGroup(unique_new_atoms,\n unique_new_atoms)\n\n sterics_custom_force.addInteractionGroup(unique_old_atoms,\n unique_old_atoms)\n\n def _handle_hybrid_exceptions(self):\n \"\"\"\n Instead of excluding interactions that shouldn't occur, we provide\n exceptions for interactions that were zeroed out but should occur.\n \"\"\"\n # TODO - are these actually used anywhere? 
Flake8 says no\n old_system_nonbonded_force = self._old_system_forces['NonbondedForce']\n new_system_nonbonded_force = self._new_system_forces['NonbondedForce']\n\n # Prepare the atom classes\n unique_old_atoms = self._atom_classes['unique_old_atoms']\n unique_new_atoms = self._atom_classes['unique_new_atoms']\n\n # Get the list of interaction pairs for which we need to set exceptions\n unique_old_pairs = list(itertools.combinations(unique_old_atoms, 2))\n unique_new_pairs = list(itertools.combinations(unique_new_atoms, 2))\n\n # Add back the interactions of the old unique atoms, unless there are\n # exceptions\n for atom_pair in unique_old_pairs:\n # Since the pairs are indexed in the dictionary by the old system\n # indices, we need to convert\n old_index_atom_pair = (self._hybrid_to_old_map[atom_pair[0]],\n self._hybrid_to_old_map[atom_pair[1]])\n\n # Now we check if the pair is in the exception dictionary\n if old_index_atom_pair in self._old_system_exceptions:\n [chargeProd, sigma, epsilon] = self._old_system_exceptions[old_index_atom_pair]\n # if we are interpolating 1,4 exceptions then we have to\n if self._interpolate_14s:\n self._hybrid_system_forces['standard_nonbonded_force'].addException(\n atom_pair[0], atom_pair[1], chargeProd*0.0,\n sigma, epsilon*0.0\n )\n else:\n self._hybrid_system_forces['standard_nonbonded_force'].addException(\n atom_pair[0], atom_pair[1], chargeProd, sigma, epsilon\n )\n\n # Add exclusion to ensure exceptions are consistent\n self._hybrid_system_forces['core_sterics_force'].addExclusion(\n atom_pair[0], atom_pair[1]\n )\n\n # Check if the pair is in the reverse order and use that if so\n elif old_index_atom_pair[::-1] in self._old_system_exceptions:\n [chargeProd, sigma, epsilon] = self._old_system_exceptions[old_index_atom_pair[::-1]]\n # If we are interpolating 1,4 exceptions then we have to\n if self._interpolate_14s:\n self._hybrid_system_forces['standard_nonbonded_force'].addException(\n atom_pair[0], atom_pair[1], chargeProd*0.0,\n sigma, epsilon*0.0\n )\n else:\n self._hybrid_system_forces['standard_nonbonded_force'].addException(\n atom_pair[0], atom_pair[1], chargeProd, sigma, epsilon)\n\n # Add exclusion to ensure exceptions are consistent\n self._hybrid_system_forces['core_sterics_force'].addExclusion(\n atom_pair[0], atom_pair[1])\n\n # TODO: work out why there's a bunch of commented out code here\n # Exerpt:\n # If it's not handled by an exception in the original system, we\n # just add the regular parameters as an exception\n # TODO: this implies that the old-old nonbonded interactions (those\n # which are not exceptions) are always self-interacting throughout\n # lambda protocol...\n\n # Add back the interactions of the new unique atoms, unless there are\n # exceptions\n for atom_pair in unique_new_pairs:\n # Since the pairs are indexed in the dictionary by the new system\n # indices, we need to convert\n new_index_atom_pair = (self._hybrid_to_new_map[atom_pair[0]],\n self._hybrid_to_new_map[atom_pair[1]])\n\n # Now we check if the pair is in the exception dictionary\n if new_index_atom_pair in self._new_system_exceptions:\n [chargeProd, sigma, epsilon] = self._new_system_exceptions[new_index_atom_pair]\n if self._interpolate_14s:\n self._hybrid_system_forces['standard_nonbonded_force'].addException(\n atom_pair[0], atom_pair[1], chargeProd*0.0,\n sigma, epsilon*0.0\n )\n else:\n self._hybrid_system_forces['standard_nonbonded_force'].addException(\n atom_pair[0], atom_pair[1], chargeProd, sigma, epsilon\n )\n\n 
self._hybrid_system_forces['core_sterics_force'].addExclusion(\n atom_pair[0], atom_pair[1]\n )\n\n # Check if the pair is present in the reverse order and use that if so\n elif new_index_atom_pair[::-1] in self._new_system_exceptions:\n [chargeProd, sigma, epsilon] = self._new_system_exceptions[new_index_atom_pair[::-1]]\n if self._interpolate_14s:\n self._hybrid_system_forces['standard_nonbonded_force'].addException(\n atom_pair[0], atom_pair[1], chargeProd*0.0,\n sigma, epsilon*0.0\n )\n else:\n self._hybrid_system_forces['standard_nonbonded_force'].addException(\n atom_pair[0], atom_pair[1], chargeProd, sigma, epsilon\n )\n\n self._hybrid_system_forces['core_sterics_force'].addExclusion(\n atom_pair[0], atom_pair[1]\n )\n\n\n # TODO: work out why there's a bunch of commented out code here\n # If it's not handled by an exception in the original system, we\n # just add the regular parameters as an exception\n\n @staticmethod\n def _find_exception(force, index1, index2):\n \"\"\"\n Find the exception that corresponds to the given indices in the given\n system\n\n Parameters\n ----------\n force : openmm.NonbondedForce object\n System containing the exceptions\n index1 : int\n The index of the first atom (order is unimportant)\n index2 : int\n The index of the second atom (order is unimportant)\n\n Returns\n -------\n exception_parameters : list\n List of exception parameters\n \"\"\"\n index_set = {index1, index2}\n\n # Loop through the exceptions and try to find one matching the criteria\n for exception_idx in range(force.getNumExceptions()):\n exception_parameters = force.getExceptionParameters(exception_idx)\n if index_set==set(exception_parameters[:2]):\n return exception_parameters\n return []\n\n def _handle_original_exceptions(self):\n \"\"\"\n This method ensures that exceptions present in the original systems are\n present in the hybrid appropriately.\n \"\"\"\n # Get what we need to find the exceptions from the new and old systems:\n old_system_nonbonded_force = self._old_system_forces['NonbondedForce']\n new_system_nonbonded_force = self._new_system_forces['NonbondedForce']\n hybrid_to_old_map = self._hybrid_to_old_map\n hybrid_to_new_map = self._hybrid_to_new_map\n\n # First, loop through the old system's exceptions and add them to the\n # hybrid appropriately:\n for exception_pair, exception_parameters in self._old_system_exceptions.items():\n\n [index1_old, index2_old] = exception_pair\n [chargeProd_old, sigma_old, epsilon_old] = exception_parameters\n\n # Get hybrid indices:\n index1_hybrid = self._old_to_hybrid_map[index1_old]\n index2_hybrid = self._old_to_hybrid_map[index2_old]\n index_set = {index1_hybrid, index2_hybrid}\n\n\n # In this case, the interaction is only covered by the regular\n # nonbonded force, and as such will be copied to that force\n # In the unique-old case, it is handled elsewhere due to internal\n # peculiarities regarding exceptions\n if index_set.issubset(self._atom_classes['environment_atoms']):\n self._hybrid_system_forces['standard_nonbonded_force'].addException(\n index1_hybrid, index2_hybrid, chargeProd_old,\n sigma_old, epsilon_old\n )\n self._hybrid_system_forces['core_sterics_force'].addExclusion(\n index1_hybrid, index2_hybrid\n )\n\n # We have already handled unique old - unique old exceptions\n elif len(index_set.intersection(self._atom_classes['unique_old_atoms'])) == 2:\n continue\n\n # Otherwise, check if one of the atoms in the set is in the\n # unique_old_group and the other is not:\n elif 
len(index_set.intersection(self._atom_classes['unique_old_atoms'])) == 1:\n if self._interpolate_14s:\n self._hybrid_system_forces['standard_nonbonded_force'].addException(\n index1_hybrid, index2_hybrid, chargeProd_old*0.0,\n sigma_old, epsilon_old*0.0\n )\n else:\n self._hybrid_system_forces['standard_nonbonded_force'].addException(\n index1_hybrid, index2_hybrid, chargeProd_old,\n sigma_old, epsilon_old\n )\n\n self._hybrid_system_forces['core_sterics_force'].addExclusion(\n index1_hybrid, index2_hybrid\n )\n\n # If the exception particles are neither solely old unique, solely\n # environment, nor contain any unique old atoms, they are either\n # core/environment or core/core\n # In this case, we need to get the parameters from the exception in\n # the other (new) system, and interpolate between the two\n else:\n # First get the new indices.\n index1_new = hybrid_to_new_map[index1_hybrid]\n index2_new = hybrid_to_new_map[index2_hybrid]\n # Get the exception parameters:\n new_exception_parms= self._find_exception(\n new_system_nonbonded_force,\n index1_new, index2_new)\n\n # If there's no new exception, then we should just set the\n # exception parameters to be the nonbonded parameters\n if not new_exception_parms:\n [charge1_new, sigma1_new, epsilon1_new] = new_system_nonbonded_force.getParticleParameters(index1_new)\n [charge2_new, sigma2_new, epsilon2_new] = new_system_nonbonded_force.getParticleParameters(index2_new)\n\n chargeProd_new = charge1_new * charge2_new\n sigma_new = 0.5 * (sigma1_new + sigma2_new)\n epsilon_new = unit.sqrt(epsilon1_new*epsilon2_new)\n else:\n [index1_new, index2_new, chargeProd_new, sigma_new, epsilon_new] = new_exception_parms\n\n # Interpolate between old and new\n exception_index = self._hybrid_system_forces['standard_nonbonded_force'].addException(\n index1_hybrid, index2_hybrid, chargeProd_old,\n sigma_old, epsilon_old\n )\n self._hybrid_system_forces['standard_nonbonded_force'].addExceptionParameterOffset(\n 'lambda_electrostatics_core', exception_index,\n (chargeProd_new - chargeProd_old), 0, 0\n )\n self._hybrid_system_forces['standard_nonbonded_force'].addExceptionParameterOffset(\n 'lambda_sterics_core', exception_index, 0,\n (sigma_new - sigma_old), (epsilon_new - epsilon_old)\n )\n self._hybrid_system_forces['core_sterics_force'].addExclusion(\n index1_hybrid, index2_hybrid\n )\n\n # Now, loop through the new system to collect remaining interactions.\n # The only that remain here are uniquenew-uniquenew, uniquenew-core,\n # and uniquenew-environment. There might also be core-core, since not\n # all core-core exceptions exist in both\n for exception_pair, exception_parameters in self._new_system_exceptions.items():\n [index1_new, index2_new] = exception_pair\n [chargeProd_new, sigma_new, epsilon_new] = exception_parameters\n\n # Get hybrid indices:\n index1_hybrid = self._new_to_hybrid_map[index1_new]\n index2_hybrid = self._new_to_hybrid_map[index2_new]\n\n index_set = {index1_hybrid, index2_hybrid}\n\n # If it's a subset of unique_new_atoms, then this is an\n # intra-unique interaction and should have its exceptions\n # specified in the regular nonbonded force. However, this is\n # handled elsewhere as above due to pecularities with exception\n # handling\n if index_set.issubset(self._atom_classes['unique_new_atoms']):\n continue\n\n # Look for the final class- interactions between uniquenew-core and\n # uniquenew-environment. 
They are treated similarly: they are\n # simply on and constant the entire time (as a valence term)\n elif len(index_set.intersection(self._atom_classes['unique_new_atoms'])) > 0:\n if self._interpolate_14s:\n self._hybrid_system_forces['standard_nonbonded_force'].addException(\n index1_hybrid, index2_hybrid, chargeProd_new*0.0,\n sigma_new, epsilon_new*0.0\n )\n else:\n self._hybrid_system_forces['standard_nonbonded_force'].addException(\n index1_hybrid, index2_hybrid, chargeProd_new,\n sigma_new, epsilon_new\n )\n\n self._hybrid_system_forces['core_sterics_force'].addExclusion(\n index1_hybrid, index2_hybrid\n )\n\n # However, there may be a core exception that exists in one system\n # but not the other (ring closure)\n elif index_set.issubset(self._atom_classes['core_atoms']):\n\n # Get the old indices\n try:\n index1_old = self._new_to_old_map[index1_new]\n index2_old = self._new_to_old_map[index2_new]\n except KeyError:\n continue\n\n # See if it's also in the old nonbonded force. if it is, then we don't need to add it.\n # But if it's not, we need to interpolate\n if not self._find_exception(old_system_nonbonded_force, index1_old, index2_old):\n\n [charge1_old, sigma1_old, epsilon1_old] = old_system_nonbonded_force.getParticleParameters(index1_old)\n [charge2_old, sigma2_old, epsilon2_old] = old_system_nonbonded_force.getParticleParameters(index2_old)\n\n chargeProd_old = charge1_old*charge2_old\n sigma_old = 0.5 * (sigma1_old + sigma2_old)\n epsilon_old = unit.sqrt(epsilon1_old*epsilon2_old)\n\n exception_index = self._hybrid_system_forces['standard_nonbonded_force'].addException(\n index1_hybrid, index2_hybrid,\n chargeProd_old, sigma_old, \n epsilon_old)\n\n self._hybrid_system_forces['standard_nonbonded_force'].addExceptionParameterOffset(\n 'lambda_electrostatics_core', exception_index,\n (chargeProd_new - chargeProd_old), 0, 0\n )\n\n self._hybrid_system_forces['standard_nonbonded_force'].addExceptionParameterOffset(\n 'lambda_sterics_core', exception_index, 0,\n (sigma_new - sigma_old), (epsilon_new - epsilon_old)\n )\n\n self._hybrid_system_forces['core_sterics_force'].addExclusion(\n index1_hybrid, index2_hybrid\n )\n\n def _handle_old_new_exceptions(self):\n \"\"\"\n Find the exceptions associated with old-old and old-core interactions,\n as well as new-new and new-core interactions. 
Theses exceptions will\n be placed in CustomBondedForce that will interpolate electrostatics and\n a softcore potential.\n\n TODO\n ----\n * Move old_new_bond_exceptions to a dictionary or similar.\n \"\"\"\n\n old_new_nonbonded_exceptions = \"U_electrostatics + U_sterics;\"\n\n if self._softcore_LJ_v2:\n old_new_nonbonded_exceptions += \"U_sterics = select(step(r - r_LJ), 4*epsilon*x*(x-1.0), U_sterics_quad);\"\n old_new_nonbonded_exceptions += f\"U_sterics_quad = Force*(((r - r_LJ)^2)/2 - (r - r_LJ)) + U_sterics_cut;\"\n old_new_nonbonded_exceptions += f\"U_sterics_cut = 4*epsilon*((sigma/r_LJ)^6)*(((sigma/r_LJ)^6) - 1.0);\"\n old_new_nonbonded_exceptions += f\"Force = -4*epsilon*((-12*sigma^12)/(r_LJ^13) + (6*sigma^6)/(r_LJ^7));\"\n old_new_nonbonded_exceptions += f\"x = (sigma/r)^6;\"\n old_new_nonbonded_exceptions += f\"r_LJ = softcore_alpha*((26/7)*(sigma^6)*lambda_sterics_deprecated)^(1/6);\"\n old_new_nonbonded_exceptions += f\"lambda_sterics_deprecated = new_interaction*(1.0 - lambda_sterics_insert) + old_interaction*lambda_sterics_delete;\"\n else:\n old_new_nonbonded_exceptions += \"U_sterics = 4*epsilon*x*(x-1.0); x = (sigma/reff_sterics)^6;\"\n old_new_nonbonded_exceptions += \"reff_sterics = sigma*((softcore_alpha*lambda_alpha + (r/sigma)^6))^(1/6);\"\n old_new_nonbonded_exceptions += \"reff_sterics = sigma*((softcore_alpha*lambda_alpha + (r/sigma)^6))^(1/6);\" # effective softcore distance for sterics\n old_new_nonbonded_exceptions += \"lambda_alpha = new_interaction*(1-lambda_sterics_insert) + old_interaction*lambda_sterics_delete;\"\n\n old_new_nonbonded_exceptions += \"U_electrostatics = (lambda_electrostatics_insert * unique_new + unique_old * (1 - lambda_electrostatics_delete)) * ONE_4PI_EPS0*chargeProd/r;\"\n old_new_nonbonded_exceptions += \"ONE_4PI_EPS0 = %f;\" % ONE_4PI_EPS0\n\n old_new_nonbonded_exceptions += \"epsilon = (1-lambda_sterics)*epsilonA + lambda_sterics*epsilonB;\" # interpolation\n old_new_nonbonded_exceptions += \"sigma = (1-lambda_sterics)*sigmaA + lambda_sterics*sigmaB;\"\n\n old_new_nonbonded_exceptions += \"lambda_sterics = new_interaction*lambda_sterics_insert + old_interaction*lambda_sterics_delete;\"\n old_new_nonbonded_exceptions += \"new_interaction = delta(1-unique_new); old_interaction = delta(1-unique_old);\"\n\n\n nonbonded_exceptions_force = openmm.CustomBondForce(\n old_new_nonbonded_exceptions)\n self._hybrid_system.addForce(nonbonded_exceptions_force)\n\n # For reference, set name in force dict\n self._hybrid_system_forces['old_new_exceptions_force'] = nonbonded_exceptions_force\n\n if self._softcore_LJ_v2:\n nonbonded_exceptions_force.addGlobalParameter(\n \"softcore_alpha\", self._softcore_LJ_v2_alpha\n )\n else:\n nonbonded_exceptions_force.addGlobalParameter(\n \"softcore_alpha\", self._softcore_alpha\n )\n\n # electrostatics insert\n nonbonded_exceptions_force.addGlobalParameter(\n \"lambda_electrostatics_insert\", 0.0\n )\n # electrostatics delete\n nonbonded_exceptions_force.addGlobalParameter(\n \"lambda_electrostatics_delete\", 0.0\n )\n # sterics insert\n nonbonded_exceptions_force.addGlobalParameter(\n \"lambda_sterics_insert\", 0.0\n )\n # steric delete\n nonbonded_exceptions_force.addGlobalParameter(\n \"lambda_sterics_delete\", 0.0\n )\n\n for parameter in ['chargeProd','sigmaA', 'epsilonA', 'sigmaB',\n 'epsilonB', 'unique_old', 'unique_new']:\n nonbonded_exceptions_force.addPerBondParameter(parameter)\n\n # Prepare for exceptions loop by grabbing nonbonded forces,\n # hybrid_to_old/new maps\n old_system_nonbonded_force 
= self._old_system_forces['NonbondedForce']\n new_system_nonbonded_force = self._new_system_forces['NonbondedForce']\n hybrid_to_old_map = self._hybrid_to_old_map\n hybrid_to_new_map = self._hybrid_to_new_map\n\n # First, loop through the old system's exceptions and add them to the\n # hybrid appropriately:\n for exception_pair, exception_parameters in self._old_system_exceptions.items():\n\n [index1_old, index2_old] = exception_pair\n [chargeProd_old, sigma_old, epsilon_old] = exception_parameters\n\n # Get hybrid indices:\n index1_hybrid = self._old_to_hybrid_map[index1_old]\n index2_hybrid = self._old_to_hybrid_map[index2_old]\n index_set = {index1_hybrid, index2_hybrid}\n\n # Otherwise, check if one of the atoms in the set is in the\n # unique_old_group and the other is not:\n if (len(index_set.intersection(self._atom_classes['unique_old_atoms'])) > 0 and\n (chargeProd_old.value_in_unit_system(unit.md_unit_system) != 0.0 or\n epsilon_old.value_in_unit_system(unit.md_unit_system) != 0.0)):\n if self._interpolate_14s:\n # If we are interpolating 1,4s, then we anneal this term\n # off; otherwise, the exception force is constant and\n # already handled in the standard nonbonded force\n nonbonded_exceptions_force.addBond(\n index1_hybrid, index2_hybrid,\n [chargeProd_old, sigma_old, epsilon_old, sigma_old,\n epsilon_old*0.0, 1, 0]\n )\n\n\n\n # Next, loop through the new system's exceptions and add them to the\n # hybrid appropriately\n for exception_pair, exception_parameters in self._new_system_exceptions.items():\n [index1_new, index2_new] = exception_pair\n [chargeProd_new, sigma_new, epsilon_new] = exception_parameters\n\n # Get hybrid indices:\n index1_hybrid = self._new_to_hybrid_map[index1_new]\n index2_hybrid = self._new_to_hybrid_map[index2_new]\n\n index_set = {index1_hybrid, index2_hybrid}\n\n # Look for the final class- interactions between uniquenew-core and\n # uniquenew-environment. They are treated\n # similarly: they are simply on and constant the entire time\n # (as a valence term)\n if (len(index_set.intersection(self._atom_classes['unique_new_atoms'])) > 0 and\n (chargeProd_new.value_in_unit_system(unit.md_unit_system) != 0.0 or\n epsilon_new.value_in_unit_system(unit.md_unit_system) != 0.0)):\n if self._interpolate_14s:\n # If we are interpolating 1,4s, then we anneal this term\n # on; otherwise, the exception force is constant and\n # already handled in the standard nonbonded force\n nonbonded_exceptions_force.addBond(\n index1_hybrid, index2_hybrid,\n [chargeProd_new, sigma_new, epsilon_new*0.0,\n sigma_new, epsilon_new, 0, 1]\n )\n\n def _compute_hybrid_positions(self):\n \"\"\"\n The positions of the hybrid system. Dimensionality is (n_environment +\n n_core + n_old_unique + n_new_unique),\n The positions are assigned by first copying all the mapped positions\n from the old system in, then copying the\n mapped positions from the new system. 
This means that there is an\n assumption that the positions common to old and new are the same\n (which is the case for perses as-is).\n\n Returns\n -------\n hybrid_positions : np.ndarray [n, 3]\n Positions of the hybrid system, in nm\n \"\"\"\n # Get unitless positions\n old_pos_without_units = np.array(\n self._old_positions.value_in_unit(unit.nanometer))\n new_pos_without_units = np.array(\n self._new_positions.value_in_unit(unit.nanometer))\n\n # Determine the number of particles in the system\n n_atoms_hybrid = self._hybrid_system.getNumParticles()\n\n # Initialize an array for hybrid positions\n hybrid_pos_array = np.zeros([n_atoms_hybrid, 3])\n\n # Loop through the old system indices, and assign positions.\n for old_idx, hybrid_idx in self._old_to_hybrid_map.items():\n hybrid_pos_array[hybrid_idx, :] = old_pos_without_units[old_idx, :]\n\n # Do the same for new indices. Note that this overwrites some\n # coordinates, but as stated above, the assumption is that these are\n # the same.\n for new_idx, hybrid_idx in self._new_to_hybrid_map.items():\n hybrid_pos_array[hybrid_idx, :] = new_pos_without_units[new_idx, :]\n\n return unit.Quantity(hybrid_pos_array, unit=unit.nanometers)\n\n def _create_mdtraj_topology(self):\n \"\"\"\n Create an MDTraj trajectory of the hybrid system.\n\n Note\n ----\n This is purely for writing out trajectories and is not expected to be\n parametrized.\n\n TODO\n ----\n * A lot of this can be simplified / reworked.\n \"\"\"\n old_top = mdt.Topology.from_openmm(self._old_topology)\n new_top = mdt.Topology.from_openmm(self._new_topology)\n\n hybrid_topology = copy.deepcopy(old_top)\n\n added_atoms = dict()\n\n # Get the core atoms in the new index system (as opposed to the hybrid\n # index system). We will need this later\n core_atoms_new_indices = set(self._core_old_to_new_map.values())\n\n # Now, add each unique new atom to the topology (this is the same order\n # as the system)\n for particle_idx in self._unique_new_atoms:\n new_particle_hybrid_idx = self._new_to_hybrid_map[particle_idx]\n new_system_atom = new_top.atom(particle_idx)\n\n # First, we get the residue in the new system associated with this\n # atom\n new_system_res = new_system_atom.residue\n\n # Next, we have to enumerate the other atoms in that residue to\n # find mapped atoms\n new_system_atom_set = {atom.index for atom in new_system_res.atoms}\n\n # Now, we find the subset of atoms that are mapped. 
These must be \n # in the \"core\" category, since they are mapped and part of a\n # changing residue\n mapped_new_atom_indices = core_atoms_new_indices.intersection(\n new_system_atom_set)\n\n # Now get the old indices of the above atoms so that we can find\n # the appropriate residue in the old system for this we can use the\n # new to old atom map\n mapped_old_atom_indices = [self._new_to_old_map[atom_idx] for\n atom_idx in mapped_new_atom_indices]\n\n # We can just take the first one--they all have the same residue\n first_mapped_old_atom_index = mapped_old_atom_indices[0]\n\n # Get the atom object corresponding to this index from the hybrid\n # (which is a deepcopy of the old)\n mapped_hybrid_system_atom = hybrid_topology.atom(\n first_mapped_old_atom_index)\n\n # Get the residue that is relevant to this atom\n mapped_residue = mapped_hybrid_system_atom.residue\n\n # Add the atom using the mapped residue\n added_atoms[new_particle_hybrid_idx] = hybrid_topology.add_atom(\n new_system_atom.name,\n new_system_atom.element,\n mapped_residue)\n\n # Now loop through the bonds in the new system, and if the bond\n # contains a unique new atom, then add it to the hybrid topology\n for (atom1, atom2) in new_top.bonds:\n at1_hybrid_idx = self._new_to_hybrid_map[atom1.index]\n at2_hybrid_idx = self._new_to_hybrid_map[atom2.index]\n\n # If at least one atom is in the unique new class, we need to add\n # it to the hybrid system\n at1_uniq = at1_hybrid_idx in self._atom_classes['unique_new_atoms']\n at2_uniq = at2_hybrid_idx in self._atom_classes['unique_new_atoms']\n if at1_uniq or at2_uniq:\n if atom1.index in self._atom_classes['unique_new_atoms']:\n atom1_to_bond = added_atoms[atom1.index]\n else:\n atom1_to_bond = atom1\n\n if atom2.index in self._atom_classes['unique_new_atoms']:\n atom2_to_bond = added_atoms[atom2.index]\n else:\n atom2_to_bond = atom2\n\n hybrid_topology.add_bond(atom1_to_bond, atom2_to_bond)\n\n return hybrid_topology\n\n\n def old_positions(self, hybrid_positions):\n \"\"\"\n From input hybrid positions, get the positions which would correspond\n to the old system\n\n Parameters\n ----------\n hybrid_positions : [n, 3] np.ndarray or simtk.unit.Quantity\n The positions of the hybrid system\n\n Returns\n -------\n old_positions : [m, 3] np.ndarray with unit\n The positions of the old system\n \"\"\"\n n_atoms_old = self._old_system.getNumParticles()\n # making sure hybrid positions are simtk.unit.Quantity objects\n if not isinstance(hybrid_positions, unit.Quantity):\n hybrid_positions = unit.Quantity(hybrid_positions,\n unit=unit.nanometer)\n old_positions = unit.Quantity(np.zeros([n_atoms_old, 3]),\n unit=unit.nanometer)\n for idx in range(n_atoms_old):\n hyb_idx = self._new_to_hybrid_map[idx]\n old_positions[idx, :] = hybrid_positions[hyb_idx, :]\n return old_positions\n\n def new_positions(self, hybrid_positions):\n \"\"\"\n From input hybrid positions, get the positions which could correspond\n to the new system.\n\n Parameters\n ----------\n hybrid_positions : [n, 3] np.ndarray or simtk.unit.Quantity\n The positions of the hybrid system\n\n Returns\n -------\n new_positions : [m, 3] np.ndarray with unit\n The positions of the new system\n \"\"\"\n n_atoms_new = self._new_system.getNumParticles\n # making sure hybrid positions are simtk.unit.Quantity objects\n if not isinstance(hybrid_positions, unit.Quantity):\n hybrid_positions = unit.Quantity(hybrid_positions,\n unit=unit.nanometer)\n new_positions = unit.Quantity(np.zeros([n_atoms_new, 3]),\n unit=unit.nanometer)\n 
for idx in range(n_atoms_new):\n hyb_idx = self._new_to_hybrid_map[idx]\n new_positions[idx, :] = hybrid_positions[hyb_idx, :]\n return new_positions\n\n @property\n def hybrid_system(self):\n \"\"\"\n The hybrid system.\n\n Returns\n -------\n hybrid_system : openmm.System\n The system representing a hybrid between old and new topologies\n \"\"\"\n return self._hybrid_system\n\n @property\n def new_to_hybrid_atom_map(self):\n \"\"\"\n Give a dictionary that maps new system atoms to the hybrid system.\n\n Returns\n -------\n new_to_hybrid_atom_map : dict of {int, int}\n The mapping of atoms from the new system to the hybrid\n \"\"\"\n return self._new_to_hybrid_map\n\n @property\n def old_to_hybrid_atom_map(self):\n \"\"\"\n Give a dictionary that maps old system atoms to the hybrid system.\n\n Returns\n -------\n old_to_hybrid_atom_map : dict of {int, int}\n The mapping of atoms from the old system to the hybrid\n \"\"\"\n return self._old_to_hybrid_map\n\n @property\n def hybrid_positions(self):\n \"\"\"\n The positions of the hybrid system. Dimensionality is (n_environment +\n n_core + n_old_unique + n_new_unique).\n The positions are assigned by first copying all the mapped positions\n from the old system in, then copying the mapped positions from the new\n system.\n\n Returns\n -------\n hybrid_positions : [n, 3] Quantity nanometers\n \"\"\"\n return self._hybrid_positions\n\n @property\n def hybrid_topology(self):\n \"\"\"\n An MDTraj hybrid topology for the purpose of writing out trajectories.\n \n Note that we do not expect this to be able to be parameterized by the\n openmm forcefield class.\n\n Returns\n -------\n hybrid_topology : mdtraj.Topology\n \"\"\"\n return self._hybrid_topology\n\n @property\n def omm_hybrid_topology(self):\n \"\"\"\n An OpenMM format of the hybrid topology. Also cannot be used to\n parameterize system, only to write out trajectories.\n\n Returns\n -------\n hybrid_topology : simtk.openmm.app.Topology\n \"\"\"\n return md.Topology.to_openmm(self._hybrid_topology)" ]
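The `_handle_nonbonded` method in the code above relies on OpenMM's NonbondedForce parameter offsets to interpolate a core atom's charge between the old and new end states with one global parameter. Below is a minimal, self-contained sketch of that mechanism only — a two-particle toy system with illustrative charges; the particle values and the reuse of the name `lambda_electrostatics_core` are assumptions for demonstration, not taken from the record above.

import openmm
from openmm import unit

# Toy system: particle 0 is a "core" atom whose charge is interpolated,
# particle 1 is an "environment" atom with a fixed charge.
system = openmm.System()
system.addParticle(1.0 * unit.amu)
system.addParticle(1.0 * unit.amu)

nb = openmm.NonbondedForce()
nb.setNonbondedMethod(openmm.NonbondedForce.NoCutoff)

charge_old = 0.3 * unit.elementary_charge    # illustrative end-state charges
charge_new = -0.2 * unit.elementary_charge
sigma = 0.3 * unit.nanometer
epsilon = 0.0 * unit.kilojoule_per_mole      # sterics left off for clarity

# Base parameters carry the old charge; the offset adds
# lambda * (charge_new - charge_old), so lambda = 0 reproduces the old
# state and lambda = 1 reproduces the new state.
nb.addParticle(charge_old, sigma, epsilon)
nb.addParticle(1.0 * unit.elementary_charge, sigma, epsilon)
nb.addGlobalParameter('lambda_electrostatics_core', 0.0)
nb.addParticleParameterOffset('lambda_electrostatics_core', 0,
                              charge_new - charge_old,
                              0.0 * unit.nanometer,
                              0.0 * unit.kilojoule_per_mole)
system.addForce(nb)

# Evaluate the electrostatic energy at both end states.
integrator = openmm.VerletIntegrator(1.0 * unit.femtosecond)
context = openmm.Context(system, integrator)
context.setPositions([[0.0, 0.0, 0.0], [0.5, 0.0, 0.0]] * unit.nanometer)
for lam in (0.0, 1.0):
    context.setParameter('lambda_electrostatics_core', lam)
    print(lam, context.getState(getEnergy=True).getPotentialEnergy())

The exception-parameter offsets added in `_handle_original_exceptions` follow the same pattern, using addExceptionParameterOffset instead of addParticleParameterOffset.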
[ [ "numpy.zeros" ] ]
axch/probability
[ "b112faafc593d18e6adf4c85fa8e0ce37b29f400" ]
[ "tensorflow_probability/python/vi/csiszar_divergence_test.py" ]
[ "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for Csiszar divergences.\"\"\"\n\n# Dependency imports\nimport numpy as np\n\nimport tensorflow.compat.v2 as tf\nimport tensorflow_probability as tfp\n\nfrom tensorflow_probability.python.internal import test_util\n\n\ntfd = tfp.distributions\n\n\ndef tridiag(d, diag_value, offdiag_value):\n \"\"\"d x d matrix with given value on diag, and one super/sub diag.\"\"\"\n diag_mat = tf.eye(d) * (diag_value - offdiag_value)\n three_bands = tf.linalg.band_part(tf.fill([d, d], offdiag_value), 1, 1)\n return diag_mat + three_bands\n\n\n@test_util.test_all_tf_execution_regimes\nclass AmariAlphaTest(test_util.TestCase):\n\n def setUp(self):\n super(AmariAlphaTest, self).setUp()\n self._logu = np.linspace(-10., 10, 100)\n self._u = np.exp(self._logu)\n\n def test_at_zero(self):\n for alpha in [-1., 0., 1., 2.]:\n for normalized in [True, False]:\n self.assertAllClose(\n self.evaluate(\n tfp.vi.amari_alpha(\n 0., alpha=alpha, self_normalized=normalized)),\n 0.)\n\n def test_correct_when_alpha0(self):\n self.assertAllClose(\n self.evaluate(tfp.vi.amari_alpha(self._logu, alpha=0.)),\n -self._logu)\n\n self.assertAllClose(\n self.evaluate(\n tfp.vi.amari_alpha(self._logu, alpha=0., self_normalized=True)),\n -self._logu + (self._u - 1.))\n\n def test_correct_when_alpha1(self):\n self.assertAllClose(\n self.evaluate(tfp.vi.amari_alpha(self._logu, alpha=1.)),\n self._u * self._logu)\n\n self.assertAllClose(\n self.evaluate(\n tfp.vi.amari_alpha(self._logu, alpha=1., self_normalized=True)),\n self._u * self._logu - (self._u - 1.))\n\n def test_correct_when_alpha_not_01(self):\n for alpha in [-2, -1., -0.5, 0.5, 2.]:\n self.assertAllClose(\n self.evaluate(\n tfp.vi.amari_alpha(self._logu,\n alpha=alpha,\n self_normalized=False)),\n ((self._u**alpha - 1)) / (alpha * (alpha - 1.)))\n\n self.assertAllClose(\n self.evaluate(\n tfp.vi.amari_alpha(self._logu,\n alpha=alpha,\n self_normalized=True)),\n ((self._u**alpha - 1.)\n - alpha * (self._u - 1)) / (alpha * (alpha - 1.)))\n\n\n@test_util.test_all_tf_execution_regimes\nclass KLReverseTest(test_util.TestCase):\n\n def setUp(self):\n super(KLReverseTest, self).setUp()\n self._logu = np.linspace(-10., 10, 100)\n self._u = np.exp(self._logu)\n\n def test_at_zero(self):\n for normalized in [True, False]:\n self.assertAllClose(\n self.evaluate(tfp.vi.kl_reverse(0., self_normalized=normalized)),\n 0.)\n\n def test_correct(self):\n self.assertAllClose(\n self.evaluate(tfp.vi.kl_reverse(self._logu)),\n -self._logu)\n\n self.assertAllClose(\n self.evaluate(tfp.vi.kl_reverse(self._logu, self_normalized=True)),\n -self._logu + (self._u - 1.))\n\n\n@test_util.test_all_tf_execution_regimes\nclass KLForwardTest(test_util.TestCase):\n\n def setUp(self):\n super(KLForwardTest, self).setUp()\n self._logu = np.linspace(-10., 10, 100)\n self._u = np.exp(self._logu)\n\n def 
test_at_zero(self):\n for normalized in [True, False]:\n self.assertAllClose(\n self.evaluate(tfp.vi.kl_forward(0., self_normalized=normalized)),\n 0.)\n\n def test_correct(self):\n self.assertAllClose(\n self.evaluate(tfp.vi.kl_forward(self._logu)),\n self._u * self._logu)\n\n self.assertAllClose(\n self.evaluate(tfp.vi.kl_forward(self._logu, self_normalized=True)),\n self._u * self._logu - (self._u - 1.))\n\n\n@test_util.test_all_tf_execution_regimes\nclass JensenShannonTest(test_util.TestCase):\n\n def setUp(self):\n super(JensenShannonTest, self).setUp()\n self._logu = np.linspace(-10., 10, 100)\n self._u = np.exp(self._logu)\n\n def test_at_zero(self):\n self.assertAllClose(\n self.evaluate(tfp.vi.jensen_shannon(0.)), np.log(0.25))\n\n def test_symmetric(self):\n self.assertAllClose(\n self.evaluate(tfp.vi.jensen_shannon(self._logu)),\n self.evaluate(tfp.vi.symmetrized_csiszar_function(\n self._logu, tfp.vi.jensen_shannon)))\n\n self.assertAllClose(\n self.evaluate(\n tfp.vi.jensen_shannon(self._logu, self_normalized=True)),\n self.evaluate(tfp.vi.symmetrized_csiszar_function(\n self._logu,\n lambda x: tfp.vi.jensen_shannon(x, self_normalized=True))))\n\n def test_correct(self):\n self.assertAllClose(\n self.evaluate(tfp.vi.jensen_shannon(self._logu)),\n (self._u * self._logu\n - (1 + self._u) * np.log1p(self._u)))\n\n self.assertAllClose(\n self.evaluate(\n tfp.vi.jensen_shannon(self._logu, self_normalized=True)),\n (self._u * self._logu\n - (1 + self._u) * np.log((1 + self._u) / 2)))\n\n\n@test_util.test_all_tf_execution_regimes\nclass ArithmeticGeometricMeanTest(test_util.TestCase):\n\n def setUp(self):\n super(ArithmeticGeometricMeanTest, self).setUp()\n self._logu = np.linspace(-10., 10, 100)\n self._u = np.exp(self._logu)\n\n def test_at_zero(self):\n self.assertAllClose(\n self.evaluate(tfp.vi.arithmetic_geometric(0.)), np.log(4))\n self.assertAllClose(\n self.evaluate(\n tfp.vi.arithmetic_geometric(0., self_normalized=True)), 0.)\n\n def test_symmetric(self):\n self.assertAllClose(\n self.evaluate(tfp.vi.arithmetic_geometric(self._logu)),\n self.evaluate(tfp.vi.symmetrized_csiszar_function(\n self._logu, tfp.vi.arithmetic_geometric)))\n\n def test_correct(self):\n self.assertAllClose(\n self.evaluate(tfp.vi.arithmetic_geometric(self._logu)),\n (1. + self._u) * np.log((1. + self._u) / np.sqrt(self._u)))\n\n self.assertAllClose(\n self.evaluate(\n tfp.vi.arithmetic_geometric(self._logu, self_normalized=True)),\n (1. + self._u) * np.log(0.5 * (1. 
+ self._u) / np.sqrt(self._u)))\n\n\n@test_util.test_all_tf_execution_regimes\nclass TotalVariationTest(test_util.TestCase):\n\n def setUp(self):\n super(TotalVariationTest, self).setUp()\n self._logu = np.linspace(-10., 10, 100)\n self._u = np.exp(self._logu)\n\n def test_at_zero(self):\n self.assertAllClose(self.evaluate(tfp.vi.total_variation(0.)), 0.)\n\n def test_correct(self):\n self.assertAllClose(\n self.evaluate(tfp.vi.total_variation(self._logu)),\n 0.5 * np.abs(self._u - 1))\n\n\n@test_util.test_all_tf_execution_regimes\nclass PearsonTest(test_util.TestCase):\n\n def setUp(self):\n super(PearsonTest, self).setUp()\n self._logu = np.linspace(-10., 10, 100)\n self._u = np.exp(self._logu)\n\n def test_at_zero(self):\n self.assertAllClose(self.evaluate(tfp.vi.pearson(0.)), 0.)\n\n def test_correct(self):\n self.assertAllClose(\n self.evaluate(tfp.vi.pearson(self._logu)),\n np.square(self._u - 1))\n\n\n@test_util.test_all_tf_execution_regimes\nclass SquaredHellingerTest(test_util.TestCase):\n\n def setUp(self):\n super(SquaredHellingerTest, self).setUp()\n self._logu = np.linspace(-10., 10, 100)\n self._u = np.exp(self._logu)\n\n def test_at_zero(self):\n self.assertAllClose(self.evaluate(tfp.vi.squared_hellinger(0.)), 0.)\n\n def test_symmetric(self):\n self.assertAllClose(\n self.evaluate(tfp.vi.squared_hellinger(self._logu)),\n self.evaluate(tfp.vi.symmetrized_csiszar_function(\n self._logu, tfp.vi.squared_hellinger)))\n\n def test_correct(self):\n self.assertAllClose(\n self.evaluate(tfp.vi.squared_hellinger(self._logu)),\n np.square(np.sqrt(self._u) - 1))\n\n\n@test_util.test_all_tf_execution_regimes\nclass TriangularTest(test_util.TestCase):\n\n def setUp(self):\n super(TriangularTest, self).setUp()\n self._logu = np.linspace(-10., 10, 100)\n self._u = np.exp(self._logu)\n\n def test_at_zero(self):\n self.assertAllClose(self.evaluate(tfp.vi.triangular(0.)), 0.)\n\n def test_symmetric(self):\n self.assertAllClose(\n self.evaluate(tfp.vi.triangular(self._logu)),\n self.evaluate(tfp.vi.symmetrized_csiszar_function(\n self._logu, tfp.vi.triangular)))\n\n def test_correct(self):\n self.assertAllClose(\n self.evaluate(tfp.vi.triangular(self._logu)),\n np.square(self._u - 1) / (1 + self._u))\n\n\n@test_util.test_all_tf_execution_regimes\nclass TPowerTest(test_util.TestCase):\n\n def setUp(self):\n super(TPowerTest, self).setUp()\n self._logu = np.linspace(-10., 10, 100)\n self._u = np.exp(self._logu)\n\n def test_at_zero(self):\n self.assertAllClose(self.evaluate(tfp.vi.t_power(0., t=-0.1)), 0.)\n self.assertAllClose(self.evaluate(tfp.vi.t_power(0., t=0.5)), 0.)\n self.assertAllClose(self.evaluate(tfp.vi.t_power(0., t=1.1)), 0.)\n self.assertAllClose(\n self.evaluate(tfp.vi.t_power(0., t=-0.1, self_normalized=True)), 0.)\n self.assertAllClose(\n self.evaluate(tfp.vi.t_power(0., t=0.5, self_normalized=True)), 0.)\n self.assertAllClose(\n self.evaluate(tfp.vi.t_power(0., t=1.1, self_normalized=True)), 0.)\n\n def test_correct(self):\n self.assertAllClose(\n self.evaluate(tfp.vi.t_power(self._logu, t=np.float64(-0.1))),\n self._u ** -0.1 - 1.)\n self.assertAllClose(\n self.evaluate(tfp.vi.t_power(self._logu, t=np.float64(0.5))),\n -self._u ** 0.5 + 1.)\n self.assertAllClose(\n self.evaluate(tfp.vi.t_power(self._logu, t=np.float64(1.1))),\n self._u ** 1.1 - 1.)\n\n def test_correct_self_normalized(self):\n self.assertAllClose(\n self.evaluate(tfp.vi.t_power(self._logu, t=np.float64(-0.1),\n self_normalized=True)),\n self._u ** -0.1 - 1. 
+ 0.1 * (self._u - 1.))\n self.assertAllClose(\n self.evaluate(tfp.vi.t_power(self._logu, t=np.float64(0.5),\n self_normalized=True)),\n -self._u ** 0.5 + 1. + 0.5 * (self._u - 1.))\n self.assertAllClose(\n self.evaluate(tfp.vi.t_power(self._logu, t=np.float64(1.1),\n self_normalized=True)),\n self._u ** 1.1 - 1. - 1.1 * (self._u - 1.))\n\n\n@test_util.test_all_tf_execution_regimes\nclass Log1pAbsTest(test_util.TestCase):\n\n def setUp(self):\n super(Log1pAbsTest, self).setUp()\n self._logu = np.linspace(-10., 10, 100)\n self._u = np.exp(self._logu)\n\n def test_at_zero(self):\n self.assertAllClose(self.evaluate(tfp.vi.log1p_abs(0.)), 0.)\n\n def test_correct(self):\n self.assertAllClose(\n self.evaluate(tfp.vi.log1p_abs(self._logu)),\n self._u**(np.sign(self._u - 1)) - 1)\n\n\n@test_util.test_all_tf_execution_regimes\nclass JeffreysTest(test_util.TestCase):\n\n def setUp(self):\n super(JeffreysTest, self).setUp()\n self._logu = np.linspace(-10., 10, 100)\n self._u = np.exp(self._logu)\n\n def test_at_zero(self):\n self.assertAllClose(self.evaluate(tfp.vi.jeffreys(0.)), 0.)\n\n def test_symmetric(self):\n self.assertAllClose(\n self.evaluate(tfp.vi.jeffreys(self._logu)),\n self.evaluate(tfp.vi.symmetrized_csiszar_function(\n self._logu, tfp.vi.jeffreys)))\n\n def test_correct(self):\n self.assertAllClose(\n self.evaluate(tfp.vi.jeffreys(self._logu)),\n 0.5 * (self._u * self._logu - self._logu))\n\n\n@test_util.test_all_tf_execution_regimes\nclass ChiSquareTest(test_util.TestCase):\n\n def setUp(self):\n super(ChiSquareTest, self).setUp()\n self._logu = np.linspace(-10., 10, 100)\n self._u = np.exp(self._logu)\n\n def test_at_zero(self):\n self.assertAllClose(self.evaluate(tfp.vi.chi_square(0.)), 0.)\n\n def test_correct(self):\n self.assertAllClose(\n self.evaluate(tfp.vi.chi_square(self._logu)),\n self._u**2 - 1)\n\n\n@test_util.test_all_tf_execution_regimes\nclass ModifiedGanTest(test_util.TestCase):\n\n def setUp(self):\n super(ModifiedGanTest, self).setUp()\n self._logu = np.linspace(-10., 10, 100)\n self._u = np.exp(self._logu)\n\n def test_at_zero(self):\n self.assertAllClose(\n self.evaluate(tfp.vi.modified_gan(0.)), np.log(2))\n self.assertAllClose(\n self.evaluate(\n tfp.vi.modified_gan(0., self_normalized=True)), np.log(2))\n\n def test_correct(self):\n self.assertAllClose(\n self.evaluate(tfp.vi.modified_gan(self._logu)),\n np.log1p(self._u) - self._logu)\n\n self.assertAllClose(\n self.evaluate(tfp.vi.modified_gan(self._logu, self_normalized=True)),\n np.log1p(self._u) - self._logu + 0.5 * (self._u - 1))\n\n\n@test_util.test_all_tf_execution_regimes\nclass SymmetrizedCsiszarFunctionTest(test_util.TestCase):\n\n def setUp(self):\n super(SymmetrizedCsiszarFunctionTest, self).setUp()\n self._logu = np.linspace(-10., 10., 100)\n self._u = np.exp(self._logu)\n\n def test_jensen_shannon(self):\n # The following functions come from the claim made in the\n # symmetrized_csiszar_function docstring.\n def js1(logu):\n return (-logu\n - (1. + tf.exp(logu)) * (\n tf.nn.softplus(logu)))\n\n def js2(logu):\n return 2. 
* (tf.exp(logu) * (\n logu - tf.nn.softplus(logu)))\n\n self.assertAllClose(\n self.evaluate(tfp.vi.symmetrized_csiszar_function(self._logu, js1)),\n self.evaluate(tfp.vi.jensen_shannon(self._logu)))\n\n self.assertAllClose(\n self.evaluate(tfp.vi.symmetrized_csiszar_function(self._logu, js2)),\n self.evaluate(tfp.vi.jensen_shannon(self._logu)))\n\n def test_jeffreys(self):\n self.assertAllClose(\n self.evaluate(tfp.vi.symmetrized_csiszar_function(\n self._logu, tfp.vi.kl_reverse)),\n self.evaluate(tfp.vi.jeffreys(self._logu)))\n\n self.assertAllClose(\n self.evaluate(tfp.vi.symmetrized_csiszar_function(\n self._logu, tfp.vi.kl_forward)),\n self.evaluate(tfp.vi.jeffreys(self._logu)))\n\n\n@test_util.test_all_tf_execution_regimes\nclass DualCsiszarFunctionTest(test_util.TestCase):\n\n def setUp(self):\n super(DualCsiszarFunctionTest, self).setUp()\n self._logu = np.linspace(-10., 10., 100)\n self._u = np.exp(self._logu)\n\n def test_kl_forward(self):\n self.assertAllClose(\n self.evaluate(\n tfp.vi.dual_csiszar_function(self._logu, tfp.vi.kl_forward)),\n self.evaluate(tfp.vi.kl_reverse(self._logu)))\n\n def test_kl_reverse(self):\n self.assertAllClose(\n self.evaluate(\n tfp.vi.dual_csiszar_function(self._logu, tfp.vi.kl_reverse)),\n self.evaluate(tfp.vi.kl_forward(self._logu)))\n\n\n@test_util.test_all_tf_execution_regimes\nclass MonteCarloVariationalLossTest(test_util.TestCase):\n\n def test_kl_forward(self):\n q = tfd.Normal(\n loc=np.ones(6),\n scale=np.array([0.5, 1.0, 1.5, 2.0, 2.5, 3.0]))\n\n p = tfd.Normal(loc=q.loc + 0.1, scale=q.scale - 0.2)\n\n seed = test_util.test_seed()\n\n approx_kl = tfp.vi.monte_carlo_variational_loss(\n discrepancy_fn=tfp.vi.kl_forward,\n target_log_prob_fn=p.log_prob,\n surrogate_posterior=q,\n sample_size=int(4e5),\n seed=seed)\n\n approx_kl_self_normalized = tfp.vi.monte_carlo_variational_loss(\n discrepancy_fn=(\n lambda logu: tfp.vi.kl_forward(logu, self_normalized=True)),\n target_log_prob_fn=p.log_prob,\n surrogate_posterior=q,\n sample_size=int(4e5),\n seed=seed)\n\n exact_kl = tfd.kl_divergence(p, q)\n\n [approx_kl_, approx_kl_self_normalized_, exact_kl_] = self.evaluate([\n approx_kl, approx_kl_self_normalized, exact_kl])\n\n self.assertAllClose(approx_kl_, exact_kl_,\n rtol=0.10, atol=0.)\n\n self.assertAllClose(approx_kl_self_normalized_, exact_kl_,\n rtol=0.06, atol=0.)\n\n def test_kl_reverse(self):\n q = tfd.Normal(\n loc=np.ones(6),\n scale=np.array([0.5, 1.0, 1.5, 2.0, 2.5, 3.0]))\n\n p = tfd.Normal(loc=q.loc + 0.1, scale=q.scale - 0.2)\n\n seed = test_util.test_seed()\n\n approx_kl = tfp.vi.monte_carlo_variational_loss(\n target_log_prob_fn=p.log_prob,\n surrogate_posterior=q,\n discrepancy_fn=tfp.vi.kl_reverse,\n sample_size=int(4.5e5),\n seed=seed)\n\n approx_kl_self_normalized = tfp.vi.monte_carlo_variational_loss(\n target_log_prob_fn=p.log_prob,\n surrogate_posterior=q,\n discrepancy_fn=(\n lambda logu: tfp.vi.kl_reverse(logu, self_normalized=True)),\n sample_size=int(4.5e5),\n seed=seed)\n\n exact_kl = tfd.kl_divergence(q, p)\n\n [approx_kl_, approx_kl_self_normalized_, exact_kl_] = self.evaluate([\n approx_kl, approx_kl_self_normalized, exact_kl])\n\n self.assertAllClose(approx_kl_, exact_kl_,\n rtol=0.13, atol=0.)\n\n self.assertAllClose(approx_kl_self_normalized_, exact_kl_,\n rtol=0.07, atol=0.)\n\n def test_kl_forward_multidim(self):\n d = 5 # Dimension\n\n p = tfd.MultivariateNormalFullCovariance(\n covariance_matrix=tridiag(d, diag_value=1, offdiag_value=0.5))\n\n # Variance is very high when approximating Forward KL, so we 
make\n # scale_diag large. This ensures q\n # \"covers\" p and thus Var_q[p/q] is smaller.\n q = tfd.MultivariateNormalDiag(scale_diag=[1.]*d)\n\n seed = test_util.test_seed()\n\n approx_kl = tfp.vi.monte_carlo_variational_loss(\n target_log_prob_fn=p.log_prob,\n surrogate_posterior=q,\n discrepancy_fn=tfp.vi.kl_forward,\n sample_size=int(6e5),\n seed=seed)\n\n approx_kl_self_normalized = tfp.vi.monte_carlo_variational_loss(\n target_log_prob_fn=p.log_prob,\n surrogate_posterior=q,\n discrepancy_fn=(\n lambda logu: tfp.vi.kl_forward(logu, self_normalized=True)),\n sample_size=int(6e5),\n seed=seed)\n\n exact_kl = tfd.kl_divergence(p, q)\n\n [approx_kl_, approx_kl_self_normalized_, exact_kl_] = self.evaluate([\n approx_kl, approx_kl_self_normalized, exact_kl])\n\n self.assertAllClose(approx_kl_, exact_kl_,\n rtol=0.14, atol=0.)\n\n self.assertAllClose(approx_kl_self_normalized_, exact_kl_,\n rtol=0.14, atol=0.)\n\n def test_kl_reverse_multidim(self):\n d = 5 # Dimension\n\n p = tfd.MultivariateNormalFullCovariance(\n covariance_matrix=tridiag(d, diag_value=1, offdiag_value=0.5))\n\n # Variance is very high when approximating Reverse KL with self\n # normalization, because we pick up a term E_q[p / q]. So we make\n # scale_diag large. This ensures q \"covers\" p and thus Var_q[p/q] is\n # smaller.\n q = tfd.MultivariateNormalDiag(scale_diag=[1.]*d)\n\n seed = test_util.test_seed()\n\n approx_kl = tfp.vi.monte_carlo_variational_loss(\n target_log_prob_fn=p.log_prob,\n surrogate_posterior=q,\n discrepancy_fn=tfp.vi.kl_reverse,\n sample_size=int(6e5),\n seed=seed)\n\n approx_kl_self_normalized = tfp.vi.monte_carlo_variational_loss(\n target_log_prob_fn=p.log_prob,\n surrogate_posterior=q,\n discrepancy_fn=(\n lambda logu: tfp.vi.kl_reverse(logu, self_normalized=True)),\n sample_size=int(6e5),\n seed=seed)\n\n exact_kl = tfd.kl_divergence(q, p)\n\n [approx_kl_, approx_kl_self_normalized_, exact_kl_] = self.evaluate([\n approx_kl, approx_kl_self_normalized, exact_kl])\n\n self.assertAllClose(approx_kl_, exact_kl_,\n rtol=0.02, atol=0.)\n\n self.assertAllClose(approx_kl_self_normalized_, exact_kl_,\n rtol=0.14, atol=0.)\n\n def test_kl_with_joint_q(self):\n\n # Target distribution: equiv to MVNFullCovariance(cov=[[1., 1.], [1., 2.]])\n def target_log_prob_fn(z, x):\n return tfd.Normal(0., 1.).log_prob(z) + tfd.Normal(z, 1.).log_prob(x)\n\n # Factored q distribution: equiv to MVNDiag(scale_diag=[1., sqrt(2)])\n q_sequential = tfd.JointDistributionSequential([ # Should pass as *args.\n tfd.Normal(0., 1.),\n tfd.Normal(0., tf.sqrt(2.))\n ])\n q_named = tfd.JointDistributionNamed({ # Should pass as **kwargs.\n 'x': tfd.Normal(0., tf.sqrt(2.)),\n 'z': tfd.Normal(0., 1.)\n })\n\n seed = test_util.test_seed()\n\n reverse_kl_sequential = tfp.vi.monte_carlo_variational_loss(\n target_log_prob_fn=target_log_prob_fn,\n surrogate_posterior=q_sequential,\n discrepancy_fn=tfp.vi.kl_reverse,\n sample_size=int(3e5),\n seed=seed)\n\n reverse_kl_named = tfp.vi.monte_carlo_variational_loss(\n target_log_prob_fn=target_log_prob_fn,\n surrogate_posterior=q_named,\n discrepancy_fn=tfp.vi.kl_reverse,\n sample_size=int(3e5),\n seed=seed)\n\n reverse_kl_sequential_, reverse_kl_named_, = self.evaluate(\n [reverse_kl_sequential, reverse_kl_named])\n\n # Compare to analytic MVN.kl[q|p] == 0.6534264.\n self.assertAllClose(reverse_kl_sequential_, 0.6534264, rtol=0.07, atol=0.)\n self.assertAllClose(reverse_kl_named_, 0.6534264, rtol=0.07, atol=0.)\n\n def test_importance_weighted_objective(self):\n seed = 
test_util.test_seed(sampler_type='stateless')\n\n # Use a normalized target, so the true normalizing constant (lowest possible\n # loss) is zero.\n target = tfd.Normal(loc=0., scale=1.)\n proposal = tfd.StudentT(2, loc=3., scale=2.)\n\n elbo_loss = tfp.vi.monte_carlo_variational_loss(\n target_log_prob_fn=target.log_prob,\n surrogate_posterior=proposal,\n discrepancy_fn=tfp.vi.kl_reverse,\n sample_size=int(3e4),\n importance_sample_size=1,\n seed=seed)\n self.assertAllGreater(elbo_loss, 0.)\n\n # Check that importance sampling reduces the loss towards zero.\n iwae_10_loss = tfp.vi.monte_carlo_variational_loss(\n target_log_prob_fn=target.log_prob,\n surrogate_posterior=proposal,\n discrepancy_fn=tfp.vi.kl_reverse,\n sample_size=int(3e4),\n importance_sample_size=10,\n seed=seed)\n self.assertAllGreater(elbo_loss, iwae_10_loss)\n self.assertAllGreater(iwae_10_loss, 0)\n\n iwae_100_loss = tfp.vi.monte_carlo_variational_loss(\n target_log_prob_fn=target.log_prob,\n surrogate_posterior=proposal,\n discrepancy_fn=tfp.vi.kl_reverse,\n sample_size=int(3e4),\n importance_sample_size=100,\n seed=seed)\n self.assertAllGreater(iwae_10_loss, iwae_100_loss)\n self.assertAllClose(iwae_100_loss, 0, atol=0.1)\n\n def test_score_trick(self):\n d = 5 # Dimension\n sample_size = int(4.5e5)\n seed = test_util.test_seed()\n\n # Variance is very high when approximating Forward KL, so we make\n # scale_diag large. This ensures q \"covers\" p and thus Var_q[p/q] is\n # smaller.\n s = tf.constant(1.)\n\n def construct_monte_carlo_csiszar_f_divergence(\n func, use_reparameterization=True):\n def _fn(s):\n p = tfd.MultivariateNormalFullCovariance(\n covariance_matrix=tridiag(d, diag_value=1, offdiag_value=0.5))\n q = tfd.MultivariateNormalDiag(scale_diag=tf.tile([s], [d]))\n return tfp.vi.monte_carlo_variational_loss(\n target_log_prob_fn=p.log_prob,\n surrogate_posterior=q,\n discrepancy_fn=func,\n sample_size=sample_size,\n use_reparameterization=use_reparameterization,\n seed=seed)\n return _fn\n\n approx_kl = construct_monte_carlo_csiszar_f_divergence(\n tfp.vi.kl_reverse)\n\n approx_kl_self_normalized = construct_monte_carlo_csiszar_f_divergence(\n lambda logu: tfp.vi.kl_reverse(logu, self_normalized=True))\n\n approx_kl_score_trick = construct_monte_carlo_csiszar_f_divergence(\n tfp.vi.kl_reverse, use_reparameterization=False)\n\n approx_kl_self_normalized_score_trick = (\n construct_monte_carlo_csiszar_f_divergence(\n lambda logu: tfp.vi.kl_reverse(logu, self_normalized=True),\n use_reparameterization=False))\n\n def exact_kl(s):\n p = tfd.MultivariateNormalFullCovariance(\n covariance_matrix=tridiag(d, diag_value=1, offdiag_value=0.5))\n q = tfd.MultivariateNormalDiag(scale_diag=tf.tile([s], [d]))\n return tfd.kl_divergence(q, p)\n\n [\n approx_kl_,\n approx_kl_grad_,\n approx_kl_self_normalized_,\n approx_kl_self_normalized_grad_,\n approx_kl_score_trick_,\n approx_kl_score_trick_grad_,\n approx_kl_self_normalized_score_trick_,\n approx_kl_self_normalized_score_trick_grad_,\n exact_kl_,\n exact_kl_grad_,\n ] = self.evaluate(\n list(tfp.math.value_and_gradient(approx_kl, s)) +\n list(tfp.math.value_and_gradient(approx_kl_self_normalized, s)) +\n list(tfp.math.value_and_gradient(approx_kl_score_trick, s)) +\n list(tfp.math.value_and_gradient(\n approx_kl_self_normalized_score_trick, s)) +\n list(tfp.math.value_and_gradient(exact_kl, s)))\n\n # Test average divergence.\n self.assertAllClose(approx_kl_, exact_kl_,\n rtol=0.04, atol=0.)\n\n self.assertAllClose(approx_kl_self_normalized_, exact_kl_,\n rtol=0.08, 
atol=0.)\n\n self.assertAllClose(approx_kl_score_trick_, exact_kl_,\n rtol=0.04, atol=0.)\n\n self.assertAllClose(approx_kl_self_normalized_score_trick_, exact_kl_,\n rtol=0.08, atol=0.)\n\n # Test average gradient-divergence.\n self.assertAllClose(approx_kl_grad_, exact_kl_grad_,\n rtol=0.04, atol=0.)\n\n self.assertAllClose(approx_kl_self_normalized_grad_, exact_kl_grad_,\n rtol=0.04, atol=0.)\n\n self.assertAllClose(approx_kl_score_trick_grad_, exact_kl_grad_,\n rtol=0.05, atol=0.)\n\n self.assertAllClose(\n approx_kl_self_normalized_score_trick_grad_, exact_kl_grad_,\n rtol=0.04, atol=0.)\n\n\n@test_util.test_all_tf_execution_regimes\nclass CsiszarVIMCOTest(test_util.TestCase):\n\n def _csiszar_vimco_helper(self, logu):\n \"\"\"Numpy implementation of `csiszar_vimco_helper`.\"\"\"\n\n # Since this is a naive/intuitive implementation, we compensate by using the\n # highest precision we can.\n logu = np.float128(logu)\n n = logu.shape[0]\n u = np.exp(logu)\n loogeoavg_u = [] # Leave-one-out geometric-average of exp(logu).\n for j in range(n):\n loogeoavg_u.append(np.exp(np.mean(\n [logu[i, ...] for i in range(n) if i != j],\n axis=0)))\n loogeoavg_u = np.array(loogeoavg_u)\n\n loosum_u = [] # Leave-one-out sum of exp(logu).\n for j in range(n):\n loosum_u.append(np.sum(\n [u[i, ...] for i in range(n) if i != j],\n axis=0))\n loosum_u = np.array(loosum_u)\n\n # Natural log of the average u except each is swapped-out for its\n # leave-`i`-th-out Geometric average.\n log_sooavg_u = np.log(loosum_u + loogeoavg_u) - np.log(n)\n\n log_avg_u = np.log(np.mean(u, axis=0))\n return log_avg_u, log_sooavg_u\n\n def test_vimco_and_gradient(self):\n dims = 5 # Dimension\n num_draws = int(1e3)\n num_batch_draws = int(3)\n seed = test_util.test_seed()\n\n with tf.GradientTape(persistent=True) as tape:\n f = lambda logu: tfp.vi.kl_reverse(logu, self_normalized=False)\n np_f = lambda logu: -logu\n\n s = tf.constant(1.)\n tape.watch(s)\n p = tfd.MultivariateNormalFullCovariance(\n covariance_matrix=tridiag(dims, diag_value=1, offdiag_value=0.5))\n\n # Variance is very high when approximating Forward KL, so we make\n # scale_diag large. 
This ensures q \"covers\" p and thus Var_q[p/q] is\n # smaller.\n q = tfd.MultivariateNormalDiag(\n scale_diag=tf.tile([s], [dims]))\n\n vimco = tfp.vi.csiszar_vimco(\n f=f,\n p_log_prob=p.log_prob,\n q=q,\n num_draws=num_draws,\n num_batch_draws=num_batch_draws,\n seed=seed)\n\n # We want the seed to be the same since we will use computations\n # with the same underlying sample to show correctness of vimco.\n if tf.executing_eagerly():\n tf.random.set_seed(seed)\n x = q.sample(sample_shape=[num_draws, num_batch_draws], seed=seed)\n x = tf.stop_gradient(x)\n logu = p.log_prob(x) - q.log_prob(x)\n f_log_sum_u = f(tfp.stats.log_soomean_exp(logu, axis=0)[::-1][0])\n q_log_prob_x = q.log_prob(x)\n\n grad_vimco = tape.gradient(vimco, s)\n grad_mean_f_log_sum_u = tape.gradient(f_log_sum_u, s) / num_batch_draws\n jacobian_logqx = tape.jacobian(q_log_prob_x, s)\n\n [\n logu_,\n jacobian_logqx_,\n vimco_,\n grad_vimco_,\n f_log_sum_u_,\n grad_mean_f_log_sum_u_,\n ] = self.evaluate([\n logu,\n jacobian_logqx,\n vimco,\n grad_vimco,\n f_log_sum_u,\n grad_mean_f_log_sum_u,\n ])\n\n np_log_avg_u, np_log_sooavg_u = self._csiszar_vimco_helper(logu_)\n\n # Test VIMCO loss is correct.\n self.assertAllClose(np_f(np_log_avg_u).mean(axis=0), vimco_,\n rtol=1e-4, atol=1e-5)\n\n # Test gradient of VIMCO loss is correct.\n #\n # To make this computation we'll inject two gradients from TF:\n # - grad[mean(f(log(sum(p(x)/q(x)))))]\n # - jacobian[log(q(x))].\n #\n # We now justify why using these (and only these) TF values for\n # ground-truth does not undermine the completeness of this test.\n #\n # Regarding `grad_mean_f_log_sum_u_`, note that we validate the\n # correctness of the zero-th order derivative (for each batch member).\n # Since `tfp.vi.csiszar_vimco_helper` itself does not manipulate any\n # gradient information, we can safely rely on TF.\n self.assertAllClose(np_f(np_log_avg_u), f_log_sum_u_, rtol=1e-4, atol=1e-5)\n #\n # Regarding `jacobian_logqx_`, note that testing the gradient of\n # `q.log_prob` is outside the scope of this unit-test thus we may safely\n # use TF to find it.\n\n # The `mean` is across batches and the `sum` is across iid samples.\n np_grad_vimco = (\n grad_mean_f_log_sum_u_\n + np.mean(\n np.sum(\n jacobian_logqx_ * (np_f(np_log_avg_u)\n - np_f(np_log_sooavg_u)),\n axis=0),\n axis=0))\n\n self.assertAllClose(np_grad_vimco, grad_vimco_, rtol=0.03, atol=1e-3)\n\n def test_vimco_with_joint_q(self):\n\n # Target distribution: equiv to MVNFullCovariance(cov=[[1., 1.], [1., 2.]])\n def p_log_prob(z, x):\n return tfd.Normal(0., 1.).log_prob(z) + tfd.Normal(z, 1.).log_prob(x)\n\n # Factored q distribution: equiv to MVNDiag(scale_diag=[1., sqrt(2)])\n q_sequential = tfd.JointDistributionSequential([ # Should pass as *args.\n tfd.Normal(0., 1.),\n tfd.Normal(0., tf.sqrt(2.))\n ])\n q_named = tfd.JointDistributionNamed({ # Should pass as **kwargs.\n 'x': tfd.Normal(0., tf.sqrt(2.)),\n 'z': tfd.Normal(0., 1.)\n })\n\n seed = test_util.test_seed()\n\n reverse_kl_sequential = tfp.vi.csiszar_vimco(\n f=tfp.vi.kl_reverse,\n p_log_prob=p_log_prob,\n q=q_sequential,\n num_draws=int(3e5),\n seed=seed)\n\n reverse_kl_named = tfp.vi.csiszar_vimco(\n f=tfp.vi.kl_reverse,\n p_log_prob=p_log_prob,\n q=q_named,\n num_draws=int(3e5),\n seed=seed)\n\n [reverse_kl_sequential_, reverse_kl_named_\n ] = self.evaluate([reverse_kl_sequential, reverse_kl_named])\n\n self.assertAllClose(reverse_kl_sequential_, reverse_kl_named_, atol=0.02)\n\n\nif __name__ == '__main__':\n test_util.main()\n" ]
[ [ "tensorflow.compat.v2.nn.softplus", "tensorflow.compat.v2.fill", "numpy.exp", "numpy.mean", "numpy.sign", "tensorflow.compat.v2.GradientTape", "tensorflow.compat.v2.sqrt", "tensorflow.compat.v2.tile", "numpy.log", "tensorflow.compat.v2.exp", "numpy.log1p", "numpy.float128", "numpy.sqrt", "numpy.square", "numpy.array", "numpy.float64", "tensorflow.compat.v2.executing_eagerly", "tensorflow.compat.v2.random.set_seed", "tensorflow.compat.v2.eye", "numpy.ones", "tensorflow.compat.v2.stop_gradient", "numpy.abs", "numpy.linspace", "tensorflow.compat.v2.constant" ] ]
MattiasFredriksson/py-c3d
[ "36c377763edcfa348fb6e272a8455a69d63ef225" ]
[ "c3d/group.py" ]
[ "''' Classes used to represent the concept of parameter groups in a .c3d file.\n'''\nimport struct\nimport numpy as np\nfrom .parameter import ParamData, Param\nfrom .utils import Decorator\n\n\nclass GroupData(object):\n '''A group of parameters stored in a C3D file.\n\n In C3D files, parameters are organized in groups. Each group has a name (key), a\n description, and a set of named parameters. Each group is also internally associated\n with a numeric key.\n\n Attributes\n ----------\n dtypes : `c3d.dtypes.DataTypes`\n Data types object used for parsing.\n name : str\n Name of this parameter group.\n desc : str\n Description for this parameter group.\n '''\n\n def __init__(self, dtypes, name=None, desc=None):\n self._params = {}\n self._dtypes = dtypes\n # Assign through property setters\n self.set_name(name)\n self.set_desc(desc)\n\n def __repr__(self):\n return '<Group: {}>'.format(self.desc)\n\n def __contains__(self, key):\n return key in self._params\n\n def __getitem__(self, key):\n return self._params[key]\n\n @property\n def binary_size(self) -> int:\n '''Return the number of bytes to store this group and its parameters.'''\n return (\n 1 + # group_id\n 1 + len(self.name.encode('utf-8')) + # size of name and name bytes\n 2 + # next offset marker\n 1 + len(self.desc.encode('utf-8')) + # size of desc and desc bytes\n sum(p.binary_size for p in self._params.values()))\n\n def set_name(self, name):\n ''' Set the group name string. '''\n if name is None or isinstance(name, str):\n self.name = name\n else:\n raise TypeError('Expected group name to be string, was %s.' % type(name))\n\n def set_desc(self, desc):\n ''' Set the Group descriptor.\n '''\n if isinstance(desc, bytes):\n self.desc = self._dtypes.decode_string(desc)\n elif isinstance(desc, str) or desc is None:\n self.desc = desc\n else:\n raise TypeError('Expected descriptor to be python string, bytes or None, was %s.' % type(desc))\n\n def add_param(self, name, **kwargs):\n '''Add a parameter to this group.\n\n Parameters\n ----------\n name : str\n Name of the parameter to add to this group. 
The name will\n automatically be case-normalized.\n\n See constructor of `c3d.parameter.ParamData` for additional keyword arguments.\n\n Raises\n ------\n TypeError\n Input arguments are of the wrong type.\n KeyError\n Name or numerical key already exist (attempt to overwrite existing data).\n '''\n if not isinstance(name, str):\n raise TypeError(\"Expected 'name' argument to be a string, was of type {}\".format(type(name)))\n name = name.upper()\n if name in self._params:\n raise KeyError('Parameter already exists with key {}'.format(name))\n self._params[name] = Param(ParamData(name, self._dtypes, **kwargs))\n\n def remove_param(self, name):\n '''Remove the specified parameter.\n\n Parameters\n ----------\n name : str\n Name for the parameter to remove.\n '''\n del self._params[name]\n\n def rename_param(self, name, new_name):\n ''' Rename a specified parameter group.\n\n Parameters\n ----------\n name : str, or `c3d.group.GroupReadonly`\n Parameter instance, or name.\n new_name : str\n New name for the parameter.\n Raises\n ------\n KeyError\n If no parameter with the original name exists.\n ValueError\n If the new name already exist (attempt to overwrite existing data).\n '''\n if new_name in self._params:\n raise ValueError(\"Key {} already exist.\".format(new_name))\n if isinstance(name, Param):\n param = name\n name = param.name\n else:\n # Aquire instance using id\n param = self._params[name]\n del self._params[name]\n self._params[new_name] = param\n\n def write(self, group_id, handle):\n '''Write this parameter group, with parameters, to a file handle.\n\n Parameters\n ----------\n group_id : int\n The numerical ID of the group.\n handle : file handle\n An open, writable, binary file handle.\n '''\n name = self.name.encode('utf-8')\n desc = self.desc.encode('utf-8')\n handle.write(struct.pack('bb', len(name), -group_id))\n handle.write(name)\n handle.write(struct.pack('<h', 3 + len(desc)))\n handle.write(struct.pack('B', len(desc)))\n handle.write(desc)\n for param in self._params.values():\n param._data.write(group_id, handle)\n\n\nclass GroupReadonly(object):\n ''' Wrapper exposing readonly attributes of a `c3d.group.GroupData` entry.\n '''\n def __init__(self, data):\n self._data = data\n\n def __contains__(self, key):\n return key in self._data._params\n\n def __eq__(self, other):\n return self._data is other._data\n\n @property\n def name(self) -> str:\n ''' Access group name. '''\n return self._data.name\n\n @property\n def desc(self) -> str:\n '''Access group descriptor. '''\n return self._data.desc\n\n def items(self):\n ''' Get iterator for paramater key-entry pairs. '''\n return ((k, v.readonly()) for k, v in self._data._params.items())\n\n def values(self):\n ''' Get iterator for parameter entries. '''\n return (v.readonly() for v in self._data._params.values())\n\n def keys(self):\n ''' Get iterator for parameter entry keys. '''\n return self._data._params.keys()\n\n def get(self, key, default=None):\n '''Get a readonly parameter by key.\n\n Parameters\n ----------\n key : any\n Parameter key to look up in this group.\n default : any, optional\n Value to return if the key is not found. 
Defaults to None.\n\n Returns\n -------\n param : :class:`ParamReadable`\n A parameter from the current group.\n '''\n val = self._data._params.get(key, default)\n if val:\n return val.readonly()\n return default\n\n def get_int8(self, key):\n '''Get the value of the given parameter as an 8-bit signed integer.'''\n return self._data[key.upper()].int8_value\n\n def get_uint8(self, key):\n '''Get the value of the given parameter as an 8-bit unsigned integer.'''\n return self._data[key.upper()].uint8_value\n\n def get_int16(self, key):\n '''Get the value of the given parameter as a 16-bit signed integer.'''\n return self._data[key.upper()].int16_value\n\n def get_uint16(self, key):\n '''Get the value of the given parameter as a 16-bit unsigned integer.'''\n return self._data[key.upper()].uint16_value\n\n def get_int32(self, key):\n '''Get the value of the given parameter as a 32-bit signed integer.'''\n return self._data[key.upper()].int32_value\n\n def get_uint32(self, key):\n '''Get the value of the given parameter as a 32-bit unsigned integer.'''\n return self._data[key.upper()].uint32_value\n\n def get_float(self, key):\n '''Get the value of the given parameter as a 32-bit float.'''\n return self._data[key.upper()].float_value\n\n def get_bytes(self, key):\n '''Get the value of the given parameter as a byte array.'''\n return self._data[key.upper()].bytes_value\n\n def get_string(self, key):\n '''Get the value of the given parameter as a string.'''\n return self._data[key.upper()].string_value\n\n\nclass Group(GroupReadonly):\n ''' Wrapper exposing readable and writeable attributes of a `c3d.group.GroupData` entry.\n '''\n def __init__(self, data):\n super(Group, self).__init__(data)\n\n def readonly(self):\n ''' Returns a `c3d.group.GroupReadonly` instance with readonly access. '''\n return GroupReadonly(self._data)\n\n @property\n def name(self) -> str:\n ''' Get or set name. '''\n return self._data.name\n\n @name.setter\n def name(self, value) -> str:\n self._data.set_name(value)\n\n @property\n def desc(self) -> str:\n ''' Get or set descriptor. '''\n return self._data.desc\n\n @desc.setter\n def desc(self, value) -> str:\n self._data.set_desc(value)\n\n def items(self):\n ''' Iterator for paramater key-entry pairs. '''\n return ((k, v) for k, v in self._data._params.items())\n\n def values(self):\n ''' Iterator iterator for parameter entries. '''\n return (v for v in self._data._params.values())\n\n def get(self, key, default=None):\n '''Get a parameter by key.\n\n Parameters\n ----------\n key : any\n Parameter key to look up in this group.\n default : any, optional\n Value to return if the key is not found. 
Defaults to None.\n\n Returns\n -------\n param : :class:`ParamReadable`\n A parameter from the current group.\n '''\n return self._data._params.get(key, default)\n\n #\n # Forward param editing\n #\n def add_param(self, name, **kwargs):\n '''Add a parameter to this group.\n\n See constructor of `c3d.parameter.ParamData` for additional keyword arguments.\n '''\n self._data.add_param(name, **kwargs)\n\n def remove_param(self, name):\n '''Remove the specified parameter.\n\n Parameters\n ----------\n name : str\n Name for the parameter to remove.\n '''\n self._data.remove_param(name)\n\n def rename_param(self, name, new_name):\n ''' Rename a specified parameter group.\n\n Parameters\n ----------\n See arguments in `c3d.group.GroupData.rename_param`.\n '''\n self._data.rename_param(name, new_name)\n\n #\n # Convenience functions for adding parameters.\n #\n def add(self, name, desc, bpe, format, data, *dimensions):\n ''' Add a parameter with `data` package formated in accordance with `format`.\n\n Convenience function for `c3d.group.GroupData.add_param` calling struct.pack() on `data`.\n\n Example:\n\n >>> group.set('RATE', 'Point data sample rate', 4, '<f', 100)\n\n Parameters\n ----------\n name : str\n Parameter name.\n desc : str\n Parameter descriptor.\n bpe : int\n Number of bytes for each atomic element.\n format : str or None\n `struct.format()` compatible format string see:\n https://docs.python.org/3/library/struct.html#format-characters\n *dimensions : int, optional\n Shape associated with the data (if the data argument represents multiple elements).\n '''\n if isinstance(data, bytes):\n pass\n else:\n data = struct.pack(format, data)\n\n self.add_param(name,\n desc=desc,\n bytes_per_element=bpe,\n bytes=data,\n dimensions=list(dimensions))\n\n def add_array(self, name, desc, data, dtype=None):\n '''Add a parameter with the `data` package.\n\n Parameters\n ----------\n name : str\n Parameter name.\n desc : str\n Parameter descriptor.\n data : np.ndarray, or iterable\n Data array to encode in the parameter.\n dtype : np.dtype, optional\n Numpy data type used to encode the array (optional only if `data.dtype` returns a numpy type).\n '''\n if not isinstance(data, np.ndarray):\n if dtype is None:\n dtype = data.dtype\n data = np.array(data, dtype=dtype)\n elif dtype is None:\n dtype = data.dtype\n\n self.add_param(name,\n desc=desc,\n bytes_per_element=dtype.itemsize,\n bytes=data.tobytes(),\n dimensions=data.shape[::-1])\n\n def add_str(self, name, desc, data, *dimensions):\n ''' Add a string parameter.\n\n Parameters\n ----------\n name : str\n Parameter name.\n desc : str\n Parameter descriptor.\n data : str\n String to encode in the parameter.\n *dimensions : int, optional\n Shape associated with the string (if the string represents multiple elements).\n '''\n shape = list(dimensions)\n self.add_param(name,\n desc=desc,\n bytes_per_element=-1,\n bytes=data.encode('utf-8'),\n dimensions=shape or [len(data)])\n\n def add_empty_array(self, name, desc=''):\n ''' Add an empty parameter block.\n\n Parameters\n ----------\n name : str\n Parameter name.\n '''\n self.add_param(name, desc=desc,\n bytes_per_element=0, dimensions=[0])\n\n #\n # Convenience functions for adding or overwriting parameters.\n #\n def set(self, name, *args, **kwargs):\n ''' Add or overwrite a parameter with 'bytes' package formated in accordance with 'format'.\n\n See arguments in `c3d.group.Group.add`.\n '''\n try:\n self.remove_param(name)\n except KeyError as e:\n pass\n self.add(name, *args, **kwargs)\n\n def 
set_str(self, name, *args, **kwargs):\n ''' Add or overwrite a string parameter.\n\n See arguments in `c3d.group.Group.add_str`.\n '''\n try:\n self.remove_param(name)\n except KeyError as e:\n pass\n self.add_str(name, *args, **kwargs)\n\n def set_array(self, name, *args, **kwargs):\n ''' Add or overwrite a parameter with the `data` package.\n\n See arguments in `c3d.group.Group.add_array`.\n '''\n try:\n self.remove_param(name)\n except KeyError as e:\n pass\n self.add_array(name, *args, **kwargs)\n\n def set_empty_array(self, name, *args, **kwargs):\n ''' Add an empty parameter block.\n\n See arguments in `c3d.group.Group.add_empty_array`.\n '''\n try:\n self.remove_param(name)\n except KeyError as e:\n pass\n self.add_empty_array(name, *args, **kwargs)\n" ]
[ [ "numpy.array" ] ]
Plozano94/skforecast
[ "71b83a45ecde757fb24be58adf9c88d8066a4582" ]
[ "skforecast/model_selection_statsmodels.py" ]
[ "################################################################################\n# skforecast.model_selection #\n# #\n# This work by Joaquín Amat Rodrigo is licensed under a Creative Commons #\n# Attribution 4.0 International License. #\n################################################################################\n# coding=utf-8\n\n\nimport typing\nfrom typing import Union, Dict, List, Tuple\nimport numpy as np\nimport pandas as pd\nimport logging\nimport tqdm\nfrom sklearn.metrics import mean_squared_error \nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.metrics import mean_absolute_percentage_error\nfrom sklearn.model_selection import ParameterGrid\nfrom statsmodels.tsa.statespace.sarimax import SARIMAX\nfrom statsmodels.tsa.ar_model import AutoReg\n\nfrom .model_selection import time_series_spliter\n\nlogging.basicConfig(\n format = '%(asctime)-5s %(name)-10s %(levelname)-5s %(message)s', \n level = logging.INFO,\n)\n\ndef backtesting_autoreg_statsmodels(\n y: Union[np.ndarray, pd.Series],\n lags: int, \n initial_train_size: int,\n steps: int,\n metric: str,\n exog: Union[np.ndarray, pd.Series, pd.DataFrame]=None,\n verbose: bool=False\n) -> Tuple[np.array, np.array]:\n '''\n \n Backtesting (validation) of `AutoReg` model from statsmodels v0.12. The model is\n trained only once using the `initial_train_size` first observations. In each\n iteration, a number of `steps` predictions are evaluated. This evaluation is\n much faster than cross-validation since the model is trained only once.\n \n Parameters\n ----------\n y : 1D np.ndarray, pd.Series\n Training time series values. \n \n lags: int, list\n The number of lags to include in the model if an integer or the list of\n lag indices to include. For example, [1, 4] will only include lags 1 and\n 4 while lags=4 will include lags 1, 2, 3, and 4.\n \n initial_train_size: int \n Number of samples in the initial train split.\n \n steps : int\n Number of steps to predict.\n \n metric : {'mean_squared_error', 'mean_absolute_error', 'mean_absolute_percentage_error'}\n Metric used to quantify the goodness of fit of the model.\n \n exog : np.ndarray, pd.Series, pd.DataFrame, default `None`\n Exogenous variable/s included as predictor/s. Must have the same\n number of observations as `y` and should be aligned so that y[i] is\n regressed on exog[i].\n \n verbose : bool, default `False`\n Print number of folds used for backtesting.\n\n Returns \n -------\n metric_value: 1D np.ndarray\n Value of the metric.\n \n backtest_predictions: 1D np.ndarray\n Value of predictions.\n '''\n \n\n if metric not in ['mean_squared_error', 'mean_absolute_error',\n 'mean_absolute_percentage_error']:\n raise Exception(\n f\"Allowed metrics are: 'mean_squared_error', 'mean_absolute_error' and \"\n f\"'mean_absolute_percentage_error'. 
Got {metric}.\"\n )\n \n backtest_predictions = []\n \n metrics = {\n 'mean_squared_error': mean_squared_error,\n 'mean_absolute_error': mean_absolute_error,\n 'mean_absolute_percentage_error': mean_absolute_percentage_error\n }\n \n metric = metrics[metric]\n \n if isinstance(y, pd.Series):\n y = y.to_numpy(copy=True)\n \n if isinstance(exog, (pd.Series, pd.DataFrame)):\n exog = exog.to_numpy(copy=True)\n \n if exog is None:\n model = AutoReg(endog=y[:initial_train_size], lags=lags).fit()\n else:\n model = AutoReg(\n endog = y[:initial_train_size],\n exog = exog[:initial_train_size],\n lags = lags\n ).fit()\n \n \n folds = (len(y) - initial_train_size) // steps + 1\n remainder = (len(y) - initial_train_size) % steps\n \n if verbose:\n print(f\"Number of observations used for training: {initial_train_size}\")\n print(f\"Number of observations used for testing: {len(y) - initial_train_size}\")\n print(f\" Number of folds: {folds - 1 * (remainder == 0)}\")\n print(f\" Number of steps per fold: {steps}\")\n if remainder != 0:\n print(f\" Last fold only includes {remainder} observations\")\n \n for i in range(folds):\n last_window_end = initial_train_size + i * steps\n last_window_start = (initial_train_size + i * steps) - steps \n last_window = y[last_window_start:last_window_end]\n \n if i == 0:\n if exog is None:\n pred = model.forecast(steps=steps)\n \n else:\n pred = model.forecast(\n steps = steps,\n exog = exog[last_window_end:last_window_end + steps]\n )\n \n elif i < folds - 1:\n # Update internal values stored by AutoReg\n model.model._y = np.vstack((\n model.model._y,\n last_window.reshape(-1,1)\n ))\n \n if exog is None:\n pred = model.forecast(steps=steps)\n \n else:\n pred = model.forecast(\n steps = steps,\n exog = exog[last_window_end:last_window_end + steps]\n )\n \n elif remainder != 0:\n steps = remainder\n # Update internal values stored by AutoReg\n model.model._y = np.vstack((\n model.model._y,\n last_window.reshape(-1,1)\n ))\n \n if exog is None:\n pred = model.forecast(steps=steps)\n \n else:\n pred = model.forecast(\n steps = steps,\n exog = exog[last_window_end:last_window_end + steps]\n )\n else:\n continue\n \n backtest_predictions.append(pred)\n \n backtest_predictions = np.concatenate(backtest_predictions)\n metric_value = metric(\n y_true = y[initial_train_size: initial_train_size + len(backtest_predictions)],\n y_pred = backtest_predictions\n )\n\n return np.array([metric_value]), backtest_predictions\n\n\ndef cv_autoreg_statsmodels(\n y: Union[np.ndarray, pd.Series],\n lags: int, \n initial_train_size: int,\n steps: int,\n metric: str,\n exog: Union[np.ndarray, pd.Series, pd.DataFrame]=None,\n allow_incomplete_fold: bool=True,\n verbose: bool=False\n) -> Tuple[np.array, np.array]:\n '''\n \n Cross-validation of `AutoReg` model from statsmodels v0.12. The order of data\n is maintained and the training set increases in each iteration.\n \n Parameters\n ----------\n y : 1D np.ndarray, pd.Series\n Training time series values. \n \n lags: int, list\n The number of lags to include in the model if an integer or the list of\n lag indices to include. 
For example, [1, 4] will only include lags 1 and\n 4 while lags=4 will include lags 1, 2, 3, and 4.\n \n initial_train_size: int \n Number of samples in the initial train split.\n \n steps : int\n Number of steps to predict.\n \n metric : {'mean_squared_error', 'mean_absolute_error', 'mean_absolute_percentage_error'}\n Metric used to quantify the goodness of fit of the model.\n \n exog : np.ndarray, pd.Series, pd.DataFrame, default `None`\n Exogenous variable/s included as predictor/s. Must have the same\n number of observations as `y` and should be aligned so that y[i] is\n regressed on exog[i].\n \n verbose : bool, default `False`\n Print number of folds used for cross-validation.\n \n Returns \n -------\n cv_metrics: 1D np.ndarray\n Value of the metric for each fold.\n \n cv_predictions: 1D np.ndarray\n Predictions.\n '''\n \n\n if metric not in ['mean_squared_error', 'mean_absolute_error',\n 'mean_absolute_percentage_error']:\n raise Exception(\n f\"Allowed metrics are: 'mean_squared_error', 'mean_absolute_error' and \"\n f\"'mean_absolute_percentage_error'. Got {metric}.\"\n )\n \n metrics = {\n 'mean_squared_error': mean_squared_error,\n 'mean_absolute_error': mean_absolute_error,\n 'mean_absolute_percentage_error': mean_absolute_percentage_error\n }\n \n metric = metrics[metric]\n \n if isinstance(y, pd.Series):\n y = y.to_numpy(copy=True)\n \n if isinstance(exog, (pd.Series, pd.DataFrame)):\n exog = exog.to_numpy(copy=True)\n \n cv_predictions = []\n cv_metrics = []\n \n splits = time_series_spliter(\n y = y,\n initial_train_size = initial_train_size,\n steps = steps,\n allow_incomplete_fold = allow_incomplete_fold,\n verbose = verbose\n )\n \n for train_index, test_index in splits:\n \n if exog is None:\n model = AutoReg(endog=y[train_index], lags=lags).fit()\n pred = model.forecast(steps=len(test_index))\n \n else:\n model = AutoReg(\n endog = y[train_index],\n exog = exog[train_index],\n lags = lags\n ).fit()\n pred = model.forecast(steps=len(test_index), exog=exog[test_index])\n \n \n metric_value = metric(\n y_true = y[test_index],\n y_pred = pred\n )\n \n cv_metrics.append(metric_value)\n cv_predictions.append(pred)\n \n return np.array(cv_metrics), np.concatenate(cv_predictions)\n\n\ndef backtesting_sarimax_statsmodels(\n y: Union[np.ndarray, pd.Series],\n initial_train_size: int,\n steps: int,\n metric: str,\n order: tuple=(1, 0, 0), \n seasonal_order: tuple=(0, 0, 0, 0),\n trend: str=None,\n alpha: float= 0.05,\n exog: Union[np.ndarray, pd.Series, pd.DataFrame]=None,\n sarimax_kwargs: dict={},\n fit_kwargs: dict={'disp':0},\n verbose: bool=False\n) -> Tuple[np.array, np.array]:\n '''\n \n Backtesting (validation) of `SARIMAX` model from statsmodels v0.12. The model\n is trained only once using the `initial_train_size` first observations. In each\n iteration, a number of `steps` predictions are evaluated. This evaluation is\n much faster than cross-validation since the model is trained only once.\n \n https://www.statsmodels.org/dev/examples/notebooks/generated/statespace_forecasting.html\n \n Parameters\n ----------\n y : 1D np.ndarray, pd.Series\n Training time series values. \n \n order: tuple \n The (p,d,q) order of the model for the number of AR parameters, differences,\n and MA parameters. d must be an integer indicating the integration order\n of the process, while p and q may either be an integers indicating the AR\n and MA orders (so that all lags up to those orders are included) or else\n iterables giving specific AR and / or MA lags to include. 
Default is an\n AR(1) model: (1,0,0).\n \n seasonal_order: tuple\n The (P,D,Q,s) order of the seasonal component of the model for the AR parameters,\n differences, MA parameters, and periodicity. D must be an integer\n indicating the integration order of the process, while P and Q may either\n be an integers indicating the AR and MA orders (so that all lags up to\n those orders are included) or else iterables giving specific AR and / or\n MA lags to include. s is an integer giving the periodicity (number of\n periods in season), often it is 4 for quarterly data or 12 for monthly data.\n Default is no seasonal effect.\n \n trend: str {‘n’,’c’,’t’,’ct’}\n Parameter controlling the deterministic trend polynomial A(t). Can be\n specified as a string where ‘c’ indicates a constant (i.e. a degree zero\n component of the trend polynomial), ‘t’ indicates a linear trend with time,\n and ‘ct’ is both. Can also be specified as an iterable defining the non-zero\n polynomial exponents to include, in increasing order. For example, [1,1,0,1]\n denotes a+bt+ct3. Default is to not include a trend component.\n \n alpha: float, default 0.05\n The significance level for the confidence interval. The default alpha = .05 returns a 95% confidence interval.\n \n initial_train_size: int \n Number of samples in the initial train split.\n \n steps : int\n Number of steps to predict.\n \n metric : {'mean_squared_error', 'mean_absolute_error', 'mean_absolute_percentage_error'}\n Metric used to quantify the goodness of fit of the model.\n \n exog : np.ndarray, pd.Series, pd.DataFrame, default `None`\n Exogenous variable/s included as predictor/s. Must have the same\n number of observations as `y` and should be aligned so that y[i] is\n regressed on exog[i].\n \n sarimax_kwargs: dict, default `{}`\n Additional keyword arguments passed to SARIMAX constructor. See more in\n https://www.statsmodels.org/stable/generated/statsmodels.tsa.statespace.sarimax.SARIMAX.html#statsmodels.tsa.statespace.sarimax.SARIMAX\n \n fit_kwargs: dict, default `{'disp':0}`\n Additional keyword arguments passed to SARIMAX fit. See more in\n https://www.statsmodels.org/stable/generated/statsmodels.tsa.statespace.sarimax.SARIMAX.fit.html#statsmodels.tsa.statespace.sarimax.SARIMAX.fit\n \n verbose : bool, default `False`\n Print number of folds used for backtesting.\n \n Returns \n -------\n metric_value: np.ndarray shape (1,)\n Value of the metric.\n\n backtest_predictions: 1D np.ndarray\n 2D np.ndarray with predicted value and their estimated interval.\n Column 0 = predictions\n Column 1 = lower bound interval\n Column 2 = upper bound interval.\n '''\n \n\n if metric not in ['mean_squared_error', 'mean_absolute_error',\n 'mean_absolute_percentage_error']:\n raise Exception(\n f\"Allowed metrics are: 'mean_squared_error', 'mean_absolute_error' and \"\n f\"'mean_absolute_percentage_error'. 
Got {metric}.\"\n )\n \n backtest_predictions = []\n \n metrics = {\n 'mean_squared_error': mean_squared_error,\n 'mean_absolute_error': mean_absolute_error,\n 'mean_absolute_percentage_error': mean_absolute_percentage_error\n }\n \n metric = metrics[metric]\n \n if isinstance(y, pd.Series):\n y = y.to_numpy(copy=True)\n \n if isinstance(exog, (pd.Series, pd.DataFrame)):\n exog = exog.to_numpy(copy=True)\n \n if exog is None:\n model = SARIMAX(\n endog = y[:initial_train_size],\n order = order,\n seasonal_order = seasonal_order,\n trend = trend,\n **sarimax_kwargs\n ).fit(**fit_kwargs)\n \n else:\n model = SARIMAX(\n endog = y[:initial_train_size],\n exog = exog[:initial_train_size],\n order = order,\n seasonal_order = seasonal_order,\n trend = trend,\n **sarimax_kwargs\n ).fit(**fit_kwargs)\n \n \n folds = (len(y) - initial_train_size) // steps + 1\n remainder = (len(y) - initial_train_size) % steps\n \n if verbose:\n print(f\"Number of observations used for training: {initial_train_size}\")\n print(f\"Number of observations used for testing: {len(y) - initial_train_size}\")\n print(f\" Number of folds: {folds - 1 * (remainder == 0)}\")\n print(f\" Number of steps per fold: {steps}\")\n if remainder != 0:\n print(f\" Last fold only includes {remainder} observations\")\n \n for i in range(folds):\n last_window_end = initial_train_size + i * steps\n last_window_start = (initial_train_size + i * steps) - steps \n last_window_y = y[last_window_start:last_window_end]\n if exog is not None:\n last_window_exog = exog[last_window_start:last_window_end]\n next_window_exog = exog[last_window_end:last_window_end + steps]\n \n if i == 0:\n if exog is None:\n pred = model.get_forecast(steps=steps)\n pred = np.column_stack((pred.predicted_mean, pred.conf_int(alpha=alpha)))\n \n else:\n pred = model.get_forecast(steps=steps, exog=next_window_exog)\n pred = np.column_stack((pred.predicted_mean, pred.conf_int(alpha=alpha)))\n \n elif i < folds - 1:\n if exog is None:\n model = model.extend(endog=last_window_y)\n pred = model.get_forecast(steps=steps)\n pred = np.column_stack((pred.predicted_mean, pred.conf_int(alpha=alpha)))\n \n else:\n model = model.extend(endog=last_window_y, exog=last_window_exog)\n pred = model.get_forecast(steps=steps, exog=next_window_exog)\n pred = np.column_stack((pred.predicted_mean, pred.conf_int(alpha=alpha)))\n \n elif remainder != 0:\n steps = remainder\n \n if exog is None:\n model = model.extend(exog=last_window_y)\n pred = model.get_forecast(steps=steps)\n pred = np.column_stack((pred.predicted_mean, pred.conf_int(alpha=alpha)))\n \n else:\n model = model.extend(endog=last_window_y, exog=last_window_exog)\n pred = model.get_forecast(steps=steps, exog=next_window_exog)\n pred = np.column_stack((pred.predicted_mean, pred.conf_int(alpha=alpha)))\n else:\n continue\n \n backtest_predictions.append(pred)\n \n backtest_predictions = np.concatenate(backtest_predictions)\n metric_value = metric(\n y_true = y[initial_train_size: initial_train_size + len(backtest_predictions)],\n y_pred = backtest_predictions[:, 0]\n )\n\n return np.array([metric_value]), backtest_predictions\n\n\ndef cv_sarimax_statsmodels(\n y: Union[np.ndarray, pd.Series],\n initial_train_size: int,\n steps: int,\n metric: str,\n order: tuple=(1, 0, 0), \n seasonal_order: tuple=(0, 0, 0, 0),\n trend: str=None,\n alpha: float= 0.05,\n exog: Union[np.ndarray, pd.Series, pd.DataFrame]=None,\n allow_incomplete_fold: bool=True,\n sarimax_kwargs: dict={},\n fit_kwargs: dict={'disp':0},\n verbose: bool=False\n) -> 
Tuple[np.array, np.array]:\n\n '''\n \n Cross-validation of `SARIMAX` model from statsmodels v0.12. The order of data\n is maintained and the training set increases in each iteration.\n \n Parameters\n ----------\n y : 1D np.ndarray, pd.Series\n Training time series values. \n \n order: tuple \n The (p,d,q) order of the model for the number of AR parameters, differences,\n and MA parameters. d must be an integer indicating the integration order\n of the process, while p and q may either be an integers indicating the AR\n and MA orders (so that all lags up to those orders are included) or else\n iterables giving specific AR and / or MA lags to include. Default is an\n AR(1) model: (1,0,0).\n \n seasonal_order: tuple\n The (P,D,Q,s) order of the seasonal component of the model for the AR parameters,\n differences, MA parameters, and periodicity. D must be an integer\n indicating the integration order of the process, while P and Q may either\n be an integers indicating the AR and MA orders (so that all lags up to\n those orders are included) or else iterables giving specific AR and / or\n MA lags to include. s is an integer giving the periodicity (number of\n periods in season), often it is 4 for quarterly data or 12 for monthly data.\n Default is no seasonal effect.\n \n trend: str {‘n’,’c’,’t’,’ct’}\n Parameter controlling the deterministic trend polynomial A(t). Can be\n specified as a string where ‘c’ indicates a constant (i.e. a degree zero\n component of the trend polynomial), ‘t’ indicates a linear trend with time,\n and ‘ct’ is both. Can also be specified as an iterable defining the non-zero\n polynomial exponents to include, in increasing order. For example, [1,1,0,1]\n denotes a+bt+ct3. Default is to not include a trend component.\n \n alpha: float, default 0.05\n The significance level for the confidence interval. The default alpha = .05 returns a 95% confidence interval.\n \n initial_train_size: int \n Number of samples in the initial train split.\n \n steps : int\n Number of steps to predict.\n \n metric : {'mean_squared_error', 'mean_absolute_error', 'mean_absolute_percentage_error'}\n Metric used to quantify the goodness of fit of the model.\n \n exog : np.ndarray, pd.Series, pd.DataFrame, default `None`\n Exogenous variable/s included as predictor/s. Must have the same\n number of observations as `y` and should be aligned so that y[i] is\n regressed on exog[i].\n \n sarimax_kwargs: dict, default {}\n Additional keyword arguments passed to SARIMAX initialization. See more in\n https://www.statsmodels.org/stable/generated/statsmodels.tsa.statespace.sarimax.SARIMAX.html#statsmodels.tsa.statespace.sarimax.SARIMAX\n \n fit_kwargs: dict, default `{'disp':0}`\n Additional keyword arguments passed to SARIMAX fit. See more in\n https://www.statsmodels.org/stable/generated/statsmodels.tsa.statespace.sarimax.SARIMAX.fit.html#statsmodels.tsa.statespace.sarimax.SARIMAX.fit\n \n verbose : bool, default `False`\n Print number of folds used for cross-validation.\n \n Returns \n -------\n cv_metrics: 1D np.ndarray\n Value of the metric for each partition.\n\n cv_predictions: np.ndarray\n 2D np.ndarray with predicted value and their estimated interval.\n Column 0 = predictions\n Column 1 = lower bound interval\n Column 2 = upper bound interval.\n '''\n \n\n if metric not in ['mean_squared_error', 'mean_absolute_error',\n 'mean_absolute_percentage_error']:\n raise Exception(\n f\"Allowed metrics are: 'mean_squared_error', 'mean_absolute_error' and \"\n f\"'mean_absolute_percentage_error'. 
Got {metric}.\"\n )\n \n metrics = {\n 'mean_squared_error': mean_squared_error,\n 'mean_absolute_error': mean_absolute_error,\n 'mean_absolute_percentage_error': mean_absolute_percentage_error\n }\n \n metric = metrics[metric]\n \n if isinstance(y, pd.Series):\n y = y.to_numpy(copy=True)\n \n if isinstance(exog, (pd.Series, pd.DataFrame)):\n exog = exog.to_numpy(copy=True)\n \n cv_predictions = []\n cv_metrics = []\n \n splits = time_series_spliter(\n y = y,\n initial_train_size = initial_train_size,\n steps = steps,\n allow_incomplete_fold = allow_incomplete_fold,\n verbose = verbose\n )\n \n for train_index, test_index in splits:\n \n if exog is None:\n model = SARIMAX(\n endog = y[train_index],\n order = order,\n seasonal_order = seasonal_order,\n trend = trend,\n **sarimax_kwargs\n ).fit(**fit_kwargs)\n \n pred = model.get_forecast(steps=len(test_index))\n pred = np.column_stack((pred.predicted_mean, pred.conf_int(alpha=alpha)))\n \n else: \n model = SARIMAX(\n endog = y[train_index],\n exog = exog[train_index],\n order = order,\n seasonal_order = seasonal_order,\n trend = trend,\n **sarimax_kwargs\n ).fit(**fit_kwargs)\n \n pred = model.get_forecast(steps=len(test_index), exog=exog[test_index])\n pred = np.column_stack((pred.predicted_mean, pred.conf_int(alpha=alpha)))\n \n \n metric_value = metric(\n y_true = y[test_index],\n y_pred = pred[:, 0]\n )\n \n cv_metrics.append(metric_value)\n cv_predictions.append(pred)\n \n return np.array(cv_metrics), np.concatenate(cv_predictions)\n\n\ndef grid_search_sarimax_statsmodels(\n y: Union[np.ndarray, pd.Series],\n param_grid: dict,\n initial_train_size: int,\n steps: int,\n metric: str,\n exog: Union[np.ndarray, pd.Series, pd.DataFrame]=None,\n method: str='cv',\n allow_incomplete_fold: bool=True,\n sarimax_kwargs: dict={},\n fit_kwargs: dict={'disp':0},\n verbose: bool=False\n) -> pd.DataFrame:\n '''\n\n Exhaustive search over specified parameter values for a `SARIMAX` model from\n statsmodels v0.12. Validation is done using time series cross-validation or\n backtesting.\n \n Parameters\n ----------\n y : 1D np.ndarray, pd.Series\n Training time series values. \n \n param_grid : dict\n Dictionary with parameters names (`str`) as keys and lists of parameter\n settings to try as values. Allowed parameters in the grid are: order,\n seasonal_order and trend.\n \n initial_train_size: int \n Number of samples in the initial train split.\n \n steps : int\n Number of steps to predict.\n \n metric : {'mean_squared_error', 'mean_absolute_error', 'mean_absolute_percentage_error'}\n Metric used to quantify the goodness of fit of the model.\n \n exog : np.ndarray, pd.Series, pd.DataFrame, default `None`\n Exogenous variable/s included as predictor/s. Must have the same\n number of observations as `y` and should be aligned so that y[i] is\n regressed on exog[i].\n \n method : {'cv', 'backtesting'}\n Method used to estimate the metric for each parameter combination.\n 'cv' for time series crosvalidation and 'backtesting' for simple\n backtesting. 'backtesting' is much faster since the model is fitted only\n once.\n \n allow_incomplete_fold : bool, default `True`\n The last test set is allowed to be incomplete if it does not reach `steps`\n observations. Otherwise, the latest observations are discarded.\n \n return_best : bool\n Refit the `forecaster` using the best found parameters on the whole data.\n \n sarimax_kwargs: dict, default `{}`\n Additional keyword arguments passed to SARIMAX initialization. 
See more in\n https://www.statsmodels.org/stable/generated/statsmodels.tsa.statespace.sarimax.SARIMAX.html#statsmodels.tsa.statespace.sarimax.SARIMAX\n \n fit_kwargs: dict, default `{'disp':0}`\n Additional keyword arguments passed to SARIMAX fit. See more in\n https://www.statsmodels.org/stable/generated/statsmodels.tsa.statespace.sarimax.SARIMAX.fit.html#statsmodels.tsa.statespace.sarimax.SARIMAX.fit\n \n verbose : bool, default `True`\n Print number of folds used for cv or backtesting.\n\n Returns \n -------\n results: pandas.DataFrame\n Metric value estimated for each combination of parameters.\n\n '''\n\n \n if isinstance(y, pd.Series):\n y = y.to_numpy(copy=True)\n \n if isinstance(exog, (pd.Series, pd.DataFrame)):\n exog = exog.to_numpy(copy=True)\n \n \n params_list = []\n metric_list = []\n bic_list = []\n aic_list = []\n \n if 'order' not in param_grid:\n param_grid['order'] = [(1, 0, 0)]\n if 'seasonal_order' not in param_grid:\n param_grid['seasonal_order'] = [(0, 0, 0, 0)]\n if 'trend' not in param_grid:\n param_grid['trend'] = [None]\n\n keys_to_ignore = set(param_grid.keys()) - {'order', 'seasonal_order', 'trend'}\n if keys_to_ignore:\n print(\n f'Only arguments: order, seasonal_order and trend are allowed for grid serach.'\n f' Ignoring {keys_to_ignore}.'\n )\n for key in keys_to_ignore:\n del param_grid[key]\n \n param_grid = list(ParameterGrid(param_grid))\n\n logging.info(\n f\"Number of models compared: {len(param_grid)}\"\n )\n \n \n for params in tqdm.tqdm(param_grid):\n\n if method == 'cv':\n metrics = cv_sarimax_statsmodels(\n y = y,\n exog = exog,\n order = params['order'],\n seasonal_order = params['seasonal_order'],\n trend = params['trend'],\n initial_train_size = initial_train_size,\n steps = steps,\n metric = metric,\n sarimax_kwargs = sarimax_kwargs,\n fit_kwargs = fit_kwargs,\n verbose = verbose\n )[0]\n else:\n metrics = backtesting_sarimax_statsmodels(\n y = y,\n exog = exog,\n order = params['order'],\n seasonal_order = params['seasonal_order'],\n trend = params['trend'],\n initial_train_size = initial_train_size,\n steps = steps,\n metric = metric,\n sarimax_kwargs = sarimax_kwargs,\n fit_kwargs = fit_kwargs,\n verbose = verbose\n )[0]\n\n params_list.append(params)\n metric_list.append(metrics.mean())\n \n model = SARIMAX(\n endog = y,\n exog = exog,\n order = params['order'],\n seasonal_order = params['seasonal_order'],\n trend = params['trend'],\n **sarimax_kwargs\n ).fit(**fit_kwargs)\n \n bic_list.append(model.bic)\n aic_list.append(model.aic)\n \n results = pd.DataFrame({\n 'params': params_list,\n 'metric': metric_list,\n 'bic' : bic_list,\n 'aic' : aic_list\n })\n \n results = results.sort_values(by='metric', ascending=True)\n results = pd.concat([results, results['params'].apply(pd.Series)], axis=1)\n \n return results" ]
[ [ "numpy.concatenate", "pandas.DataFrame", "numpy.array", "sklearn.model_selection.ParameterGrid" ] ]
taku-y/pymc3
[ "70e3ca5e137b67aac0390c7e3979ec16842c4aed" ]
[ "pymc3/distributions/continuous.py" ]
[ "\"\"\"\npymc3.distributions\n\nA collection of common probability distributions for stochastic\nnodes in PyMC.\n\n\"\"\"\nfrom __future__ import division\n\nimport numpy as np\nimport theano.tensor as tt\nfrom scipy import stats\nimport warnings\n\nfrom pymc3.theanof import floatX\nfrom . import transforms\n\nfrom .dist_math import bound, logpow, gammaln, betaln, std_cdf, i0, i1, alltrue_elemwise\nfrom .distribution import Continuous, draw_values, generate_samples, Bound\n\n__all__ = ['Uniform', 'Flat', 'Normal', 'Beta', 'Exponential', 'Laplace',\n 'StudentT', 'Cauchy', 'HalfCauchy', 'Gamma', 'Weibull',\n 'HalfStudentT', 'StudentTpos', 'Lognormal', 'ChiSquared',\n 'HalfNormal', 'Wald', 'Pareto', 'InverseGamma', 'ExGaussian',\n 'VonMises', 'SkewNormal']\n\n\nclass PositiveContinuous(Continuous):\n \"\"\"Base class for positive continuous distributions\"\"\"\n\n def __init__(self, transform=transforms.log, *args, **kwargs):\n super(PositiveContinuous, self).__init__(\n transform=transform, *args, **kwargs)\n\n\nclass UnitContinuous(Continuous):\n \"\"\"Base class for continuous distributions on [0,1]\"\"\"\n\n def __init__(self, transform=transforms.logodds, *args, **kwargs):\n super(UnitContinuous, self).__init__(\n transform=transform, *args, **kwargs)\n\ndef assert_negative_support(var, label, distname, value=-1e-6):\n # Checks for evidence of positive support for a variable\n if var is None:\n return\n try:\n # Transformed distribution\n support = np.isfinite(var.transformed.distribution.dist\n .logp(value).tag.test_value)\n except AttributeError:\n try:\n # Untransformed distribution\n support = np.isfinite(var.distribution.logp(value).tag.test_value)\n except AttributeError:\n # Otherwise no direct evidence of non-positive support\n support = False\n\n if np.any(support):\n msg = \"The variable specified for {0} has negative support for {1}, \".format(label, distname)\n msg += \"likely making it unsuitable for this parameter.\"\n warnings.warn(msg)\n\n\ndef get_tau_sd(tau=None, sd=None):\n \"\"\"\n Find precision and standard deviation\n\n .. math::\n \\tau = \\frac{1}{\\sigma^2}\n\n Parameters\n ----------\n tau : array-like, optional\n sd : array-like, optional\n\n Results\n -------\n Returns tuple (tau, sd)\n\n Notes\n -----\n If neither tau nor sd is provided, returns (1., 1.)\n \"\"\"\n if tau is None:\n if sd is None:\n sd = 1.\n tau = 1.\n else:\n tau = sd**-2.\n\n else:\n if sd is not None:\n raise ValueError(\"Can't pass both tau and sd\")\n else:\n sd = tau**-.5\n\n # cast tau and sd to float in a way that works for both np.arrays\n # and pure python\n tau = 1. * tau\n sd = 1. * sd\n\n return (floatX(tau), floatX(sd))\n\n\nclass Uniform(Continuous):\n R\"\"\"\n Continuous uniform log-likelihood.\n\n .. 
math::\n\n f(x \\mid lower, upper) = \\frac{1}{upper-lower}\n\n ======== =====================================\n Support :math:`x \\in [lower, upper]`\n Mean :math:`\\dfrac{lower + upper}{2}`\n Variance :math:`\\dfrac{(upper - lower)^2}{12}`\n ======== =====================================\n\n Parameters\n ----------\n lower : float\n Lower limit.\n upper : float\n Upper limit.\n \"\"\"\n\n def __init__(self, lower=0, upper=1, transform='interval',\n *args, **kwargs):\n if transform == 'interval':\n transform = transforms.interval(lower, upper)\n super(Uniform, self).__init__(transform=transform, *args, **kwargs)\n\n self.lower = lower = tt.as_tensor_variable(lower)\n self.upper = upper = tt.as_tensor_variable(upper)\n self.mean = (upper + lower) / 2.\n self.median = self.mean\n\n\n def random(self, point=None, size=None, repeat=None):\n lower, upper = draw_values([self.lower, self.upper],\n point=point)\n return generate_samples(stats.uniform.rvs, loc=lower,\n scale=upper - lower,\n dist_shape=self.shape,\n size=size)\n\n def logp(self, value):\n lower = self.lower\n upper = self.upper\n return bound(-tt.log(upper - lower),\n value >= lower, value <= upper)\n\n\nclass Flat(Continuous):\n \"\"\"\n Uninformative log-likelihood that returns 0 regardless of\n the passed value.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(Flat, self).__init__(*args, **kwargs)\n self.median = 0\n\n def random(self, point=None, size=None, repeat=None):\n raise ValueError('Cannot sample from Flat distribution')\n\n def logp(self, value):\n return tt.zeros_like(value)\n\n\nclass Normal(Continuous):\n R\"\"\"\n Univariate normal log-likelihood.\n\n .. math::\n\n f(x \\mid \\mu, \\tau) =\n \\sqrt{\\frac{\\tau}{2\\pi}}\n \\exp\\left\\{ -\\frac{\\tau}{2} (x-\\mu)^2 \\right\\}\n\n ======== ==========================================\n Support :math:`x \\in \\mathbb{R}`\n Mean :math:`\\mu`\n Variance :math:`\\dfrac{1}{\\tau}` or :math:`\\sigma^2`\n ======== ==========================================\n\n Normal distribution can be parameterized either in terms of precision\n or standard deviation. The link between the two parametrizations is\n given by\n\n .. math::\n\n \\tau = \\dfrac{1}{\\sigma^2}\n\n Parameters\n ----------\n mu : float\n Mean.\n sd : float\n Standard deviation (sd > 0).\n tau : float\n Precision (tau > 0).\n \"\"\"\n\n def __init__(self, mu=0, sd=None, tau=None, **kwargs):\n tau, sd = get_tau_sd(tau=tau, sd=sd)\n self.sd = tt.as_tensor_variable(sd)\n self.tau = tt.as_tensor_variable(tau)\n\n self.mean = self.median = self.mode = self.mu = mu = tt.as_tensor_variable(mu)\n self.variance = 1. / self.tau\n\n assert_negative_support(sd, 'sd', 'Normal')\n assert_negative_support(tau, 'tau', 'Normal')\n\n super(Normal, self).__init__(**kwargs)\n\n def random(self, point=None, size=None, repeat=None):\n mu, tau, _ = draw_values([self.mu, self.tau, self.sd],\n point=point)\n return generate_samples(stats.norm.rvs, loc=mu, scale=tau**-0.5,\n dist_shape=self.shape,\n size=size)\n\n def logp(self, value):\n sd = self.sd\n tau = self.tau\n mu = self.mu\n\n return bound((-tau * (value - mu)**2 + tt.log(tau / np.pi / 2.)) / 2.,\n sd > 0)\n\n\nclass HalfNormal(PositiveContinuous):\n R\"\"\"\n Half-normal log-likelihood.\n\n .. 
math::\n\n f(x \\mid \\tau) =\n \\sqrt{\\frac{2\\tau}{\\pi}}\n \\exp\\left\\{ {\\frac{-x^2 \\tau}{2}}\\right\\}\n\n ======== ==========================================\n Support :math:`x \\in [0, \\infty)`\n Mean :math:`0`\n Variance :math:`\\dfrac{1}{\\tau}` or :math:`\\sigma^2`\n ======== ==========================================\n\n Parameters\n ----------\n sd : float\n Standard deviation (sd > 0).\n tau : float\n Precision (tau > 0).\n \"\"\"\n\n def __init__(self, sd=None, tau=None, *args, **kwargs):\n super(HalfNormal, self).__init__(*args, **kwargs)\n tau, sd = get_tau_sd(tau=tau, sd=sd)\n\n self.sd = sd = tt.as_tensor_variable(sd)\n self.tau = tau = tt.as_tensor_variable(tau)\n\n self.mean = tt.sqrt(2 / (np.pi * self.tau))\n self.variance = (1. - 2 / np.pi) / self.tau\n\n assert_negative_support(tau, 'tau', 'HalfNormal')\n assert_negative_support(sd, 'sd', 'HalfNormal')\n\n def random(self, point=None, size=None, repeat=None):\n sd = draw_values([self.sd], point=point)\n return generate_samples(stats.halfnorm.rvs, loc=0., scale=sd,\n dist_shape=self.shape,\n size=size)\n\n def logp(self, value):\n tau = self.tau\n sd = self.sd\n return bound(-0.5 * tau * value**2 + 0.5 * tt.log(tau * 2. / np.pi),\n value >= 0,\n tau > 0, sd > 0)\n\n\nclass Wald(PositiveContinuous):\n R\"\"\"\n Wald log-likelihood.\n\n .. math::\n\n f(x \\mid \\mu, \\lambda) =\n \\left(\\frac{\\lambda}{2\\pi)}\\right)^{1/2} x^{-3/2}\n \\exp\\left\\{\n -\\frac{\\lambda}{2x}\\left(\\frac{x-\\mu}{\\mu}\\right)^2\n \\right\\}\n\n ======== =============================\n Support :math:`x \\in (0, \\infty)`\n Mean :math:`\\mu`\n Variance :math:`\\dfrac{\\mu^3}{\\lambda}`\n ======== =============================\n\n Wald distribution can be parameterized either in terms of lam or phi.\n The link between the two parametrizations is given by\n\n .. math::\n\n \\phi = \\dfrac{\\lambda}{\\mu}\n\n Parameters\n ----------\n mu : float, optional\n Mean of the distribution (mu > 0).\n lam : float, optional\n Relative precision (lam > 0).\n phi : float, optional\n Alternative shape parameter (phi > 0).\n alpha : float, optional\n Shift/location parameter (alpha >= 0).\n\n Notes\n -----\n To instantiate the distribution specify any of the following\n\n - only mu (in this case lam will be 1)\n - mu and lam\n - mu and phi\n - lam and phi\n\n References\n ----------\n .. [Tweedie1957] Tweedie, M. C. K. (1957).\n Statistical Properties of Inverse Gaussian Distributions I.\n The Annals of Mathematical Statistics, Vol. 28, No. 2, pp. 362-377\n\n .. [Michael1976] Michael, J. R., Schucany, W. R. and Hass, R. W. (1976).\n Generating Random Variates Using Transformations with Multiple Roots.\n The American Statistician, Vol. 30, No. 2, pp. 88-90\n \"\"\"\n\n def __init__(self, mu=None, lam=None, phi=None, alpha=0., *args, **kwargs):\n super(Wald, self).__init__(*args, **kwargs)\n mu, lam, phi = self.get_mu_lam_phi(mu, lam, phi)\n self.alpha = alpha = tt.as_tensor_variable(alpha)\n self.mu = mu = tt.as_tensor_variable(mu)\n self.lam = lam = tt.as_tensor_variable(lam)\n self.phi = phi =tt.as_tensor_variable(phi)\n\n self.mean = self.mu + self.alpha\n self.mode = self.mu * (tt.sqrt(1. 
+ (1.5 * self.mu / self.lam)**2)\n - 1.5 * self.mu / self.lam) + self.alpha\n self.variance = (self.mu**3) / self.lam\n\n assert_negative_support(phi, 'phi', 'Wald')\n assert_negative_support(mu, 'mu', 'Wald')\n assert_negative_support(lam, 'lam', 'Wald')\n\n def get_mu_lam_phi(self, mu, lam, phi):\n if mu is None:\n if lam is not None and phi is not None:\n return lam / phi, lam, phi\n else:\n if lam is None:\n if phi is None:\n return mu, 1., 1. / mu\n else:\n return mu, mu * phi, phi\n else:\n if phi is None:\n return mu, lam, lam / mu\n\n raise ValueError('Wald distribution must specify either mu only, '\n 'mu and lam, mu and phi, or lam and phi.')\n\n def _random(self, mu, lam, alpha, size=None):\n v = np.random.normal(size=size)**2\n value = (mu + (mu**2) * v / (2. * lam) - mu / (2. * lam)\n * np.sqrt(4. * mu * lam * v + (mu * v)**2))\n z = np.random.uniform(size=size)\n i = np.floor(z - mu / (mu + value)) * 2 + 1\n value = (value**-i) * (mu**(i + 1))\n return value + alpha\n\n def random(self, point=None, size=None, repeat=None):\n mu, lam, alpha = draw_values([self.mu, self.lam, self.alpha],\n point=point)\n return generate_samples(self._random,\n mu, lam, alpha,\n dist_shape=self.shape,\n size=size)\n\n def logp(self, value):\n mu = self.mu\n lam = self.lam\n alpha = self.alpha\n # value *must* be iid. Otherwise this is wrong.\n return bound(logpow(lam / (2. * np.pi), 0.5)\n - logpow(value - alpha, 1.5)\n - (0.5 * lam / (value - alpha)\n * ((value - alpha - mu) / mu)**2),\n # XXX these two are redundant. Please, check.\n value > 0, value - alpha > 0,\n mu > 0, lam > 0, alpha >= 0)\n\n\nclass Beta(UnitContinuous):\n R\"\"\"\n Beta log-likelihood.\n\n .. math::\n\n f(x \\mid \\alpha, \\beta) =\n \\frac{x^{\\alpha - 1} (1 - x)^{\\beta - 1}}{B(\\alpha, \\beta)}\n\n ======== ==============================================================\n Support :math:`x \\in (0, 1)`\n Mean :math:`\\dfrac{\\alpha}{\\alpha + \\beta}`\n Variance :math:`\\dfrac{\\alpha \\beta}{(\\alpha+\\beta)^2(\\alpha+\\beta+1)}`\n ======== ==============================================================\n\n Beta distribution can be parameterized either in terms of alpha and\n beta or mean and standard deviation. The link between the two\n parametrizations is given by\n\n .. 
math::\n\n \\alpha &= \\mu \\kappa \\\\\n \\beta &= (1 - \\mu) \\kappa\n\n \\text{where } \\kappa = \\frac{\\mu(1-\\mu)}{\\sigma^2} - 1\n\n Parameters\n ----------\n alpha : float\n alpha > 0.\n beta : float\n beta > 0.\n mu : float\n Alternative mean (0 < mu < 1).\n sd : float\n Alternative standard deviation (sd > 0).\n\n Notes\n -----\n Beta distribution is a conjugate prior for the parameter :math:`p` of\n the binomial distribution.\n \"\"\"\n\n def __init__(self, alpha=None, beta=None, mu=None, sd=None,\n *args, **kwargs):\n super(Beta, self).__init__(*args, **kwargs)\n\n alpha, beta = self.get_alpha_beta(alpha, beta, mu, sd)\n self.alpha = alpha = tt.as_tensor_variable(alpha)\n self.beta = beta = tt.as_tensor_variable(beta)\n\n self.mean = self.alpha / (self.alpha + self.beta)\n self.variance = self.alpha * self.beta / (\n (self.alpha + self.beta)**2 * (self.alpha + self.beta + 1))\n\n assert_negative_support(alpha, 'alpha', 'Beta')\n assert_negative_support(beta, 'beta', 'Beta')\n\n def get_alpha_beta(self, alpha=None, beta=None, mu=None, sd=None):\n if (alpha is not None) and (beta is not None):\n pass\n elif (mu is not None) and (sd is not None):\n kappa = mu * (1 - mu) / sd**2 - 1\n alpha = mu * kappa\n beta = (1 - mu) * kappa\n else:\n raise ValueError('Incompatible parameterization. Either use alpha '\n 'and beta, or mu and sd to specify distribution.')\n\n return alpha, beta\n\n def random(self, point=None, size=None, repeat=None):\n alpha, beta = draw_values([self.alpha, self.beta],\n point=point)\n return generate_samples(stats.beta.rvs, alpha, beta,\n dist_shape=self.shape,\n size=size)\n\n def logp(self, value):\n alpha = self.alpha\n beta = self.beta\n\n return bound(logpow(value, alpha - 1) + logpow(1 - value, beta - 1)\n - betaln(alpha, beta),\n value >= 0, value <= 1,\n alpha > 0, beta > 0)\n\n\nclass Exponential(PositiveContinuous):\n R\"\"\"\n Exponential log-likelihood.\n\n .. math::\n\n f(x \\mid \\lambda) = \\lambda \\exp\\left\\{ -\\lambda x \\right\\}\n\n ======== ============================\n Support :math:`x \\in [0, \\infty)`\n Mean :math:`\\dfrac{1}{\\lambda}`\n Variance :math:`\\dfrac{1}{\\lambda^2}`\n ======== ============================\n\n Parameters\n ----------\n lam : float\n Rate or inverse scale (lam > 0)\n \"\"\"\n\n def __init__(self, lam, *args, **kwargs):\n super(Exponential, self).__init__(*args, **kwargs)\n self.lam = lam = tt.as_tensor_variable(lam)\n self.mean = 1. / self.lam\n self.median = self.mean * tt.log(2)\n self.mode = tt.zeros_like(self.lam)\n\n self.variance = self.lam**-2\n\n assert_negative_support(lam, 'lam', 'Exponential')\n\n def random(self, point=None, size=None, repeat=None):\n lam = draw_values([self.lam], point=point)\n return generate_samples(np.random.exponential, scale=1. / lam,\n dist_shape=self.shape,\n size=size)\n\n def logp(self, value):\n lam = self.lam\n return bound(tt.log(lam) - lam * value, value > 0, lam > 0)\n\n\nclass Laplace(Continuous):\n R\"\"\"\n Laplace log-likelihood.\n\n .. 
math::\n\n f(x \\mid \\alpha, \\beta) =\n \\frac{1}{2b} \\exp \\left\\{ - \\frac{|x - \\mu|}{b} \\right\\}\n\n ======== ========================\n Support :math:`x \\in \\mathbb{R}`\n Mean :math:`\\mu`\n Variance :math:`2 b^2`\n ======== ========================\n\n Parameters\n ----------\n mu : float\n Location parameter.\n b : float\n Scale parameter (b > 0).\n \"\"\"\n\n def __init__(self, mu, b, *args, **kwargs):\n super(Laplace, self).__init__(*args, **kwargs)\n self.b = b = tt.as_tensor_variable(b)\n self.mean = self.median = self.mode = self.mu = mu = tt.as_tensor_variable(mu)\n\n self.variance = 2 * self.b**2\n\n assert_negative_support(b, 'b', 'Laplace')\n\n def random(self, point=None, size=None, repeat=None):\n mu, b = draw_values([self.mu, self.b], point=point)\n return generate_samples(np.random.laplace, mu, b,\n dist_shape=self.shape,\n size=size)\n\n def logp(self, value):\n mu = self.mu\n b = self.b\n\n return -tt.log(2 * b) - abs(value - mu) / b\n\n\nclass Lognormal(PositiveContinuous):\n R\"\"\"\n Log-normal log-likelihood.\n\n Distribution of any random variable whose logarithm is normally\n distributed. A variable might be modeled as log-normal if it can\n be thought of as the multiplicative product of many small\n independent factors.\n\n .. math::\n\n f(x \\mid \\mu, \\tau) =\n \\frac{1}{x} \\sqrt{\\frac{\\tau}{2\\pi}}\n \\exp\\left\\{ -\\frac{\\tau}{2} (\\ln(x)-\\mu)^2 \\right\\}\n\n ======== =========================================================================\n Support :math:`x \\in (0, 1)`\n Mean :math:`\\exp\\{\\mu + \\frac{1}{2\\tau}\\}`\n Variance :math:\\(\\exp\\{\\frac{1}{\\tau}\\} - 1\\) \\times \\exp\\{2\\mu + \\frac{1}{\\tau}\\}\n ======== =========================================================================\n\n Parameters\n ----------\n mu : float\n Location parameter.\n tau : float\n Scale parameter (tau > 0).\n \"\"\"\n\n def __init__(self, mu=0, sd=None, tau=None, *args, **kwargs):\n super(Lognormal, self).__init__(*args, **kwargs)\n tau, sd = get_tau_sd(tau=tau, sd=sd)\n\n self.mu = mu = tt.as_tensor_variable(mu)\n self.tau = tau = tt.as_tensor_variable(tau)\n self.sd = sd = tt.as_tensor_variable(sd)\n\n self.mean = tt.exp(self.mu + 1. / (2 * self.tau))\n self.median = tt.exp(self.mu)\n self.mode = tt.exp(self.mu - 1. / self.tau)\n self.variance = (tt.exp(1. / self.tau) - 1) * tt.exp(2 * self.mu + 1. / self.tau)\n\n assert_negative_support(tau, 'tau', 'Lognormal')\n assert_negative_support(sd, 'sd', 'Lognormal')\n\n def _random(self, mu, tau, size=None):\n samples = np.random.normal(size=size)\n return np.exp(mu + (tau**-0.5) * samples)\n\n def random(self, point=None, size=None, repeat=None):\n mu, tau = draw_values([self.mu, self.tau], point=point)\n return generate_samples(self._random, mu, tau,\n dist_shape=self.shape,\n size=size)\n\n def logp(self, value):\n mu = self.mu\n tau = self.tau\n return bound(-0.5 * tau * (tt.log(value) - mu)**2\n + 0.5 * tt.log(tau / (2. * np.pi))\n - tt.log(value),\n tau > 0)\n\n\nclass StudentT(Continuous):\n R\"\"\"\n Non-central Student's T log-likelihood.\n\n Describes a normal variable whose precision is gamma distributed.\n If only nu parameter is passed, this specifies a standard (central)\n Student's T.\n\n .. 
math::\n\n f(x|\\mu,\\lambda,\\nu) =\n \\frac{\\Gamma(\\frac{\\nu + 1}{2})}{\\Gamma(\\frac{\\nu}{2})}\n \\left(\\frac{\\lambda}{\\pi\\nu}\\right)^{\\frac{1}{2}}\n \\left[1+\\frac{\\lambda(x-\\mu)^2}{\\nu}\\right]^{-\\frac{\\nu+1}{2}}\n\n ======== ========================\n Support :math:`x \\in \\mathbb{R}`\n ======== ========================\n\n Parameters\n ----------\n nu : int\n Degrees of freedom (nu > 0).\n mu : float\n Location parameter.\n lam : float\n Scale parameter (lam > 0).\n \"\"\"\n\n def __init__(self, nu, mu=0, lam=None, sd=None, *args, **kwargs):\n super(StudentT, self).__init__(*args, **kwargs)\n self.nu = nu = tt.as_tensor_variable(nu)\n lam, sd = get_tau_sd(tau=lam, sd=sd)\n self.lam = lam = tt.as_tensor_variable(lam)\n self.sd = sd = tt.as_tensor_variable(sd)\n self.mean = self.median = self.mode = self.mu = mu = tt.as_tensor_variable(mu)\n\n self.variance = tt.switch((nu > 2) * 1,\n (1 / self.lam) * (nu / (nu - 2)),\n np.inf)\n\n assert_negative_support(lam, 'lam (sd)', 'StudentT')\n assert_negative_support(nu, 'nu', 'StudentT')\n\n def random(self, point=None, size=None, repeat=None):\n nu, mu, lam = draw_values([self.nu, self.mu, self.lam],\n point=point)\n return generate_samples(stats.t.rvs, nu, loc=mu, scale=lam**-0.5,\n dist_shape=self.shape,\n size=size)\n\n def logp(self, value):\n nu = self.nu\n mu = self.mu\n lam = self.lam\n sd = self.sd\n\n return bound(gammaln((nu + 1.0) / 2.0)\n + .5 * tt.log(lam / (nu * np.pi))\n - gammaln(nu / 2.0)\n - (nu + 1.0) / 2.0 * tt.log1p(lam * (value - mu)**2 / nu),\n lam > 0, nu > 0, sd > 0)\n\n\nclass Pareto(PositiveContinuous):\n R\"\"\"\n Pareto log-likelihood.\n\n Often used to characterize wealth distribution, or other examples of the\n 80/20 rule.\n\n .. math::\n\n f(x \\mid \\alpha, m) = \\frac{\\alpha m^{\\alpha}}{x^{\\alpha+1}}\n\n ======== =============================================================\n Support :math:`x \\in [m, \\infty)`\n Mean :math:`\\dfrac{\\alpha m}{\\alpha - 1}` for :math:`\\alpha \\ge 1`\n Variance :math:`\\dfrac{m \\alpha}{(\\alpha - 1)^2 (\\alpha - 2)}`\n for :math:`\\alpha > 2`\n ======== =============================================================\n\n Parameters\n ----------\n alpha : float\n Shape parameter (alpha > 0).\n m : float\n Scale parameter (m > 0).\n \"\"\"\n\n def __init__(self, alpha, m, *args, **kwargs):\n super(Pareto, self).__init__(*args, **kwargs)\n self.alpha = alpha = tt.as_tensor_variable(alpha)\n self.m = m = tt.as_tensor_variable(m)\n\n self.mean = tt.switch(tt.gt(alpha, 1), alpha *\n m / (alpha - 1.), np.inf)\n self.median = m * 2.**(1. / alpha)\n self.variance = tt.switch(\n tt.gt(alpha, 2),\n (alpha * m**2) / ((alpha - 2.) * (alpha - 1.)**2),\n np.inf)\n\n assert_negative_support(alpha, 'alpha', 'Pareto')\n assert_negative_support(m, 'm', 'Pareto')\n\n\n def _random(self, alpha, m, size=None):\n u = np.random.uniform(size=size)\n return m * (1. - u)**(-1. / alpha)\n\n def random(self, point=None, size=None, repeat=None):\n alpha, m = draw_values([self.alpha, self.m],\n point=point)\n return generate_samples(self._random, alpha, m,\n dist_shape=self.shape,\n size=size)\n\n def logp(self, value):\n alpha = self.alpha\n m = self.m\n return bound(tt.log(alpha) + logpow(m, alpha)\n - logpow(value, alpha + 1),\n value >= m, alpha > 0, m > 0)\n\n\nclass Cauchy(Continuous):\n R\"\"\"\n Cauchy log-likelihood.\n\n Also known as the Lorentz or the Breit-Wigner distribution.\n\n .. 
math::\n\n f(x \\mid \\alpha, \\beta) =\n \\frac{1}{\\pi \\beta [1 + (\\frac{x-\\alpha}{\\beta})^2]}\n\n ======== ========================\n Support :math:`x \\in \\mathbb{R}`\n Mode :math:`\\alpha`\n Mean undefined\n Variance undefined\n ======== ========================\n\n Parameters\n ----------\n alpha : float\n Location parameter\n beta : float\n Scale parameter > 0\n \"\"\"\n\n def __init__(self, alpha, beta, *args, **kwargs):\n super(Cauchy, self).__init__(*args, **kwargs)\n self.median = self.mode = self.alpha = tt.as_tensor_variable(alpha)\n self.beta = tt.as_tensor_variable(beta)\n\n assert_negative_support(beta, 'beta', 'Cauchy')\n\n def _random(self, alpha, beta, size=None):\n u = np.random.uniform(size=size)\n return alpha + beta * np.tan(np.pi * (u - 0.5))\n\n def random(self, point=None, size=None, repeat=None):\n alpha, beta = draw_values([self.alpha, self.beta],\n point=point)\n return generate_samples(self._random, alpha, beta,\n dist_shape=self.shape,\n size=size)\n\n def logp(self, value):\n alpha = self.alpha\n beta = self.beta\n return bound(- tt.log(np.pi) - tt.log(beta)\n - tt.log1p(((value - alpha) / beta)**2),\n beta > 0)\n\n\nclass HalfCauchy(PositiveContinuous):\n R\"\"\"\n Half-Cauchy log-likelihood.\n\n .. math::\n\n f(x \\mid \\beta) = \\frac{2}{\\pi \\beta [1 + (\\frac{x}{\\beta})^2]}\n\n ======== ========================\n Support :math:`x \\in \\mathbb{R}`\n Mode 0\n Mean undefined\n Variance undefined\n ======== ========================\n\n Parameters\n ----------\n beta : float\n Scale parameter (beta > 0).\n \"\"\"\n\n def __init__(self, beta, *args, **kwargs):\n super(HalfCauchy, self).__init__(*args, **kwargs)\n self.mode = tt.as_tensor_variable(0)\n self.median = tt.as_tensor_variable(beta)\n self.beta = tt.as_tensor_variable(beta)\n\n assert_negative_support(beta, 'beta', 'HalfCauchy')\n\n def _random(self, beta, size=None):\n u = np.random.uniform(size=size)\n return beta * np.abs(np.tan(np.pi * (u - 0.5)))\n\n def random(self, point=None, size=None, repeat=None):\n beta = draw_values([self.beta], point=point)\n return generate_samples(self._random, beta,\n dist_shape=self.shape,\n size=size)\n\n def logp(self, value):\n beta = self.beta\n return bound(tt.log(2) - tt.log(np.pi) - tt.log(beta)\n - tt.log1p((value / beta)**2),\n value >= 0, beta > 0)\n\n\nclass Gamma(PositiveContinuous):\n R\"\"\"\n Gamma log-likelihood.\n\n Represents the sum of alpha exponentially distributed random variables,\n each of which has mean beta.\n\n .. math::\n\n f(x \\mid \\alpha, \\beta) =\n \\frac{\\beta^{\\alpha}x^{\\alpha-1}e^{-\\beta x}}{\\Gamma(\\alpha)}\n\n ======== ===============================\n Support :math:`x \\in (0, \\infty)`\n Mean :math:`\\dfrac{\\alpha}{\\beta}`\n Variance :math:`\\dfrac{\\alpha}{\\beta^2}`\n ======== ===============================\n\n Gamma distribution can be parameterized either in terms of alpha and\n beta or mean and standard deviation. The link between the two\n parametrizations is given by\n\n .. 
math::\n\n \\alpha &= \\frac{\\mu^2}{\\sigma^2} \\\\\n \\beta &= \\frac{\\mu}{\\sigma^2}\n\n Parameters\n ----------\n alpha : float\n Shape parameter (alpha > 0).\n beta : float\n Rate parameter (beta > 0).\n mu : float\n Alternative shape parameter (mu > 0).\n sd : float\n Alternative scale parameter (sd > 0).\n \"\"\"\n\n def __init__(self, alpha=None, beta=None, mu=None, sd=None,\n *args, **kwargs):\n super(Gamma, self).__init__(*args, **kwargs)\n alpha, beta = self.get_alpha_beta(alpha, beta, mu, sd)\n self.alpha = alpha = tt.as_tensor_variable(alpha)\n self.beta = beta = tt.as_tensor_variable(beta)\n self.mean = alpha / beta\n self.mode = tt.maximum((alpha - 1) / beta, 0)\n self.variance = alpha / beta**2\n\n assert_negative_support(alpha, 'alpha', 'Gamma')\n assert_negative_support(beta, 'beta', 'Gamma')\n\n def get_alpha_beta(self, alpha=None, beta=None, mu=None, sd=None):\n if (alpha is not None) and (beta is not None):\n pass\n elif (mu is not None) and (sd is not None):\n alpha = mu**2 / sd**2\n beta = mu / sd**2\n else:\n raise ValueError('Incompatible parameterization. Either use '\n 'alpha and beta, or mu and sd to specify '\n 'distribution.')\n\n return alpha, beta\n\n def random(self, point=None, size=None, repeat=None):\n alpha, beta = draw_values([self.alpha, self.beta],\n point=point)\n return generate_samples(stats.gamma.rvs, alpha, scale=1. / beta,\n dist_shape=self.shape,\n size=size)\n\n def logp(self, value):\n alpha = self.alpha\n beta = self.beta\n return bound(\n -gammaln(alpha) + logpow(\n beta, alpha) - beta * value + logpow(value, alpha - 1),\n value >= 0,\n alpha > 0,\n beta > 0)\n\n\nclass InverseGamma(PositiveContinuous):\n R\"\"\"\n Inverse gamma log-likelihood, the reciprocal of the gamma distribution.\n\n .. math::\n\n f(x \\mid \\alpha, \\beta) =\n \\frac{\\beta^{\\alpha}}{\\Gamma(\\alpha)} x^{-\\alpha - 1}\n \\exp\\left(\\frac{-\\beta}{x}\\right)\n\n ======== ======================================================\n Support :math:`x \\in (0, \\infty)`\n Mean :math:`\\dfrac{\\beta}{\\alpha-1}` for :math:`\\alpha > 1`\n Variance :math:`\\dfrac{\\beta^2}{(\\alpha-1)^2(\\alpha)}`\n for :math:`\\alpha > 2`\n ======== ======================================================\n\n Parameters\n ----------\n alpha : float\n Shape parameter (alpha > 0).\n beta : float\n Scale parameter (beta > 0).\n \"\"\"\n\n def __init__(self, alpha, beta=1, *args, **kwargs):\n super(InverseGamma, self).__init__(*args, **kwargs)\n self.alpha = alpha = tt.as_tensor_variable(alpha)\n self.beta = beta = tt.as_tensor_variable(beta)\n\n self.mean = self._calculate_mean()\n self.mode = beta / (alpha + 1.)\n self.variance = tt.switch(tt.gt(alpha, 2),\n (beta**2) / (alpha * (alpha - 1.)**2),\n np.inf)\n assert_negative_support(alpha, 'alpha', 'InverseGamma')\n assert_negative_support(beta, 'beta', 'InverseGamma')\n\n def _calculate_mean(self):\n m = self.beta / (self.alpha - 1.)\n try:\n return (self.alpha > 1) * m or np.inf\n except ValueError: # alpha is an array\n m[self.alpha <= 1] = np.inf\n return m\n\n def random(self, point=None, size=None, repeat=None):\n alpha, beta = draw_values([self.alpha, self.beta],\n point=point)\n return generate_samples(stats.invgamma.rvs, a=alpha, scale=beta,\n dist_shape=self.shape,\n size=size)\n\n def logp(self, value):\n alpha = self.alpha\n beta = self.beta\n return bound(logpow(beta, alpha) - gammaln(alpha) - beta / value\n + logpow(value, -alpha - 1),\n value > 0, alpha > 0, beta > 0)\n\n\nclass ChiSquared(Gamma):\n R\"\"\"\n :math:`\\chi^2` 
log-likelihood.\n\n .. math::\n\n f(x \\mid \\nu) = \\frac{x^{(\\nu-2)/2}e^{-x/2}}{2^{\\nu/2}\\Gamma(\\nu/2)}\n\n ======== ===============================\n Support :math:`x \\in [0, \\infty)`\n Mean :math:`\\nu`\n Variance :math:`2 \\nu`\n ======== ===============================\n\n Parameters\n ----------\n nu : int\n Degrees of freedom (nu > 0).\n \"\"\"\n\n def __init__(self, nu, *args, **kwargs):\n self.nu = nu = tt.as_tensor_variable(nu)\n super(ChiSquared, self).__init__(alpha=nu / 2., beta=0.5,\n *args, **kwargs)\n\n\nclass Weibull(PositiveContinuous):\n R\"\"\"\n Weibull log-likelihood.\n\n .. math::\n\n f(x \\mid \\alpha, \\beta) =\n \\frac{\\alpha x^{\\alpha - 1}\n \\exp(-(\\frac{x}{\\beta})^{\\alpha})}{\\beta^\\alpha}\n\n ======== ====================================================\n Support :math:`x \\in [0, \\infty)`\n Mean :math:`\\beta \\Gamma(1 + \\frac{1}{\\alpha})`\n Variance :math:`\\beta^2 \\Gamma(1 + \\frac{2}{\\alpha} - \\mu^2)`\n ======== ====================================================\n\n Parameters\n ----------\n alpha : float\n Shape parameter (alpha > 0).\n beta : float\n Scale parameter (beta > 0).\n \"\"\"\n\n def __init__(self, alpha, beta, *args, **kwargs):\n super(Weibull, self).__init__(*args, **kwargs)\n self.alpha = alpha = tt.as_tensor_variable(alpha)\n self.beta = beta = tt.as_tensor_variable(beta)\n self.mean = beta * tt.exp(gammaln(1 + 1. / alpha))\n self.median = beta * tt.exp(gammaln(tt.log(2)))**(1. / alpha)\n self.variance = (beta**2) * \\\n tt.exp(gammaln(1 + 2. / alpha - self.mean**2))\n\n assert_negative_support(alpha, 'alpha', 'Weibull')\n assert_negative_support(beta, 'beta', 'Weibull')\n\n def random(self, point=None, size=None, repeat=None):\n alpha, beta = draw_values([self.alpha, self.beta],\n point=point)\n\n def _random(a, b, size=None):\n return b * (-np.log(np.random.uniform(size=size)))**(1 / a)\n\n return generate_samples(_random, alpha, beta,\n dist_shape=self.shape,\n size=size)\n\n def logp(self, value):\n alpha = self.alpha\n beta = self.beta\n return bound(tt.log(alpha) - tt.log(beta)\n + (alpha - 1) * tt.log(value / beta)\n - (value / beta)**alpha,\n value >= 0, alpha > 0, beta > 0)\n\n\ndef StudentTpos(*args, **kwargs):\n warnings.warn(\"StudentTpos has been deprecated. In future, use HalfStudentT instead.\",\n DeprecationWarning)\n return HalfStudentT(*args, **kwargs)\n\nHalfStudentT = Bound(StudentT, lower=0)\n\n\nclass ExGaussian(Continuous):\n R\"\"\"\n Exponentially modified Gaussian log-likelihood.\n\n Results from the convolution of a normal distribution with an exponential\n distribution.\n\n .. math::\n\n f(x \\mid \\mu, \\sigma, \\tau) =\n \\frac{1}{\\nu}\\;\n \\exp\\left\\{\\frac{\\mu-x}{\\nu}+\\frac{\\sigma^2}{2\\nu^2}\\right\\}\n \\Phi\\left(\\frac{x-\\mu}{\\sigma}-\\frac{\\sigma}{\\nu}\\right)\n\n where :math:`\\Phi` is the cumulative distribution function of the\n standard normal distribution.\n\n ======== ========================\n Support :math:`x \\in \\mathbb{R}`\n Mean :math:`\\mu + \\nu`\n Variance :math:`\\sigma^2 + \\nu^2`\n ======== ========================\n\n Parameters\n ----------\n mu : float\n Mean of the normal distribution.\n sigma : float\n Standard deviation of the normal distribution (sigma > 0).\n nu : float\n Mean of the exponential distribution (nu > 0).\n\n References\n ----------\n .. [Rigby2005] Rigby R.A. and Stasinopoulos D.M. (2005).\n \"Generalized additive models for location, scale and shape\"\n Applied Statististics., 54, part 3, pp 507-554.\n\n .. 
[Lacouture2008] Lacouture, Y. and Couseanou, D. (2008).\n \"How to use MATLAB to fit the ex-Gaussian and other probability\n functions to a distribution of response times\".\n Tutorials in Quantitative Methods for Psychology,\n Vol. 4, No. 1, pp 35-45.\n \"\"\"\n\n def __init__(self, mu, sigma, nu, *args, **kwargs):\n super(ExGaussian, self).__init__(*args, **kwargs)\n self.mu = mu = tt.as_tensor_variable(mu)\n self.sigma = sigma = tt.as_tensor_variable(sigma)\n self.nu = nu = tt.as_tensor_variable(nu)\n self.mean = mu + nu\n self.variance = (sigma**2) + (nu**2)\n\n assert_negative_support(sigma, 'sigma', 'ExGaussian')\n assert_negative_support(nu, 'nu', 'ExGaussian')\n\n def random(self, point=None, size=None, repeat=None):\n mu, sigma, nu = draw_values([self.mu, self.sigma, self.nu],\n point=point)\n\n def _random(mu, sigma, nu, size=None):\n return (np.random.normal(mu, sigma, size=size)\n + np.random.exponential(scale=nu, size=size))\n\n return generate_samples(_random, mu, sigma, nu,\n dist_shape=self.shape,\n size=size)\n\n def logp(self, value):\n mu = self.mu\n sigma = self.sigma\n nu = self.nu\n\n # This condition suggested by exGAUS.R from gamlss\n lp = tt.switch(tt.gt(nu, 0.05 * sigma),\n - tt.log(nu) + (mu - value) / nu + 0.5 * (sigma / nu)**2\n + logpow(std_cdf((value - mu) / sigma - sigma / nu), 1.),\n - tt.log(sigma * tt.sqrt(2 * np.pi))\n - 0.5 * ((value - mu) / sigma)**2)\n return bound(lp, sigma > 0., nu > 0.)\n\n\nclass VonMises(Continuous):\n R\"\"\"\n Univariate VonMises log-likelihood.\n\n .. math::\n f(x \\mid \\mu, \\kappa) =\n \\frac{e^{\\kappa\\cos(x-\\mu)}}{2\\pi I_0(\\kappa)}\n\n where :I_0 is the modified Bessel function of order 0.\n\n ======== ==========================================\n Support :math:`x \\in [-\\pi, \\pi]`\n Mean :math:`\\mu`\n Variance :math:`1-\\frac{I_1(\\kappa)}{I_0(\\kappa)}`\n ======== ==========================================\n\n Parameters\n ----------\n mu : float\n Mean.\n kappa : float\n Concentration (\\frac{1}{kappa} is analogous to \\sigma^2).\n \"\"\"\n\n def __init__(self, mu=0.0, kappa=None, transform='circular',\n *args, **kwargs):\n if transform == 'circular':\n transform = transforms.Circular()\n super(VonMises, self).__init__(transform=transform, *args, **kwargs)\n self.mean = self.median = self.mode = self.mu = mu = tt.as_tensor_variable(mu)\n self.kappa = kappa = tt.as_tensor_variable(kappa)\n self.variance = 1 - i1(kappa) / i0(kappa)\n\n assert_negative_support(kappa, 'kappa', 'VonMises')\n\n def random(self, point=None, size=None, repeat=None):\n mu, kappa = draw_values([self.mu, self.kappa],\n point=point)\n return generate_samples(stats.vonmises.rvs, loc=mu, kappa=kappa,\n dist_shape=self.shape,\n size=size)\n\n def logp(self, value):\n mu = self.mu\n kappa = self.kappa\n return bound(kappa * tt.cos(mu - value) - tt.log(2 * np.pi * i0(kappa)), value >= -np.pi, value <= np.pi, kappa >= 0)\n\n\nclass SkewNormal(Continuous):\n R\"\"\"\n Univariate skew-normal log-likelihood.\n\n .. math::\n f(x \\mid \\mu, \\tau, \\alpha) =\n 2 \\Phi((x-\\mu)\\sqrt{\\tau}\\alpha) \\phi(x,\\mu,\\tau)\n\n ======== ==========================================\n Support :math:`x \\in \\mathbb{R}`\n Mean :math:`\\mu + \\sigma \\sqrt{\\frac{2}{\\pi}} \\frac {\\alpha }{{\\sqrt {1+\\alpha ^{2}}}}`\n Variance :math:`\\sigma^2 \\left( 1-\\frac{2\\alpha^2}{(\\alpha^2+1) \\pi} \\right)`\n ======== ==========================================\n\n Skew-normal distribution can be parameterized either in terms of precision\n or standard deviation. 
The link between the two parametrizations is\n given by\n\n .. math::\n \\tau = \\dfrac{1}{\\sigma^2}\n\n Parameters\n ----------\n mu : float\n Location parameter.\n sd : float\n Scale parameter (sd > 0).\n tau : float\n Alternative scale parameter (tau > 0).\n alpha : float\n Skewness parameter.\n\n Notes\n -----\n When alpha=0 we recover the Normal distribution and mu becomes the mean,\n tau the precision and sd the standard deviation. In the limit of alpha\n approaching plus/minus infinite we get a half-normal distribution.\n\n \"\"\"\n def __init__(self, mu=0.0, sd=None, tau=None, alpha=1, *args, **kwargs):\n super(SkewNormal, self).__init__(*args, **kwargs)\n tau, sd = get_tau_sd(tau=tau, sd=sd)\n self.mu = mu = tt.as_tensor_variable(mu)\n self.tau = tt.as_tensor_variable(tau)\n self.sd = tt.as_tensor_variable(sd)\n\n self.alpha = alpha = tt.as_tensor_variable(alpha)\n\n self.mean = mu + self.sd * (2 / np.pi)**0.5 * alpha / (1 + alpha**2)**0.5\n self.variance = self.sd**2 * (1 - (2 * alpha**2) / ((1 + alpha**2) * np.pi))\n\n assert_negative_support(tau, 'tau', 'SkewNormal')\n assert_negative_support(sd, 'sd', 'SkewNormal')\n\n def random(self, point=None, size=None, repeat=None):\n mu, tau, _, alpha = draw_values(\n [self.mu, self.tau, self.sd, self.alpha], point=point)\n return generate_samples(stats.skewnorm.rvs,\n a=alpha, loc=mu, scale=tau**-0.5,\n dist_shape=self.shape,\n size=size)\n\n def logp(self, value):\n tau = self.tau\n sd = self.sd\n mu = self.mu\n alpha = self.alpha\n return bound(\n tt.log(1 +\n tt.erf(((value - mu) * tt.sqrt(tau) * alpha) / tt.sqrt(2)))\n + (-tau * (value - mu)**2\n + tt.log(tau / np.pi / 2.)) / 2.,\n tau > 0, sd > 0)\n\n\nclass Triangular(Continuous):\n \"\"\"\n Continuous Triangular log-likelihood\n Implemented by J. A. Fonseca 22/12/16\n\n Parameters\n ----------\n lower : float\n Lower limit.\n c: float\n mode\n upper : float\n Upper limit.\n \"\"\"\n\n def __init__(self, lower=0, upper=1, c=0.5,\n *args, **kwargs):\n super(Triangular, self).__init__(*args, **kwargs)\n\n self.c = c\n self.lower = lower\n self.upper = upper\n self.mean = c\n self.median = self.mean\n\n def random(self, point=None, size=None):\n c, lower, upper = draw_values([self.c, self.lower, self.upper],\n point=point)\n return generate_samples(stats.triang.rvs, c=c-lower, loc=lower, scale=upper-lower,\n size=size, dist_shape=self.shape, random_state=None)\n\n def logp(self, value):\n c = self.c\n lower = self.lower\n upper = self.upper\n return tt.switch(alltrue_elemwise([lower <= value, value < c]),\n tt.log(2 * (value - lower) / ((upper - lower) * (c - lower))),\n tt.switch(tt.eq(value, c), tt.log(2 / (upper - lower)),\n tt.switch(alltrue_elemwise([c < value, value <= upper]),\n tt.log(2 * (upper - value) / ((upper - lower) * (upper - c))),np.inf)))\n" ]
[ [ "numpy.random.normal", "numpy.random.exponential", "numpy.tan", "numpy.exp", "numpy.any", "numpy.random.uniform", "numpy.sqrt", "numpy.floor" ] ]
richardliaw/airflow-provider-ray-1
[ "4124afd884367a5ee611e74d828892af48bee922" ]
[ "ray_provider/example_dags/xgboost_pandas_tune_breast_cancer.py" ]
[ "import os\nimport json\nfrom airflow.decorators import dag, task\nfrom airflow.utils.dates import days_ago\nfrom airflow.operators.dummy_operator import DummyOperator\nimport ray\nfrom ray_provider.decorators.ray_decorators import ray_task\nimport numpy as np\nimport xgboost_ray as xgbr\nimport xgboost as xgb\nfrom ray import tune\nfrom ray.tune.schedulers import ASHAScheduler\nfrom ray_provider.xcom.ray_backend import RayBackend\nfrom xgboost_ray.tune import TuneReportCheckpointCallback\nfrom datetime import datetime\n\n# These args will get passed on to each operator\n# You can override them on a per-task basis during operator initialization\ndefault_args = {\n \"owner\": \"airflow\",\n \"on_success_callback\": RayBackend.on_success_callback,\n \"on_failure_callback\": RayBackend.on_failure_callback,\n}\n\ntask_args = {\"ray_conn_id\": \"ray_cluster_connection\"}\n\n# Change to True to load simple sklearn dataset\nSIMPLE = False\n\n# Change actors and cpus per actor here as per resources allow\nXGB_RAY_PARAMS = xgbr.RayParams(max_actor_restarts=1, num_actors=1, cpus_per_actor=1)\n\nROOT_DIR = \".\"\nLOCAL_DIR = f\"{ROOT_DIR}/ray_results\"\n\n\n@dag(\n default_args=default_args,\n schedule_interval=None,\n start_date=datetime(2021, 1, 1, 0, 0, 0),\n tags=[\"xgboost-pandas-tune\"],\n)\ndef xgboost_pandas_tune_breast_cancer():\n @ray_task(**task_args)\n def load_dataframe() -> \"ray.ObjectRef\":\n \"\"\"\n build dataframe from breast cancer dataset\n \"\"\"\n print(\"Loading CSV\")\n\n if SIMPLE:\n print(\"Loading simple from sklearn.datasets\")\n from sklearn import datasets\n\n data = datasets.load_breast_cancer(return_X_y=True)\n else:\n import pandas as pd\n\n url = (\n \"https://archive.ics.uci.edu/ml/machine-learning-databases/\"\n \"00280/HIGGS.csv.gz\"\n )\n\n colnames = [\"label\"] + [\"feature-%02d\" % i for i in range(1, 29)]\n data = pd.read_csv(url, compression=\"gzip\", names=colnames)\n print(\"loaded higgs\")\n print(\"Loaded CSV.\")\n\n return data\n\n @ray_task(**task_args)\n def split_train_test(data):\n print(\"Splitting Data to Train and Test Sets\")\n print(f\"Creating data matrix: {data, SIMPLE}\")\n\n if SIMPLE:\n from sklearn.model_selection import train_test_split\n\n print(\"Splitting data\")\n data, labels = data\n train_x, test_x, train_y, test_y = train_test_split(\n data, labels, test_size=0.25\n )\n\n train_set = xgbr.RayDMatrix(train_x, train_y)\n test_set = xgbr.RayDMatrix(test_x, test_y)\n else:\n df_train = data[(data[\"feature-01\"] < 0.4)]\n colnames = [\"label\"] + [\"feature-%02d\" % i for i in range(1, 29)]\n train_set = xgbr.RayDMatrix(df_train, label=\"label\", columns=colnames)\n df_validation = data[\n (data[\"feature-01\"] >= 0.4) & (data[\"feature-01\"] < 0.8)\n ]\n test_set = xgbr.RayDMatrix(df_validation, label=\"label\")\n\n print(\"finished data matrix\")\n\n return train_set, test_set\n\n # This could be in a library of trainables\n def train_model(config, checkpoint_dir=None, data_dir=None, data=()):\n dtrain, dvalidation = data\n evallist = [(dvalidation, \"eval\")]\n # evals_result = {}\n config = {\n \"tree_method\": \"hist\",\n \"eval_metric\": [\"logloss\", \"error\"],\n }\n print(\"Start training with TuneReportCheckpointCallback\")\n bst = xgbr.train(\n params=config,\n dtrain=dtrain,\n ray_params=XGB_RAY_PARAMS,\n num_boost_round=100,\n evals=evallist,\n callbacks=[TuneReportCheckpointCallback(filename=f\"model.xgb\")],\n )\n\n @ray_task(**task_args)\n def tune_model(data):\n\n search_space = {\n # You can mix constants with 
search space objects.\n \"objective\": \"binary:logistic\",\n \"eval_metric\": [\"logloss\", \"error\"],\n \"max_depth\": tune.randint(1, 9),\n \"min_child_weight\": tune.choice([1, 2, 3]),\n \"subsample\": tune.uniform(0.5, 1.0),\n \"eta\": tune.loguniform(1e-4, 1e-1),\n }\n\n print(\"enabling aggressive early stopping of bad trials\")\n # This will enable aggressive early stopping of bad trials.\n scheduler = ASHAScheduler(\n max_t=4, grace_period=1, reduction_factor=2 # 4 training iterations\n )\n\n print(\"Tuning\")\n\n analysis = tune.run(\n tune.with_parameters(train_model, data=data),\n metric=\"eval-logloss\",\n mode=\"min\",\n local_dir=LOCAL_DIR,\n # You can add \"gpu\": 0.1 to allocate GPUs\n resources_per_trial=XGB_RAY_PARAMS.get_tune_resources(),\n config=search_space,\n num_samples=4,\n scheduler=scheduler,\n )\n\n print(\"Done Tuning\")\n\n return analysis\n\n @ray_task(**task_args)\n def load_best_model_checkpoint(analysis):\n print(\"Checking Analysis\")\n\n best_bst = xgb.Booster()\n\n print(\n f\"Analysis Best Result on eval-error is: {analysis.best_result['eval-error']}\"\n )\n print(\"Loading Model with Best Params\")\n\n best_bst.load_model(os.path.join(analysis.best_checkpoint, \"model.xgb\"))\n accuracy = 1.0 - analysis.best_result[\"eval-error\"]\n\n print(f\"Best model parameters: {analysis.best_config}\")\n print(f\"Best model total accuracy: {accuracy:.4f}\")\n\n # We could now do further predictions with\n # best_bst.predict(...)\n return best_bst\n\n build_raw_df = load_dataframe()\n data = split_train_test(build_raw_df)\n analysis = tune_model(data)\n best_checkpoint = load_best_model_checkpoint(analysis)\n\n kickoff_dag = DummyOperator(task_id=\"kickoff_dag\")\n complete_dag = DummyOperator(task_id=\"complete_dag\")\n\n kickoff_dag >> build_raw_df\n best_checkpoint >> complete_dag\n\n\nxgboost_pandas_tune_breast_cancer = xgboost_pandas_tune_breast_cancer()\n" ]
[ [ "sklearn.model_selection.train_test_split", "pandas.read_csv", "sklearn.datasets.load_breast_cancer" ] ]
Asurada2015/pymoo
[ "023a787d0b78813e789f170a3e94b2de85605aff" ]
[ "pymoo/interface.py" ]
[ "\"\"\"\nThis class provide an interface for other libraries to specific modules. For example, the evolutionary operations\ncan be used easily just by calling a function and providing the lower and upper bounds of the problem.\n\n\"\"\"\nimport copy\nimport types\n\nimport numpy as np\n\nfrom pymoo.model.algorithm import filter_optimum\nfrom pymoo.model.individual import Individual\nfrom pymoo.model.population import Population\nfrom pymoo.model.problem import Problem\n\n\n# =========================================================================================================\n# A global interface for some features\n# =========================================================================================================\n\n\ndef get_problem_func(n_var, xl, xu, type_var):\n class P(Problem):\n def __init__(self) -> None:\n super().__init__(n_var=n_var, n_obj=1, n_constr=0, xl=xl, xu=xu, type_var=type_var)\n\n return P\n\n\ndef sample(sampling, n_samples, n_var, xl=0, xu=1, **kwargs):\n problem = get_problem_func(n_var, xl, xu, None)(**kwargs)\n return sampling.do(problem, n_samples, pop=None, **kwargs)\n\n\ndef crossover(crossover, a, b, c=None, xl=0, xu=1, type_var=np.double, **kwargs):\n n = a.shape[0]\n _pop = Population().new(\"X\", a).merge(Population().new(\"X\", b))\n _P = np.column_stack([np.arange(n), np.arange(n) + n])\n\n if c is not None:\n _pop = _pop.merge(Population().new(\"X\", c))\n _P = np.column_stack([_P, np.arange(n) + 2 * n])\n\n problem = get_problem_func(a.shape[1], xl, xu, type_var)(**kwargs)\n return crossover.do(problem, _pop, _P, **kwargs).get(\"X\")\n\n\ndef mutation(mutation, X, xl=0, xu=1, type_var=np.double, **kwargs):\n problem = get_problem_func(X.shape[1], xl, xu, type_var)(**kwargs)\n return mutation.do(problem, Population().new(\"X\", X), **kwargs).get(\"X\")\n\n\n# =========================================================================================================\n# Ask And Tell Interface\n# =========================================================================================================\n\n\ndef evaluate_to_nan(self, x, out, *args, **kwargs):\n n_points, _ = x.shape\n out[\"F\"] = None\n if self.n_constr > 0:\n out[\"G\"] = None\n\n\ndef evaluate_to_value(F, G=None):\n def eval(self, x, out, *args, **kwargs):\n n_points, _ = x.shape\n out[\"F\"] = F\n if G is not None:\n out[\"G\"] = G\n\n return eval\n\n\nclass AskAndTell:\n\n def __init__(self, algorithm, problem=None, **kwargs):\n\n if problem is not None:\n self.problem = copy.deepcopy(problem)\n else:\n self.problem = Problem(**kwargs)\n\n self.algorithm = copy.deepcopy(algorithm)\n\n def get_population(self):\n return self.algorithm.pop\n\n def set_population(self, pop):\n self.algorithm.pop = pop\n\n def get_offsprings(self):\n return self.algorithm.off\n\n def set_offsprings(self, off):\n self.algorithm.off = off\n\n def ask(self):\n\n # if the initial population has not been generated yet\n if self.get_population() is None:\n\n self.algorithm.initialize(self.problem)\n\n # deactivate the survival because no values have been set yet\n survival = self.algorithm.survival\n self.algorithm.survival = None\n\n self.problem._evaluate = types.MethodType(evaluate_to_nan, self.problem)\n self.algorithm._initialize()\n\n # activate the survival for the further runs\n self.algorithm.survival = survival\n\n return self.get_population().get(\"X\")\n\n # usually the case - create the next output\n else:\n\n # if offsprings do not exist set the pop - otherwise always offsprings\n if 
self.get_offsprings() is not None:\n self.set_population(self.get_population().merge(self.get_offsprings()))\n\n # execute a survival of the algorithm\n survivors = self.algorithm.survival.do(self.problem, self.get_population(),\n self.algorithm.pop_size, algorithm=self.algorithm)\n self.set_population(survivors)\n\n # execute the mating using the population\n off = self.algorithm.mating.do(self.algorithm.problem, self.get_population(),\n n_offsprings=self.algorithm.n_offsprings, algorithm=self.algorithm)\n\n # execute the fake evaluation of the individuals\n self.problem._evaluate = types.MethodType(evaluate_to_nan, self.problem)\n self.algorithm.evaluator.eval(self.problem, off, algorithm=self.algorithm)\n self.set_offsprings(off)\n\n return off.get(\"X\")\n\n def tell(self, F, G=None, X=None):\n\n # if offsprings do not exist set the pop - otherwise always offsprings\n pop_to_evaluate = self.get_offsprings() if self.get_offsprings() is not None else self.get_population()\n\n # if the user changed the design space values for whatever reason\n if X is not None:\n pop_to_evaluate.set(\"X\")\n\n # do the function evaluations\n self.problem._evaluate = types.MethodType(evaluate_to_value(F.copy(), G.copy()), self.problem)\n self.algorithm.evaluator.eval(self.problem, pop_to_evaluate, algorithm=self.algorithm)\n\n def result(self, only_optimum=True, return_values_of=\"auto\"):\n\n if return_values_of == \"auto\":\n return_values_of = [\"X\", \"F\"]\n if self.problem.n_constr > 0:\n return_values_of.append(\"CV\")\n\n if only_optimum:\n self.algorithm.finalize()\n pop, opt = self.algorithm.pop, self.algorithm.opt\n res = filter_optimum(pop.copy()) if opt is None else opt.copy()\n\n if isinstance(res, Individual):\n res = Population.create(res)\n\n else:\n res = self.algorithm.pop\n\n return res.get(*return_values_of)\n" ]
[ [ "numpy.arange" ] ]
tamirmal/tau_cv_proj
[ "dee83809e17c2e18c098ba1e9920e9fa785870d2" ]
[ "predict_svm_bbox.py" ]
[ "from sklearn.externals import joblib\nfrom optparse import OptionParser\nimport os\nfrom keras.applications.vgg16 import VGG16\nfrom keras.preprocessing import image\nimport numpy as np\nfrom VGG_feature_extract import vgg_features_extract\n\n\ndef predict_classes(arr_images_list, clf, vgg, visualize=False):\n X = []\n for img in arr_images_list:\n features = vgg_features_extract.vgg_extract_features_img_array(img, vgg)\n X.append(features)\n\n X = np.array(X)\n X = np.reshape(X, (len(X), -1))\n Y = clf.predict(X)\n\n if visualize:\n bus_color_class_color = {\n '1': 'green',\n '2': 'yellow-mustard',\n '3': 'white',\n '4': 'silver-grey',\n '5': 'blue',\n '6': 'red',\n }\n import cv2\n from matplotlib import pyplot\n for idx, img in enumerate(arr_images_list):\n pyplot.figure()\n img = img[:,:,::-1]\n\n img = image.array_to_img(img)\n pyplot.imshow(img)\n print(\"===============================================\")\n print(\"class : {}={}\".format(Y[idx], bus_color_class_color[str(Y[idx])]))\n pyplot.show()\n print(\"===============================================\")\n\n return Y\n\n\ndef main():\n parser = OptionParser()\n parser.add_option(\"-d\", \"--test_data\", dest=\"dataset_dir\", help=\"Path to test data.\")\n parser.add_option(\"-c\", \"--clsf\", dest=\"clsf\", help=\"Path to stored classifier\")\n (options, args) = parser.parse_args()\n\n test_path = options.dataset_dir\n\n print(\"Use test data : {}\".format(test_path))\n print(\"Use classifier : {}\".format(options.clsf))\n\n if not os.path.isfile(options.clsf):\n print(\"Cant locate {}\".format(options.clsf))\n assert 0\n\n clf = joblib.load(options.clsf)\n model = VGG16(weights='imagenet', include_top=False)\n\n X = []\n IMG_LIST = []\n for img in os.listdir(test_path):\n IMG_LIST.append(img)\n IMG_PATH = test_path + '/' + img\n features = vgg_features_extract.vgg_extract_features(IMG_PATH, model)\n X.append(features)\n\n #import ipdb; ipdb.set_trace()\n X = np.array(X)\n X = np.reshape(X, (len(X), -1))\n Y = clf.predict(X)\n\n if True:\n for idx, img in enumerate(IMG_LIST):\n from matplotlib import pyplot\n print(\"image {} predicted class {}\".format(img, Y[idx]))\n IMG_PATH = test_path + '/' + img\n x = image.load_img(IMG_PATH, target_size=(224, 224))\n pyplot.figure() # figure starts from 1 ...\n pyplot.imshow(image.array_to_img(x))\n pyplot.show()\n\n print(\"DONE\")\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.array", "matplotlib.pyplot.figure", "sklearn.externals.joblib.load", "matplotlib.pyplot.show", "matplotlib.pyplot.imshow" ] ]
bbrito/BC-regularized-GAIL
[ "cdbe5ee662f7d3d068f3291a45e1a23ae7736ca5" ]
[ "a2c_ppo_acktr/algo/gail.py" ]
[ "import h5py\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.data\nfrom torch import autograd\nimport gym\nfrom baselines.common.running_mean_std import RunningMeanStd\n\n\nclass RED(nn.Module):\n def __init__(self, input_dim, hidden_dim, device, sigma, iters):\n super().__init__()\n self.device = device\n self.sigma = sigma\n self.iters = iters\n\n # This is a random initialization, used to learn\n self.dummytrunk = nn.Sequential(\n nn.Linear(input_dim, hidden_dim), nn.Tanh(),\n nn.Linear(hidden_dim, hidden_dim), nn.Tanh(),\n nn.Linear(hidden_dim, 1)\n ).to(device)\n\n self.trunk = nn.Sequential(\n nn.Linear(input_dim, hidden_dim), nn.Tanh(),\n nn.Linear(hidden_dim, hidden_dim), nn.Tanh(),\n nn.Linear(hidden_dim, 1)\n ).to(device)\n\n self.trunk.train()\n self.optimizer = torch.optim.Adam(self.trunk.parameters())\n\n def train_red(self, expert_loader):\n # Train the loader\n self.train()\n for _ in range(self.iters):\n for expert_batch in expert_loader:\n # Get expert state and actions\n expert_state, expert_action = expert_batch\n expert_state = torch.FloatTensor(expert_state).to(self.device)\n expert_action = expert_action.to(self.device)\n\n # Given expert state and action\n expert_sa = torch.cat([expert_state, expert_action], dim=1)\n fsa = self.trunk(expert_sa)\n with torch.no_grad():\n fsa_random = self.dummytrunk(expert_sa)\n\n loss = ((fsa - fsa_random)**2).mean()\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n print(\"RED loss: {}\".format(loss.data.cpu().numpy()))\n\n def predict_reward(self, state, action, obfilt=None):\n with torch.no_grad():\n self.eval()\n if obfilt is not None:\n s = obfilt(state.cpu().numpy())\n s = torch.FloatTensor(s).to(action.device)\n else:\n s = state\n d = torch.cat([s, action], dim=1)\n fsa = self.trunk(d)\n fsa_random = self.dummytrunk(d)\n rew = torch.exp(-self.sigma * ((fsa - fsa_random)**2).mean(1))[:, None]\n return rew\n\n\n\nclass Discriminator(nn.Module):\n def __init__(self, input_dim, hidden_dim, device, red=None, sail=False, learn=True):\n super(Discriminator, self).__init__()\n\n self.device = device\n\n self.red = red\n self.sail = sail\n self.redtrained = False\n if self.sail:\n assert self.red is not None, 'Cannot run SAIL without using RED'\n\n self.trunk = nn.Sequential(\n nn.Linear(input_dim, hidden_dim), nn.Tanh(),\n nn.Linear(hidden_dim, hidden_dim), nn.Tanh(),\n nn.Linear(hidden_dim, 1)).to(device)\n\n self.trunk.train()\n\n self.learn = learn\n self.optimizer = torch.optim.Adam(self.trunk.parameters())\n\n self.returns = None\n self.ret_rms = RunningMeanStd(shape=())\n\n def compute_grad_pen(self,\n expert_state,\n expert_action,\n policy_state,\n policy_action,\n lambda_=10):\n alpha = torch.rand(expert_state.size(0), 1)\n expert_data = torch.cat([expert_state, expert_action], dim=1)\n policy_data = torch.cat([policy_state, policy_action], dim=1)\n\n alpha = alpha.expand_as(expert_data).to(expert_data.device)\n\n mixup_data = alpha * expert_data + (1 - alpha) * policy_data\n mixup_data.requires_grad = True\n\n disc = self.trunk(mixup_data)\n ones = torch.ones(disc.size()).to(disc.device)\n grad = autograd.grad(\n outputs=disc,\n inputs=mixup_data,\n grad_outputs=ones,\n create_graph=True,\n retain_graph=True,\n only_inputs=True)[0]\n\n grad_pen = lambda_ * (grad.norm(2, dim=1) - 1).pow(2).mean()\n return grad_pen\n\n def update(self, expert_loader, rollouts, obsfilt=None):\n self.train()\n if obsfilt is None:\n obsfilt = lambda x,y : 
x\n\n # If RED is untrained, then train it\n if self.red is not None and not self.redtrained:\n print(\"Training RED...\")\n self.red.train_red(expert_loader) # obsfilt keeps changing after that, Pass the obsfilt to reverse normalized states\n self.redtrained = True\n print(\"Trained RED.\")\n\n # If there is no SAIL but RED is present,\n # then GAIL doesn't need to be updated\n if self.red is not None and not self.sail:\n return 0\n\n policy_data_generator = rollouts.feed_forward_generator(\n None, mini_batch_size=expert_loader.batch_size)\n\n loss = 0\n n = 0\n for expert_batch, policy_batch in zip(expert_loader,\n policy_data_generator):\n policy_state, policy_action = policy_batch[0], policy_batch[2]\n policy_d = self.trunk(\n torch.cat([policy_state, policy_action], dim=1))\n\n expert_state, expert_action = expert_batch\n expert_state = obsfilt(expert_state.numpy(), update=False)\n expert_state = torch.FloatTensor(expert_state).to(self.device)\n expert_action = expert_action.to(self.device)\n expert_d = self.trunk(\n torch.cat([expert_state, expert_action], dim=1))\n\n expert_loss = F.binary_cross_entropy_with_logits(\n expert_d,\n torch.ones(expert_d.size()).to(self.device))\n policy_loss = F.binary_cross_entropy_with_logits(\n policy_d,\n torch.zeros(policy_d.size()).to(self.device))\n\n gail_loss = expert_loss + policy_loss\n grad_pen = self.compute_grad_pen(expert_state, expert_action,\n policy_state, policy_action)\n\n loss += (gail_loss + grad_pen).item()\n n += 1\n\n if self.learn:\n self.optimizer.zero_grad()\n (gail_loss + grad_pen).backward()\n self.optimizer.step()\n return loss / n\n\n def predict_reward(self, state, action, gamma, masks, update_rms=True, obsfilt=None):\n with torch.no_grad():\n self.eval()\n d = self.trunk(torch.cat([state, action], dim=1))\n s = torch.sigmoid(d)\n # Get RED reward\n if self.red is not None:\n assert self.redtrained\n red_rew = self.red.predict_reward(state, action, obsfilt)\n\n # Check if SAIL is present or not\n if self.sail:\n reward = s * red_rew\n else:\n reward = red_rew\n else:\n # If traditional GAIL\n #reward = s.log() - (1 - s).log()\n reward = - (1 - s).log()\n\n if self.returns is None:\n self.returns = reward.clone()\n\n if update_rms:\n self.returns = self.returns * masks * gamma + reward\n self.ret_rms.update(self.returns.cpu().numpy())\n\n return reward / np.sqrt(self.ret_rms.var[0] + 1e-8)\n\n\nclass Flatten(nn.Module):\n def forward(self, x):\n return x.view(x.size(0), -1)\n\nclass CNNDiscriminator(nn.Module):\n def __init__(self, input_shape, action_space, hidden_dim, device, clip=0.01):\n super(CNNDiscriminator, self).__init__()\n self.device = device\n C, H, W = input_shape\n self.n = 0\n if type(action_space) == gym.spaces.box.Box:\n A = action_space.shape[0]\n else:\n A = action_space.n\n self.n = A\n\n self.main = nn.Sequential(\n nn.Conv2d(C, 32, 4, stride=2), nn.ReLU(),\n nn.Conv2d(32, 64, 4, stride=2), nn.ReLU(),\n nn.Conv2d(64, 128, 4, stride=2), nn.ReLU(),\n nn.Conv2d(128, 256, 4, stride=2), nn.ReLU(), Flatten(),\n ).to(device)\n self.clip = clip\n print(\"Using clip {}\".format(self.clip))\n\n for i in range(4):\n H = (H - 4)//2 + 1\n W = (W - 4)//2 + 1\n # Get image dim\n img_dim = 256*H*W\n\n self.trunk = nn.Sequential(\n nn.Linear(A + img_dim, hidden_dim), nn.Tanh(),\n nn.Linear(hidden_dim, hidden_dim), nn.Tanh(),\n nn.Linear(hidden_dim, 1)).to(device)\n\n self.main.train()\n self.trunk.train()\n\n self.optimizer = torch.optim.Adam(list(self.main.parameters()) + list(self.trunk.parameters()))\n 
self.returns = None\n self.ret_rms = RunningMeanStd(shape=())\n\n def compute_grad_pen(self,\n expert_state,\n expert_action,\n policy_state,\n policy_action,\n lambda_=10):\n grad_pen = 0\n if True:\n alpha = torch.rand(expert_state.size(0), 1)\n\n # Change state values\n exp_state = self.main(expert_state)\n pol_state = self.main(policy_state)\n\n expert_data = torch.cat([exp_state, expert_action], dim=1)\n policy_data = torch.cat([pol_state, policy_action], dim=1)\n\n alpha = alpha.expand_as(expert_data).to(expert_data.device)\n\n mixup_data = alpha * expert_data + (1 - alpha) * policy_data\n\n disc = self.trunk(mixup_data)\n ones = torch.ones(disc.size()).to(disc.device)\n grad = autograd.grad(\n outputs=disc,\n inputs=mixup_data,\n grad_outputs=ones,\n create_graph=True,\n retain_graph=True,\n only_inputs=True)[0]\n\n grad_pen = lambda_ * (grad.norm(2, dim=1) - 1).pow(2).mean()\n return grad_pen\n\n def update(self, expert_loader, rollouts, obsfilt=None):\n self.train()\n assert obsfilt is None\n\n policy_data_generator = rollouts.feed_forward_generator(\n None, mini_batch_size=expert_loader.batch_size)\n\n loss = 0\n n = 0\n for expert_batch, policy_batch in zip(expert_loader,\n policy_data_generator):\n policy_state, policy_action = policy_batch[0], policy_batch[2]\n\n if self.n > 0:\n act = torch.zeros(policy_action.shape[0], self.n)\n polact = policy_action.squeeze()\n act[np.arange(polact.shape[0]), polact] = 1\n policy_action = torch.FloatTensor(act).to(policy_action.device)\n #print('policy', policy_action.shape)\n\n pol_state = self.main(policy_state)\n policy_d = self.trunk(\n torch.cat([pol_state, policy_action], dim=1))\n\n expert_state, expert_action = expert_batch\n #print('expert', expert_action.shape)\n expert_state = torch.FloatTensor(expert_state).to(self.device)\n expert_action = expert_action.to(self.device)\n exp_state = self.main(expert_state)\n expert_d = self.trunk(\n torch.cat([exp_state, expert_action], dim=1))\n\n expert_loss = F.binary_cross_entropy_with_logits(\n expert_d,\n torch.ones(expert_d.size()).to(self.device))\n policy_loss = F.binary_cross_entropy_with_logits(\n policy_d,\n torch.zeros(policy_d.size()).to(self.device))\n #expert_loss = -expert_d.mean().to(self.device)\n #policy_loss = policy_d.mean().to(self.device)\n\n gail_loss = expert_loss + policy_loss\n grad_pen = self.compute_grad_pen(expert_state, expert_action,\n policy_state, policy_action)\n\n loss += (gail_loss + grad_pen).item()\n n += 1\n\n self.optimizer.zero_grad()\n (gail_loss + grad_pen).backward()\n self.optimizer.step()\n\n # Clip params here\n #for p in self.parameters():\n #p = p.clamp(-self.clip, self.clip)\n\n return loss / n\n\n def predict_reward(self, state, action, gamma, masks, update_rms=True):\n with torch.no_grad():\n self.eval()\n if self.n > 0:\n acts = torch.zeros((action.shape[0], self.n))\n acts[np.arange(action.shape[0]), action.squeeze()] = 1\n acts = torch.FloatTensor(acts).to(action.device)\n else:\n acts = action\n\n stat = self.main(state)\n d = self.trunk(torch.cat([stat, acts], dim=1))\n s = torch.sigmoid(d)\n reward = -(1 - s).log()\n #reward = d / self.clip\n if self.returns is None:\n self.returns = reward.clone()\n\n if update_rms:\n self.returns = self.returns * masks * gamma + reward\n self.ret_rms.update(self.returns.cpu().numpy())\n\n return reward / np.sqrt(self.ret_rms.var[0] + 1e-8)\n\n\nclass ExpertImageDataset(torch.utils.data.Dataset):\n def __init__(self, file_name, train=None, act=None):\n trajs = torch.load(file_name)\n 
self.observations = trajs['obs']\n self.actions = trajs['actions']\n self.train = train\n self.act = None\n if isinstance(act, gym.spaces.Discrete):\n self.act = act.n\n\n self.actual_obs = [None for _ in range(len(self.actions))]\n self.lenn = 0\n if train is not None:\n lenn = int(0.8*len(self.actions))\n self.lenn = lenn\n if train:\n self.actions = self.actions[:lenn]\n else:\n self.actions = self.actions[lenn:]\n\n def __len__(self, ):\n return len(self.actions)\n\n def __getitem__(self, idx):\n action = self.actions[idx]\n if self.act:\n act = np.zeros((self.act, ))\n act[action[0]] = 1\n action = act\n # Load only the first time, images in uint8 are supposed to be light\n if self.actual_obs[idx] is None:\n if self.train == False:\n image = np.load(self.observations[idx + self.lenn] + '.npy')\n else:\n image = np.load(self.observations[idx] + '.npy')\n self.actual_obs[idx] = image\n else:\n image = self.actual_obs[idx]\n # rescale image and pass it\n img = image / 255.0\n img = img.transpose(2, 0, 1)\n # [C, H, W ] image and [A] actions\n return torch.FloatTensor(img), torch.FloatTensor(action)\n\n\nclass ExpertDataset(torch.utils.data.Dataset):\n def __init__(self, file_name, num_trajectories=4, subsample_frequency=20, train=True, start=0):\n #file_name = \"/home/bdebrito/code/BC-regularized-GAIL/gail_experts/trajs_halfcheetah.pt\"\n all_trajectories = torch.load(file_name)\n\n perm = torch.randperm(all_trajectories['states'].size(0))\n #idx = perm[:num_trajectories]\n idx = np.arange(num_trajectories) + start\n if not train:\n assert start > 0\n\n self.trajectories = {}\n\n # See https://github.com/pytorch/pytorch/issues/14886\n # .long() for fixing bug in torch v0.4.1\n start_idx = torch.randint(\n 0, subsample_frequency, size=(num_trajectories, )).long()\n\n for k, v in all_trajectories.items():\n data = v[idx]\n\n if k != 'lengths':\n samples = []\n for i in range(num_trajectories):\n samples.append(data[i, start_idx[i]::subsample_frequency])\n self.trajectories[k] = torch.stack(samples)\n else:\n self.trajectories[k] = data // subsample_frequency\n\n self.i2traj_idx = {}\n self.i2i = {}\n\n self.length = self.trajectories['lengths'].sum().item()\n\n traj_idx = 0\n i = 0\n\n self.get_idx = []\n\n for j in range(self.length):\n\n while self.trajectories['lengths'][traj_idx].item() <= i:\n i -= self.trajectories['lengths'][traj_idx].item()\n traj_idx += 1\n\n self.get_idx.append((traj_idx, i))\n\n i += 1\n\n\n def __len__(self):\n return self.length\n\n def __getitem__(self, i):\n traj_idx, i = self.get_idx[i]\n\n return self.trajectories['states'][traj_idx][i], self.trajectories[\n 'actions'][traj_idx][i]\n" ]
[ [ "torch.zeros", "torch.sigmoid", "torch.cat", "torch.nn.Linear", "torch.stack", "numpy.zeros", "torch.nn.Tanh", "torch.no_grad", "torch.FloatTensor", "numpy.load", "torch.nn.ReLU", "torch.autograd.grad", "torch.randint", "torch.nn.Conv2d", "numpy.arange", "torch.load", "numpy.sqrt" ] ]
amg1998/BUSeniorDesign-Opticle-21-22
[ "f550cd99852459d15ef60e4b2c4bc9bccd2d748b" ]
[ "examples/test/spatial_tiny_yolo.py" ]
[ "from pathlib import Path\nimport sys\nimport cv2\nimport depthai as dai\nimport numpy as np\nimport time\nfrom datetime import datetime\nimport open3d as o3d\n\nfrom subprocess import Popen\nfrom depthai_setup import DepthAi\nfrom projector_3d import PointCloudVisualizer\nfrom collections import deque\nimport speech_recognition as sr\n\nfrom gtts import *\nfrom playsound import playsound\nimport os\n\n\nrpi = 0\nstart=datetime.now()\n\ncmd_start='gtts-cli '\ncmd_mid=' --output '\ncmd_end='message.mp3'\nscan_end ='scan.mp3'\nopensound = \"\"\nmodeswitchpin=1\nmode = 1\n\nif (rpi==1):\n # setup socket\n import socket\n import RPi.GPIO as GPIO\n\n HOST = '155.41.122.253'\n PORT = 2000\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((HOST,PORT))\n\n #setup PI\n GPIO.setmode(GPIO.BOARD)\n modeswitchpin=3\n GPIO.setup(modeswitchpin, GPIO.IN)\n \n\n\nclass Main:\n depthai_class = DepthAi\n\n def __init__(self):\n self.nnBlobPath = str((Path(__file__).parent / Path('../models/2classes_model.blob')).resolve().absolute())\n self.depthai = self.depthai_class(self.nnBlobPath)\n self.labelMap = [\"\", \"door\", \"handle\"]\n # [\n # \"person\", \"bicycle\", \"car\", \"motorbike\", \"aeroplane\", \"bus\", \"train\",\n # \"truck\", \"boat\", \"traffic light\", \"fire hydrant\", \"stop sign\", \"parking meter\", \"bench\",\n # \"bird\", \"cat\", \"dog\", \"horse\", \"sheep\", \"cow\", \"elephant\",\n # \"bear\", \"zebra\", \"giraffe\", \"backpack\", \"umbrella\", \"handbag\", \"tie\",\n # \"suitcase\", \"frisbee\", \"skis\", \"snowboard\", \"sports ball\", \"kite\", \"baseball bat\",\n # \"baseball glove\", \"skateboard\", \"surfboard\", \"tennis racket\", \"bottle\", \"wine glass\", \"cup\",\n # \"fork\", \"knife\", \"spoon\", \"bowl\", \"banana\", \"apple\", \"sandwich\",\n # \"orange\", \"broccoli\", \"carrot\", \"hot dog\", \"pizza\", \"donut\", \"cake\",\n # \"chair\", \"sofa\", \"pottedplant\", \"bed\", \"diningtable\", \"toilet\", \"tvmonitor\",\n # \"laptop\", \"mouse\", \"remote\", \"keyboard\", \"cell phone\", \"microwave\", \"oven\",\n # \"toaster\", \"sink\", \"refrigerator\", \"book\", \"clock\", \"vase\", \"scissors\",\n # \"teddy bear\", \"hair drier\", \"toothbrush\"\n # ]\n self.isstarted = False\n # self.pcl_converter = None\n self.target = \"handle\"\n self.confq = deque(maxlen=30)\n self.lastsaid = [0,0,0]\n self.epsDist = 1\n\n\n def run_yolo_pc(self):\n color = (255, 255, 255)\n speed=' -s' + '160'\n pcl_converter = None\n vis = o3d.visualization.Visualizer()\n vis.create_window()\n for frame, depthFrameColor, fps, depthFrame, pcFrame in self.depthai.yolo_det():\n for roiData in self.depthai.roiDatas:\n roi = roiData.roi\n roi = roi.denormalize(depthFrameColor.shape[1], depthFrameColor.shape[0])\n topLeft = roi.topLeft()\n bottomRight = roi.bottomRight()\n xmin = int(topLeft.x)\n ymin = int(topLeft.y)\n xmax = int(bottomRight.x)\n ymax = int(bottomRight.y)\n\n cv2.rectangle(depthFrameColor, (xmin, ymin), (xmax, ymax), color, cv2.FONT_HERSHEY_SCRIPT_SIMPLEX)\n\n\n # If the frame is available, draw bounding boxes on it and show the frame\n height = frame.shape[0]\n width = frame.shape[1]\n maxconf = 0\n maxconfdepth = 0\n maxconfx = 0\n medvals = [0,0,0]\n label=\"\"\n for detection in self.depthai.detections:\n \n x1 = int(detection.xmin * width)\n x2 = int(detection.xmax * width)\n y1 = int(detection.ymin * height)\n y2 = int(detection.ymax * height)\n try:\n label = self.labelMap[detection.label]\n\n #check if a handle is detected\n if 
(label==self.target):\n\n #save highest confidence value and corresponding depth\n if detection.confidence>maxconf:\n maxconf = detection.confidence\n maxconfdepth = detection.spatialCoordinates.z\n maxconfx = detection.spatialCoordinates.x\n\n tempq = list(self.confq)\n medvals = np.median(tempq, axis=0)\n \n except:\n label = detection.label\n cv2.putText(frame, str(label), (x1 + 10, y1 + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)\n cv2.putText(frame, \"{:.2f}\".format(detection.confidence*100), (x1 + 10, y1 + 35), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)\n cv2.putText(frame, f\"X: {int(detection.spatialCoordinates.x)} mm\", (x1 + 10, y1 + 50), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)\n cv2.putText(frame, f\"Y: {int(detection.spatialCoordinates.y)} mm\", (x1 + 10, y1 + 65), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)\n cv2.putText(frame, f\"Z: {int(detection.spatialCoordinates.z)} mm\", (x1 + 10, y1 + 80), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)\n\n cv2.rectangle(frame, (x1, y1), (x2, y2), color, cv2.FONT_HERSHEY_SIMPLEX)\n\n #push highest confidence & corresponding depth to queue\n self.confq.append([maxconf, maxconfdepth, maxconfx])\n # try:\n # print(self.lastsaid, medvals)\n distdiff = abs(round(self.lastsaid[1]/1000*3.28,1)-round(medvals[1]/1000*3.28,1))\n # print(round(self.lastsaid[1]/1000*3.28,1),round(medvals[1]/1000*3.28,1))\n if(label==self.target and distdiff>self.epsDist and medvals[1]>0):\n self.lastsaid = medvals\n heading = self.calc_direction(medvals[1],medvals[2])\n print(\"Notified User\")\n vdistance = str(round(self.lastsaid[1]/1000*3.28,1))\n message=self.target+vdistance+\"feetat\"+heading+\"o'clock\"\n Popen(cmd_start+'\"'+message+'\"'+cmd_mid+cmd_end, shell=True)\n #Popen('message.mp3', shell=True)\n os.system('mpg123 message.mp3')\n \n \n #cmd_start+self.target+vdistance+\"feetat\"+heading+\"o'clock\"+speed,shell=True\n cv2.putText(frame, \"NN fps: {:.2f}\".format(fps), (2, frame.shape[0] - 4), cv2.FONT_HERSHEY_TRIPLEX, 0.4, color)\n cv2.imshow(\"depth\", depthFrameColor)\n cv2.imshow(\"rgb\", frame)\n if cv2.waitKey(1) == ord('q'):\n break\n\n \n corners = np.asarray([[-0.5,-1.0,0.35],[0.5,-1.0,0.35],[0.5,1.0,0.35],[-0.5,1.0,0.35],[-0.5,-1.0,1.7],[0.5,-1.0,1.7],[0.5,1.0,1.7],[-0.5,1.0,1.7]])\n\n bounds = corners.astype(\"float64\")\n bounds = o3d.utility.Vector3dVector(bounds)\n oriented_bounding_box = o3d.geometry.OrientedBoundingBox.create_from_points(bounds)\n\n # inRight = qRight.get()\n right = pcFrame\n \n frame = depthFrame\n median = cv2.medianBlur(frame, 5)\n # median2 = cv2.medianBlur(median,5)\n\n \n pcl_converter = PointCloudVisualizer(self.depthai.right_intrinsic, 640, 400)\n\n pcd = pcl_converter.rgbd_to_projection(median, right,False)\n\n #to get points within bounding box\n num_pts = oriented_bounding_box.get_point_indices_within_bounding_box(pcd.points)\n\n # vis = o3d.visualization.Visualizer()\n \n # vis.create_window()\n # if not self.isstarted:\n # vis.add_geometry(pcd)\n # vis.add_geometry(oriented_bounding_box)\n # self.isstarted = True \n \n # else:\n # vis.update_geometry(pcd)\n # vis.update_geometry(oriented_bounding_box)\n # vis.poll_events()\n # vis.update_renderer()\n\n if len(num_pts)>5000:\n print(\"Obstacle\")\n if (rpi==1):\n s.send(bytes('1','utf-8'))\n else:\n print(\"Nothing\")\n if (rpi==1):\n s.send(bytes('0','utf-8'))\n\n\n if self.pcl_converter is not None:\n self.pcl_converter.close_window()\n \n \n def calc_direction(self,z, x):\n z = round(z/1000*3.28,1)\n x = round(x/1000*3.28,1)\n angle = round(np.arctan(x/z),1)*180/3.14\n print(z,x)\n 
print(angle)\n if (15<angle<=45):\n heading = 1\n elif (45<angle<75):\n heading = 2\n elif (-15<angle<=15):\n heading = 12\n elif (-45<angle<=-15):\n heading = 11\n elif (-75<angle<=-45):\n heading = 11\n return(str(heading))\n \n def get_target(self):\n while True:\n saidtext=''\n if (modeswitchpin == 1 and mode == 1):\n r = sr.Recognizer()\n with sr.Microphone(device_index=6) as source:\n print(\"You have entered the scanning mode:\")\n prompt='Say'+'object'\n #Popen([s_cmd_start+prompt+speed+s_cmd_end],shell=True)\n Popen(opensound+'sayobject.mp3', shell=True)\n audio=r.adjust_for_ambient_noise(source)\n audio=r.listen(source)\n try:\n text = \"handle\"\n # r.recognize_google(audio)\n\n print(\"You said: \" + text)\n if (text not in self.labelMap):\n errormessage='Try'+'again'\n #Popen([s_cmd_start+errormessage+speed+s_cmd_end],shell=True)\n Popen(opensound+'tryagain.mp3', shell=True)\n break\n else:\n saidtext=text\n confirm='Scanning'+'for'\n #Popen([s_cmd_start+confirm+saidtext+speed+s_cmd_end],shell=True)\n scanmessage = 'Scanning '+'for '+text\n #print(cmd_start+'\"'+scanmessage+'\"'+' '+cmd_mid+scan_end)\n Popen(cmd_start+'\"'+scanmessage+'\"'+' '+cmd_mid+scan_end, shell=True)\n Popen(opensound+'scan.mp3', shell=True)\n\n except sr.UnknownValueError:\n print('Sorry could not recognize voice')\n errormessage='Try'+'again'\n #Popen([s_cmd_start+errormessage+speed+s_cmd_end],shell=True)\n Popen(opensound+'tryagain.mp3', shell=True)\n break\n except sr.RequestError as e:\n print(\"error 2\")\n print(\"out\")\n\n def run_pointcloud(self): \n\n for depthFrame, pcFrame in self.depthai.pc():\n \n corners = np.asarray([[-0.5,-1.0,0.35],[0.5,-1.0,0.35],[0.5,1.0,0.35],[-0.5,1.0,0.35],[-0.5,-1.0,1.7],[0.5,-1.0,1.7],[0.5,1.0,1.7],[-0.5,1.0,1.7]])\n\n bounds = corners.astype(\"float64\")\n bounds = o3d.utility.Vector3dVector(bounds)\n oriented_bounding_box = o3d.geometry.OrientedBoundingBox.create_from_points(bounds)\n\n right = pcFrame\n \n frame = depthFrame\n median = cv2.medianBlur(frame, 5)\n \n self.pcl_converter = PointCloudVisualizer(self.depthai.right_intrinsic, 640, 400)\n\n pcd = self.pcl_converter.rgbd_to_projection(median, right,False)\n\n #to get points within bounding box\n num_pts = oriented_bounding_box.get_point_indices_within_bounding_box(pcd.points)\n\n\n # if not self.isstarted:\n # self.depthai.vis.add_geometry(pcd)\n # self.depthai.vis.add_geometry(oriented_bounding_box)\n # self.isstarted = True \n \n # else:\n # self.depthai.vis.update_geometry(pcd)\n # self.depthai.vis.update_geometry(oriented_bounding_box)\n # self.depthai.vis.poll_events()\n # self.depthai.vis.update_renderer()\n if len(num_pts)>5000:\n print(\"Obstacle\")\n if (rpi==1):\n s.send(bytes('1','utf-8'))\n else:\n print(\"Nothing\")\n if (rpi==1):\n s.send(bytes('0','utf-8'))\n\n if self.pcl_converter is not None:\n self.pcl_converter.close_window()\n\nif __name__ == '__main__':\n\n Main().get_target()\n" ]
[ [ "numpy.median", "numpy.arctan", "numpy.asarray" ] ]
gisilvs/probability
[ "fd8be3ca1243f956578bf1b1280f9d3ed13541f0" ]
[ "tensorflow_probability/python/internal/auto_composite_tensor_test.py" ]
[ "# Copyright 2020 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for auto_composite_tensor.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport os\n\nfrom absl import flags\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\n\nimport tensorflow.compat.v2 as tf\nimport tensorflow_probability as tfp\n\nfrom tensorflow_probability.python.internal import auto_composite_tensor\nfrom tensorflow_probability.python.internal import tensor_util\nfrom tensorflow_probability.python.internal import test_util\n\ntf.enable_v2_behavior()\n\nflags.DEFINE_string(\n 'model_output_path',\n None,\n 'If defined, serialize a `tf.Module` instance to this directory with '\n '`tf.saved_model`.')\n\nFLAGS = flags.FLAGS\n\nTFP_PYTHON_DIR = 'tensorflow_probability/tensorflow_probability/python'\n\ntfb = tfp.bijectors\ntfd = tfp.distributions\n\n\nAutoIdentity = tfp.experimental.auto_composite_tensor(\n tf.linalg.LinearOperatorIdentity, non_identifying_kwargs=('name',))\nAutoDiag = tfp.experimental.auto_composite_tensor(\n tf.linalg.LinearOperatorDiag, non_identifying_kwargs=('name',))\nAutoBlockDiag = tfp.experimental.auto_composite_tensor(\n tf.linalg.LinearOperatorBlockDiag, non_identifying_kwargs=('name',))\nAutoTriL = tfp.experimental.auto_composite_tensor(\n tf.linalg.LinearOperatorLowerTriangular, non_identifying_kwargs=('name',))\n\nAutoNormal = tfp.experimental.auto_composite_tensor(\n tfd.Normal, non_identifying_kwargs=('name',))\nAutoIndependent = tfp.experimental.auto_composite_tensor(\n tfd.Independent, non_identifying_kwargs=('name',))\nAutoReshape = tfp.experimental.auto_composite_tensor(\n tfb.Reshape, non_identifying_kwargs=('name',))\n\n\nclass Model(tf.Module):\n\n def __init__(self):\n self.scale = tf.Variable([0., 1.], shape=[None])\n\n @tf.function(input_signature=(\n tfb.Scale([1., 2.], validate_args=True)._type_spec,))\n def make_bij(self, b):\n return tfb.Scale(\n tf.convert_to_tensor(self.scale) + b.scale,\n validate_args=True)\n\n\[email protected]_composite_tensor\nclass ThingWithCallableArg(tfp.experimental.AutoCompositeTensor):\n\n def __init__(self, a, f):\n self.a = tf.convert_to_tensor(a, dtype_hint=tf.float32, name='a')\n self.f = f\n self.parameters = dict(a=self.a, b=self.f)\n\n def call(self):\n return self.f(self.a)\n\n\ndef tearDownModule():\n # If `FLAGS.model_output_path` is set, serialize a `Model` instance to disk.\n # To update the serialized data read by `test_saved_model_from_disk`, pass\n # the local path to\n # `tensorflow_probability/python/internal/testdata/auto_composite_tensor`.\n # You may need to pass `--test_strategy=local` to avoid permissions errors.\n if FLAGS.model_output_path is not None:\n model = Model()\n tf.saved_model.save(model, FLAGS.model_output_path)\n\n\n@test_util.test_all_tf_execution_regimes\nclass 
AutoCompositeTensorTest(test_util.TestCase):\n\n def test_example(self):\n @tfp.experimental.auto_composite_tensor(non_identifying_kwargs=('name',))\n class Adder(object):\n\n def __init__(self, x, y, name=None):\n with tf.name_scope(name or 'Adder') as name:\n self._x = tensor_util.convert_nonref_to_tensor(x)\n self._y = tensor_util.convert_nonref_to_tensor(y)\n self._name = name\n\n def xpy(self):\n return self._x + self._y\n\n x = 1.\n y = tf.Variable(1.)\n self.evaluate(y.initializer)\n\n def body(obj):\n return Adder(obj.xpy(), y),\n\n result, = tf.while_loop(\n cond=lambda _: True,\n body=body,\n loop_vars=(Adder(x, y),),\n maximum_iterations=3)\n self.assertAllClose(5., result.xpy())\n\n def test_function(self):\n lop = AutoDiag(2. * tf.ones([3]))\n self.assertAllClose(\n 6. * tf.ones([3]),\n tf.function(lambda lop: lop.matvec(3. * tf.ones([3])))(lop))\n\n def test_loop(self):\n def body(lop):\n return AutoDiag(lop.matvec(tf.ones([3]) * 2.)),\n init_lop = AutoDiag(tf.ones([3]))\n lop, = tf.while_loop(\n cond=lambda _: True,\n body=body,\n loop_vars=(init_lop,),\n maximum_iterations=3)\n self.assertAllClose(2.**3 * tf.ones([3]), lop.matvec(tf.ones([3])))\n\n def test_shape_parameters(self):\n dist = AutoIndependent(AutoNormal(0, tf.ones([1])),\n reinterpreted_batch_ndims=1)\n stream = test_util.test_seed_stream()\n lp = dist.log_prob(dist.sample(seed=stream()))\n lp, _ = tf.while_loop(\n lambda *_: True,\n lambda lp, d: (d.log_prob(d.sample(seed=stream())), d),\n (lp, dist),\n maximum_iterations=2)\n self.evaluate(lp)\n\n def test_prefer_static_shape_params(self):\n @tf.function\n def f(b):\n return b\n b = AutoReshape(\n event_shape_out=[2, 3],\n event_shape_in=[tf.reduce_prod([2, 3])]) # Tensor in a list.\n f(b)\n\n def test_nested(self):\n lop = AutoBlockDiag([AutoDiag(tf.ones([2]) * 2), AutoIdentity(1)])\n self.assertAllClose(\n tf.constant([6., 6, 3]),\n tf.function(lambda lop: lop.matvec(3. 
* tf.ones([3])))(lop))\n\n def test_preconditioner(self):\n xs = self.evaluate(tf.random.uniform([30, 30], seed=test_util.test_seed()))\n cov_linop = tf.linalg.LinearOperatorFullMatrix(\n tf.matmul(xs, xs, transpose_b=True) + tf.linalg.eye(30) * 1e-3,\n is_self_adjoint=True,\n is_positive_definite=True)\n\n tfed = tfp.experimental.distributions\n auto_ct_mvn_prec_linop = tfp.experimental.auto_composite_tensor(\n tfed.MultivariateNormalPrecisionFactorLinearOperator,\n non_identifying_kwargs=('name',))\n tril = AutoTriL(**cov_linop.cholesky().parameters)\n momentum_distribution = auto_ct_mvn_prec_linop(precision_factor=tril)\n def body(d):\n return d.copy(precision_factor=AutoTriL(\n **dict(d.precision_factor.parameters,\n tril=d.precision_factor.to_dense() + tf.linalg.eye(30),))),\n after_loop = tf.while_loop(lambda d: True, body, (momentum_distribution,),\n maximum_iterations=1)\n tf.nest.map_structure(self.evaluate,\n after_loop,\n expand_composites=True)\n\n def test_already_ct_subclass(self):\n\n @tfp.experimental.auto_composite_tensor\n class MyCT(tfp.experimental.AutoCompositeTensor):\n\n def __init__(self, tensor_param, non_tensor_param, maybe_tensor_param):\n self._tensor_param = tf.convert_to_tensor(tensor_param)\n self._non_tensor_param = non_tensor_param\n self._maybe_tensor_param = maybe_tensor_param\n\n def body(obj):\n return MyCT(obj._tensor_param + 1,\n obj._non_tensor_param,\n obj._maybe_tensor_param),\n\n init = MyCT(0., 0, 0)\n result, = tf.while_loop(\n cond=lambda *_: True,\n body=body,\n loop_vars=(init,),\n maximum_iterations=3)\n self.assertAllClose(3., result._tensor_param)\n\n init = MyCT(0., 0, tf.constant(0))\n result, = tf.while_loop(\n cond=lambda *_: True,\n body=body,\n loop_vars=(init,),\n maximum_iterations=3)\n self.assertAllClose(3., result._tensor_param)\n\n def test_parameters_lookup(self):\n\n @tfp.experimental.auto_composite_tensor\n class ThingWithParametersButNoAttrs(tfp.experimental.AutoCompositeTensor):\n\n def __init__(self, a, b):\n self.a = tf.convert_to_tensor(a, dtype_hint=tf.float32, name='a')\n self.b = tf.convert_to_tensor(b, dtype_hint=tf.float32, name='a')\n self.parameters = dict(a=self.a, b=self.b)\n\n t = ThingWithParametersButNoAttrs(1., 2.)\n self.assertIsInstance(t, tf.__internal__.CompositeTensor)\n\n ts = t._type_spec\n components = ts._to_components(t)\n self.assertAllEqualNested(components, dict(a=1., b=2.))\n\n t2 = ts._from_components(components)\n self.assertIsInstance(t2, ThingWithParametersButNoAttrs)\n\n def test_wrapped_constructor(self):\n def add_tag(f):\n @functools.wraps(f)\n def wrapper(*args, **kwargs):\n args[0]._tag = 'tagged'\n return f(*args, **kwargs)\n return wrapper\n\n @tfp.experimental.auto_composite_tensor\n class ThingWithWrappedInit(tfp.experimental.AutoCompositeTensor):\n\n @add_tag\n def __init__(self, value):\n self.value = tf.convert_to_tensor(value)\n\n init = ThingWithWrappedInit(3)\n def body(obj):\n return ThingWithWrappedInit(value=obj.value + 1),\n\n out, = tf.while_loop(\n cond=lambda *_: True,\n body=body,\n loop_vars=(init,),\n maximum_iterations=3)\n self.assertEqual(self.evaluate(out.value), 6)\n\n def test_deferred_assertion_context(self):\n # If `validate_args` assertions in `__init__` are not deferred, a graph\n # cycle is created when `d._type_spec` calls `__init__` and this test fails.\n d = AutoNormal(0., 1., validate_args=True)\n\n @tf.function\n def f(d):\n return d\n\n f(d)\n\n def test_function_with_variable(self):\n loc = tf.Variable(3.)\n dist = AutoIndependent(\n 
AutoNormal(loc, scale=tf.ones([3])), reinterpreted_batch_ndims=1)\n\n new_loc = 32.\n @tf.function\n def f(d):\n d.distribution.loc.assign(new_loc)\n self.assertLen(d.trainable_variables, 1)\n return d\n\n dist_ = f(dist)\n self.evaluate(loc.initializer)\n self.assertEqual(self.evaluate(dist_.distribution.loc), new_loc)\n self.assertEqual(self.evaluate(dist.distribution.loc), new_loc)\n self.assertLen(dist.trainable_variables, 1)\n\n def test_export_import(self):\n path = self.create_tempdir().full_path\n\n m1 = Model()\n self.evaluate([v.initializer for v in m1.variables])\n self.evaluate(m1.scale.assign(m1.scale + 1.))\n\n tf.saved_model.save(m1, os.path.join(path, 'saved_model1'))\n m2 = tf.saved_model.load(os.path.join(path, 'saved_model1'))\n self.evaluate(m2.scale.initializer)\n b = tfb.Scale([5., 9.], validate_args=True)\n self.evaluate(m2.make_bij(b).forward(2.))\n self.evaluate(m2.scale.assign(m2.scale + [1., 2.]))\n self.evaluate(m2.make_bij(b).forward(2.))\n\n self.evaluate(m2.scale.assign([1., 2., 3.]))\n tf.saved_model.save(m2, os.path.join(path, 'saved_model2'))\n m3 = tf.saved_model.load(os.path.join(path, 'saved_model2'))\n self.evaluate(m3.scale.initializer)\n with self.assertRaisesOpError('compatible shape'):\n self.evaluate(m3.make_bij(b).forward([3.]))\n\n def test_saved_model_from_disk(self):\n\n test_srcdir = absltest.get_default_test_srcdir()\n relative_testdata_path = os.path.join(\n TFP_PYTHON_DIR, 'internal/testdata/auto_composite_tensor')\n absolute_testdata_path = os.path.join(test_srcdir, relative_testdata_path)\n\n m = tf.saved_model.load(absolute_testdata_path)\n self.evaluate(m.scale.initializer)\n b = tfb.Scale([5., 9.], validate_args=True)\n self.assertAllClose(self.evaluate(m.make_bij(b).forward(2.)), [10., 20.])\n self.evaluate(m.scale.assign(m.scale + [1., 2.]))\n self.assertAllClose(self.evaluate(m.make_bij(b).forward(2.)), [12., 24.])\n\n def test_callable_arg(self):\n\n t = ThingWithCallableArg(1., lambda x: x + 2.)\n self.assertIsInstance(t, tf.__internal__.CompositeTensor)\n\n ts = t._type_spec\n components = ts._to_components(t)\n self.assertAllEqualNested(components, dict(a=1.))\n\n t2 = ts._from_components(components)\n self.assertIsInstance(t2, ThingWithCallableArg)\n\n self.assertAllClose(tf.function(lambda t: t.call())(t2), 3.)\n\n def test_different_names_type_specs_equal(self):\n\n dist_1 = AutoNormal([0., 2.], scale=1., name='FirstNormal')\n dist_2 = AutoNormal([1., 3.], scale=2., name='SecondNormal')\n self.assertEqual(dist_1._type_spec, dist_2._type_spec)\n\n def test_save_restore_functor(self):\n\n f = lambda x: x ** 2\n a = tf.constant([3., 2.])\n ct = ThingWithCallableArg(a, f=f)\n\n struct_coder = tf.__internal__.saved_model.StructureCoder()\n with self.assertRaisesRegex(ValueError, 'Cannot serialize'):\n struct_coder.encode_structure(ct._type_spec) # pylint: disable=protected-access\n\n @tfp.experimental.auto_composite_tensor(module_name='my.module')\n class F(tfp.experimental.AutoCompositeTensor):\n\n def __call__(self, *args, **kwargs):\n return f(*args, **kwargs)\n\n ct_functor = ThingWithCallableArg(a, f=F())\n enc = struct_coder.encode_structure(ct_functor._type_spec)\n dec = struct_coder.decode_proto(enc)\n self.assertEqual(dec, ct_functor._type_spec)\n\n def test_composite_tensor_callable_arg(self):\n # Parameters that are both `CompositeTensor` and callable should be\n # handled by the `_type_spec` as `CompositeTensor`.\n inner_bij = tfb.Scale([[1., 3.]], validate_args=True)\n bij = tfb.TransformDiagonal(inner_bij, 
validate_args=True)\n self.assertLen(tf.nest.flatten(bij), 1)\n self.assertLen(bij._type_spec._callable_params, 0) # pylint: disable=protected-access\n self.assertIn('diag_bijector', bij._type_spec._param_specs) # pylint: disable=protected-access\n\n def test_subclass_with_inherited_type_spec_raises(self):\n\n class StandardNormal(AutoNormal):\n\n def __init__(self):\n super(StandardNormal, self).__init__(\n loc=0., scale=1., validate_args=True)\n\n d = StandardNormal()\n with self.assertRaisesRegex(\n ValueError,\n '`StandardNormal` has inherited the `_type_spec` of `Normal`'):\n tf.nest.flatten(d, expand_composites=True)\n\n AutoStandardNormal = tfp.experimental.auto_composite_tensor(StandardNormal) # pylint: disable=invalid-name\n d_ct = AutoStandardNormal()\n self.assertLen(tf.nest.flatten(d_ct, expand_composites=True), 0)\n\n def test_names_preserved_through_flatten(self):\n\n dist = AutoNormal(0., scale=3., name='ScaleThreeNormal')\n flat = tf.nest.flatten(dist, expand_composites=True)\n unflat = tf.nest.pack_sequence_as(dist, flat, expand_composites=True)\n unflat_name = ('ScaleThreeNormal' if tf.executing_eagerly()\n else 'ScaleThreeNormal_1')\n self.assertEqual(unflat.name, unflat_name)\n\n\nclass _TestTypeSpec(auto_composite_tensor._AutoCompositeTensorTypeSpec):\n\n def __init__(self, param_specs, non_tensor_params=None, omit_kwargs=(),\n prefer_static_value=(), non_identifying_kwargs=(),\n callable_params=None):\n non_tensor_params = {} if non_tensor_params is None else non_tensor_params\n super(_TestTypeSpec, self).__init__(\n param_specs, non_tensor_params=non_tensor_params,\n omit_kwargs=omit_kwargs, prefer_static_value=prefer_static_value,\n non_identifying_kwargs=non_identifying_kwargs,\n callable_params=callable_params)\n\n @property\n def value_type(self):\n \"\"\"Unused `value_type` to allow the `TypeSpec` to be instantiated.\"\"\"\n pass\n\n\n@test_util.test_all_tf_execution_regimes\nclass AutoCompositeTensorTypeSpecTest(test_util.TestCase):\n\n @parameterized.named_parameters(\n ('WithoutCallable',\n _TestTypeSpec(\n param_specs={'a': tf.TensorSpec([3, None], tf.float32)},\n non_tensor_params={'validate_args': True},\n omit_kwargs=('name',),\n prefer_static_value=('a',)),\n _TestTypeSpec(\n param_specs={'a': tf.TensorSpec([3, None], tf.float32)},\n non_tensor_params={'validate_args': True},\n omit_kwargs=('name',),\n prefer_static_value=('a',))),\n ('WithCallable',\n _TestTypeSpec(\n param_specs={'a': tf.TensorSpec([3, None], tf.float32),\n 'b': tfb.Scale(3.)._type_spec},\n omit_kwargs=('name', 'foo'),\n prefer_static_value=('a',),\n callable_params={'f': tf.math.exp}),\n _TestTypeSpec(\n param_specs={'a': tf.TensorSpec([3, None], tf.float32),\n 'b': tfb.Scale(3.)._type_spec},\n omit_kwargs=('name', 'foo'),\n prefer_static_value=('a',),\n callable_params={'f': tf.math.exp})),\n ('DifferentNonIdentifyingKwargsValues',\n _TestTypeSpec(\n param_specs={'x': tf.TensorSpec([], tf.float64)},\n non_tensor_params={'name': 'MyAutoCT'},\n non_identifying_kwargs=('name')),\n _TestTypeSpec(\n param_specs={'x': tf.TensorSpec([], tf.float64)},\n non_tensor_params={'name': 'OtherAutoCT'},\n non_identifying_kwargs=('name'))),\n )\n def testEquality(self, v1, v2):\n # pylint: disable=g-generic-assert\n self.assertEqual(v1, v2)\n self.assertEqual(v2, v1)\n self.assertFalse(v1 != v2)\n self.assertFalse(v2 != v1)\n self.assertEqual(hash(v1), hash(v2))\n\n @parameterized.named_parameters(\n ('DifferentTensorSpecs',\n _TestTypeSpec(\n param_specs={'a': tf.TensorSpec([3, 2], tf.float32)},\n 
non_tensor_params={'validate_args': True},\n omit_kwargs=('name',)),\n _TestTypeSpec(\n param_specs={'a': tf.TensorSpec([3, None], tf.float32)},\n non_tensor_params={'validate_args': True},\n omit_kwargs=('name',))),\n ('DifferentCallables',\n _TestTypeSpec(\n param_specs={'a': tf.TensorSpec([3, None], tf.float32)},\n omit_kwargs=('name', 'foo'),\n callable_params={'f': tf.math.exp}),\n _TestTypeSpec(\n param_specs={'a': tf.TensorSpec([3, None], tf.float32)},\n omit_kwargs=('name', 'foo'),\n callable_params={'f': tf.math.sigmoid})),\n ('DifferentMetadata',\n _TestTypeSpec(\n param_specs={'a': tf.TensorSpec([3, 2], tf.float32)},\n non_tensor_params={'validate_args': True},\n non_identifying_kwargs=('name',)),\n _TestTypeSpec(\n param_specs={'a': tf.TensorSpec([3, None], tf.float32)},\n non_tensor_params={'validate_args': True})),\n )\n def testInequality(self, v1, v2):\n # pylint: disable=g-generic-assert\n self.assertNotEqual(v1, v2)\n self.assertNotEqual(v2, v1)\n self.assertFalse(v1 == v2)\n self.assertFalse(v2 == v1)\n\n @parameterized.named_parameters(\n ('WithoutCallable',\n _TestTypeSpec(\n param_specs={'a': tf.TensorSpec([4, 2], tf.float32)},\n non_tensor_params={'validate_args': True, 'b': 3.},\n omit_kwargs=('name',),\n prefer_static_value=('b',)),\n _TestTypeSpec(\n param_specs={'a': tf.TensorSpec([4, None], tf.float32)},\n non_tensor_params={'validate_args': True, 'b': 3.},\n omit_kwargs=('name',),\n prefer_static_value=('b',))),\n ('WithCallable',\n _TestTypeSpec(\n param_specs={'a': tf.TensorSpec([3, None], tf.float32),\n 'b': tfb.Scale(\n tf.Variable(2., shape=None))._type_spec},\n omit_kwargs=('name', 'foo'),\n callable_params={'f': tf.math.exp}),\n _TestTypeSpec(\n param_specs={'a': tf.TensorSpec([3, None], tf.float32),\n 'b': tfb.Scale(3.)._type_spec},\n omit_kwargs=('name', 'foo'),\n callable_params={'f': tf.math.exp})),\n ('DifferentNonIdentifyingKwargsValues',\n _TestTypeSpec(\n param_specs={'x': tf.TensorSpec(None, tf.float64)},\n non_tensor_params={'name': 'MyAutoCT'},\n non_identifying_kwargs=('name')),\n _TestTypeSpec(\n param_specs={'x': tf.TensorSpec([], tf.float64)},\n non_tensor_params={'name': 'OtherAutoCT'},\n non_identifying_kwargs=('name'))),\n )\n def testIsCompatibleWith(self, v1, v2):\n self.assertTrue(v1.is_compatible_with(v2))\n self.assertTrue(v2.is_compatible_with(v1))\n\n @parameterized.named_parameters(\n ('IncompatibleTensorSpecs',\n _TestTypeSpec(\n param_specs={'a': tf.TensorSpec([4, 2, 3], tf.float32)},\n non_tensor_params={'validate_args': True, 'b': [3, 2]},\n omit_kwargs=('name',),\n prefer_static_value=('b',)),\n _TestTypeSpec(\n param_specs={'a': tf.TensorSpec([4, None], tf.float32)},\n non_tensor_params={'validate_args': True, 'b': [3, 2]},\n omit_kwargs=('name',),\n prefer_static_value=('b',))),\n ('DifferentMetadataSameCallables',\n _TestTypeSpec(\n param_specs={'a': tf.TensorSpec([4, 2], tf.float32)},\n non_tensor_params={'validate_args': True},\n omit_kwargs=('name',),\n callable_params={'g': tf.math.softplus}),\n _TestTypeSpec(\n param_specs={'a': tf.TensorSpec([4, None], tf.float32)},\n non_tensor_params={'validate_args': False},\n omit_kwargs=('name',),\n callable_params={'g': tf.math.softplus})),\n ('DifferentCallables',\n _TestTypeSpec(\n param_specs={'a': tf.TensorSpec([3, None], tf.float32),\n 'b': tfb.Scale(\n tf.Variable(2., shape=None))._type_spec},\n omit_kwargs=('name', 'foo'),\n callable_params={'f': tf.math.exp}),\n _TestTypeSpec(\n param_specs={'a': tf.TensorSpec([3, None], tf.float32),\n 'b': 
tfb.Scale(3.)._type_spec},\n omit_kwargs=('name', 'foo'),\n callable_params={'f': tf.math.sigmoid}))\n )\n def testIsNotCompatibleWith(self, v1, v2):\n self.assertFalse(v1.is_compatible_with(v2))\n self.assertFalse(v2.is_compatible_with(v1))\n\n @parameterized.named_parameters(\n ('WithoutCallable',\n _TestTypeSpec(\n param_specs={'a': tf.TensorSpec([4, 2], tf.float32)},\n omit_kwargs=('name',)),\n _TestTypeSpec(\n param_specs={'a': tf.TensorSpec([4, None], tf.float32)},\n omit_kwargs=('name',)),\n _TestTypeSpec(\n param_specs={'a': tf.TensorSpec([4, None], tf.float32)},\n omit_kwargs=('name',))),\n ('WithCallable',\n _TestTypeSpec(\n param_specs={'a': tf.TensorSpec(None, tf.float32),\n 'b': tfb.Scale(\n tf.Variable(2., shape=None))._type_spec},\n callable_params={'f': tf.math.exp}),\n _TestTypeSpec(\n param_specs={'a': tf.TensorSpec([3, None], tf.float32),\n 'b': tfb.Scale(tf.Variable(3.))._type_spec},\n callable_params={'f': tf.math.exp}),\n _TestTypeSpec(\n param_specs={'a': tf.TensorSpec(None, tf.float32),\n 'b': tfb.Scale(\n tf.Variable(2., shape=None))._type_spec},\n callable_params={'f': tf.math.exp})),\n )\n def testMostSpecificCompatibleType(self, v1, v2, expected):\n self.assertEqual(v1.most_specific_compatible_type(v2), expected)\n self.assertEqual(v2.most_specific_compatible_type(v1), expected)\n\n @parameterized.named_parameters(\n ('DifferentParamSpecs',\n _TestTypeSpec(\n param_specs={'a': tf.TensorSpec([4, 2], tf.float32)},\n omit_kwargs=('foo',)),\n _TestTypeSpec(\n param_specs={'b': tf.TensorSpec([5, None], tf.float32)},\n omit_kwargs=('foo',))),\n ('DifferentMetadata',\n _TestTypeSpec(\n param_specs={'a': tf.TensorSpec([4, 2], tf.float32)},\n omit_kwargs=('foo',)),\n _TestTypeSpec(\n param_specs={'a': tf.TensorSpec([4, None], tf.float32)},\n omit_kwargs=('bar',))),\n ('DifferentCallables',\n _TestTypeSpec(\n param_specs={'a': tf.TensorSpec(None, tf.float32),\n 'b': tfb.Scale(\n tf.Variable(2., shape=None))._type_spec},\n callable_params={'f': tf.math.exp}),\n _TestTypeSpec(\n param_specs={'a': tf.TensorSpec([3, None], tf.float32),\n 'b': tfb.Scale(tf.Variable(3.))._type_spec},\n callable_params={'f': tf.math.softplus})),\n )\n def testMostSpecificCompatibleTypeException(self, v1, v2):\n with self.assertRaises(ValueError):\n v1.most_specific_compatible_type(v2)\n with self.assertRaises(ValueError):\n v2.most_specific_compatible_type(v1)\n\n @parameterized.named_parameters(\n ('WithoutCallable',\n _TestTypeSpec(\n param_specs={'a': tf.TensorSpec([4, 2], tf.float32)},\n omit_kwargs=('parameters',), non_identifying_kwargs=('name',))),\n ('WithCallable',\n _TestTypeSpec(\n param_specs={'a': tf.TensorSpec(None, tf.float32),\n 'b': tfb.Scale(\n tf.Variable(2., shape=None))._type_spec},\n callable_params={'f': tf.math.exp})),\n )\n def testRepr(self, spec):\n spec_data = (auto_composite_tensor._AUTO_COMPOSITE_TENSOR_VERSION,\n spec._param_specs, spec._non_tensor_params, spec._omit_kwargs,\n spec._prefer_static_value, spec._non_identifying_kwargs,\n spec._callable_params)\n self.assertEqual(repr(spec), f'_TestTypeSpec{spec_data}')\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.compat.v2.matmul", "tensorflow.compat.v2.__internal__.saved_model.StructureCoder", "tensorflow.compat.v2.TensorSpec", "tensorflow.compat.v2.test.main", "tensorflow.compat.v2.nest.flatten", "tensorflow.compat.v2.name_scope", "tensorflow.compat.v2.saved_model.load", "tensorflow.compat.v2.nest.pack_sequence_as", "tensorflow.compat.v2.linalg.eye", "tensorflow.compat.v2.saved_model.save", "tensorflow.compat.v2.ones", "tensorflow.compat.v2.executing_eagerly", "tensorflow.compat.v2.Variable", "tensorflow.compat.v2.convert_to_tensor", "tensorflow.compat.v2.constant", "tensorflow.compat.v2.while_loop", "tensorflow.compat.v2.nest.map_structure", "tensorflow.compat.v2.enable_v2_behavior", "tensorflow.compat.v2.reduce_prod" ] ]
shingchuang/semseg
[ "86789dbb4fa481ac7be30ef6b052517bab360696" ]
[ "tool/demo.py" ]
[ "import os\nimport logging\nimport argparse\n\nimport cv2\nimport numpy as np\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.nn.functional as F\nimport torch.nn.parallel\nimport torch.utils.data\n\nfrom util import config\nfrom util.util import colorize\n\ncv2.ocl.setUseOpenCL(False)\n\n\ndef get_parser():\n parser = argparse.ArgumentParser(description='PyTorch Semantic Segmentation')\n parser.add_argument('--config', type=str, default='config/ade20k/ade20k_pspnet50.yaml', help='config file')\n parser.add_argument('--image', type=str, default='figure/demo/ADE_val_00001515.jpg', help='input image')\n parser.add_argument('opts', help='see config/ade20k/ade20k_pspnet50.yaml for all options', default=None, nargs=argparse.REMAINDER)\n args = parser.parse_args()\n assert args.config is not None\n cfg = config.load_cfg_from_cfg_file(args.config)\n cfg.image = args.image\n if args.opts is not None:\n cfg = config.merge_cfg_from_list(cfg, args.opts)\n return cfg\n\n\ndef get_logger():\n logger_name = \"main-logger\"\n logger = logging.getLogger(logger_name)\n logger.setLevel(logging.INFO)\n handler = logging.StreamHandler()\n fmt = \"[%(asctime)s %(levelname)s %(filename)s line %(lineno)d %(process)d] %(message)s\"\n handler.setFormatter(logging.Formatter(fmt))\n logger.addHandler(handler)\n return logger\n\n\ndef check(args):\n assert args.classes > 1\n assert args.zoom_factor in [1, 2, 4, 8]\n assert args.split in ['train', 'val', 'test']\n if args.arch == 'psp':\n assert (args.train_h - 1) % 8 == 0 and (args.train_w - 1) % 8 == 0\n elif args.arch == 'psa':\n if args.compact:\n args.mask_h = (args.train_h - 1) // (8 * args.shrink_factor) + 1\n args.mask_w = (args.train_w - 1) // (8 * args.shrink_factor) + 1\n else:\n assert (args.mask_h is None and args.mask_w is None) or (args.mask_h is not None and args.mask_w is not None)\n if args.mask_h is None and args.mask_w is None:\n args.mask_h = 2 * ((args.train_h - 1) // (8 * args.shrink_factor) + 1) - 1\n args.mask_w = 2 * ((args.train_w - 1) // (8 * args.shrink_factor) + 1) - 1\n else:\n assert (args.mask_h % 2 == 1) and (args.mask_h >= 3) and (\n args.mask_h <= 2 * ((args.train_h - 1) // (8 * args.shrink_factor) + 1) - 1)\n assert (args.mask_w % 2 == 1) and (args.mask_w >= 3) and (\n args.mask_w <= 2 * ((args.train_h - 1) // (8 * args.shrink_factor) + 1) - 1)\n else:\n raise Exception('architecture not supported yet'.format(args.arch))\n\n\ndef main():\n global args, logger\n args = get_parser()\n check(args)\n logger = get_logger()\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = ','.join(str(x) for x in args.test_gpu)\n logger.info(args)\n logger.info(\"=> creating model ...\")\n logger.info(\"Classes: {}\".format(args.classes))\n\n value_scale = 255\n mean = [0.485, 0.456, 0.406]\n mean = [item * value_scale for item in mean]\n std = [0.229, 0.224, 0.225]\n std = [item * value_scale for item in std]\n colors = np.loadtxt(args.colors_path).astype('uint8')\n\n if args.arch == 'psp':\n from model.pspnet import PSPNet\n model = PSPNet(layers=args.layers, classes=args.classes, zoom_factor=args.zoom_factor, pretrained=False)\n elif args.arch == 'psa':\n from model.psanet import PSANet\n model = PSANet(layers=args.layers, classes=args.classes, zoom_factor=args.zoom_factor, compact=args.compact,\n shrink_factor=args.shrink_factor, mask_h=args.mask_h, mask_w=args.mask_w,\n normalization_factor=args.normalization_factor, psa_softmax=args.psa_softmax, pretrained=False)\n logger.info(model)\n model = torch.nn.DataParallel(model).cuda()\n 
cudnn.benchmark = True\n if os.path.isfile(args.model_path):\n logger.info(\"=> loading checkpoint '{}'\".format(args.model_path))\n checkpoint = torch.load(args.model_path)\n model.load_state_dict(checkpoint['state_dict'], strict=False)\n logger.info(\"=> loaded checkpoint '{}'\".format(args.model_path))\n else:\n raise RuntimeError(\"=> no checkpoint found at '{}'\".format(args.model_path))\n test(model.eval(), args.image, args.classes, mean, std, args.base_size, args.test_h, args.test_w, args.scales, colors)\n\n\ndef net_process(model, image, mean, std=None, flip=True):\n input = torch.from_numpy(image.transpose((2, 0, 1))).float()\n if std is None:\n for t, m in zip(input, mean):\n t.sub_(m)\n else:\n for t, m, s in zip(input, mean, std):\n t.sub_(m).div_(s)\n input = input.unsqueeze(0).cuda()\n if flip:\n input = torch.cat([input, input.flip(3)], 0)\n with torch.no_grad():\n output = model(input)\n _, _, h_i, w_i = input.shape\n _, _, h_o, w_o = output.shape\n if (h_o != h_i) or (w_o != w_i):\n output = F.interpolate(output, (h_i, w_i), mode='bilinear', align_corners=True)\n output = F.softmax(output, dim=1)\n if flip:\n output = (output[0] + output[1].flip(2)) / 2\n else:\n output = output[0]\n output = output.data.cpu().numpy()\n output = output.transpose(1, 2, 0)\n return output\n\n\ndef scale_process(model, image, classes, crop_h, crop_w, h, w, mean, std=None, stride_rate=2/3):\n ori_h, ori_w, _ = image.shape\n pad_h = max(crop_h - ori_h, 0)\n pad_w = max(crop_w - ori_w, 0)\n pad_h_half = int(pad_h / 2)\n pad_w_half = int(pad_w / 2)\n if pad_h > 0 or pad_w > 0:\n image = cv2.copyMakeBorder(image, pad_h_half, pad_h - pad_h_half, pad_w_half, pad_w - pad_w_half, cv2.BORDER_CONSTANT, value=mean)\n new_h, new_w, _ = image.shape\n stride_h = int(np.ceil(crop_h*stride_rate))\n stride_w = int(np.ceil(crop_w*stride_rate))\n grid_h = int(np.ceil(float(new_h-crop_h)/stride_h) + 1)\n grid_w = int(np.ceil(float(new_w-crop_w)/stride_w) + 1)\n prediction_crop = np.zeros((new_h, new_w, classes), dtype=float)\n count_crop = np.zeros((new_h, new_w), dtype=float)\n for index_h in range(0, grid_h):\n for index_w in range(0, grid_w):\n s_h = index_h * stride_h\n e_h = min(s_h + crop_h, new_h)\n s_h = e_h - crop_h\n s_w = index_w * stride_w\n e_w = min(s_w + crop_w, new_w)\n s_w = e_w - crop_w\n image_crop = image[s_h:e_h, s_w:e_w].copy()\n count_crop[s_h:e_h, s_w:e_w] += 1\n prediction_crop[s_h:e_h, s_w:e_w, :] += net_process(model, image_crop, mean, std)\n prediction_crop /= np.expand_dims(count_crop, 2)\n prediction_crop = prediction_crop[pad_h_half:pad_h_half+ori_h, pad_w_half:pad_w_half+ori_w]\n prediction = cv2.resize(prediction_crop, (w, h), interpolation=cv2.INTER_LINEAR)\n return prediction\n\n\ndef test(model, image_path, classes, mean, std, base_size, crop_h, crop_w, scales, colors):\n image = cv2.imread(image_path, cv2.IMREAD_COLOR) # BGR 3 channel ndarray wiht shape H * W * 3\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # convert cv2 read image from BGR order to RGB order\n h, w, _ = image.shape\n prediction = np.zeros((h, w, classes), dtype=float)\n for scale in scales:\n long_size = round(scale * base_size)\n new_h = long_size\n new_w = long_size\n if h > w:\n new_w = round(long_size/float(h)*w)\n else:\n new_h = round(long_size/float(w)*h)\n image_scale = cv2.resize(image, (new_w, new_h), interpolation=cv2.INTER_LINEAR)\n prediction += scale_process(model, image_scale, classes, crop_h, crop_w, h, w, mean, std)\n prediction = np.argmax(prediction, axis=2)\n gray = 
np.uint8(prediction)\n color = colorize(gray, colors)\n image_name = image_path.split('/')[-1].split('.')[0]\n gray_path = os.path.join('./figure/demo/', image_name + '_gray.png')\n color_path = os.path.join('./figure/demo/', image_name + '_color.png')\n cv2.imwrite(gray_path, gray)\n color.save(color_path)\n logger.info(\"=> Prediction saved in {}\".format(color_path))\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.uint8", "numpy.ceil", "numpy.zeros", "torch.nn.DataParallel", "torch.no_grad", "torch.nn.functional.interpolate", "numpy.loadtxt", "numpy.argmax", "torch.load", "torch.nn.functional.softmax", "numpy.expand_dims" ] ]
Mausy5043/kamstrupd
[ "b63043f68c8e084125f96359e3aab84611c270dc" ]
[ "bin/trend.py" ]
[ "#!/usr/bin/env python3\n\n\"\"\"Create trendbargraphs for various periods of electricity use and production.\"\"\"\n\nimport argparse\nfrom datetime import datetime as dt\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport constants\n# noinspection PyUnresolvedReferences\nimport libkamstrup as kl\n\nDATABASE = constants.TREND['database']\nOPTION = \"\"\n\n\ndef fetch_last_day(hours_to_fetch):\n \"\"\"...\n \"\"\"\n global DATABASE\n config = kl.add_time_line({\"grouping\": \"%m-%d %Hh\",\n \"period\": hours_to_fetch,\n \"timeframe\": \"hour\",\n \"database\": DATABASE,\n \"table\": \"production\",\n }\n )\n\n opwekking, prod_lbls = kl.get_historic_data(config, telwerk=\"energy\")\n config[\"table\"] = \"kamstrup\"\n import_lo, data_lbls = kl.get_historic_data(config, telwerk=\"T1in\")\n import_hi, data_lbls = kl.get_historic_data(config, telwerk=\"T2in\")\n export_lo, data_lbls = kl.get_historic_data(config, telwerk=\"T1out\")\n export_hi, data_lbls = kl.get_historic_data(config, telwerk=\"T2out\")\n # production data may not yet have caught up to the current hour\n if not (prod_lbls[-1] == data_lbls[-1]):\n opwekking = opwekking[:-1]\n np.append(opwekking, 0.0)\n return data_lbls, import_lo, import_hi, opwekking, export_lo, export_hi\n\n\ndef fetch_last_month(days_to_fetch):\n \"\"\"...\n \"\"\"\n global DATABASE\n config = kl.add_time_line({\"grouping\": \"%m-%d\",\n \"period\": days_to_fetch,\n \"timeframe\": \"day\",\n \"database\": DATABASE,\n \"table\": \"production\",\n }\n )\n opwekking, prod_lbls = kl.get_historic_data(config, telwerk=\"energy\")\n config[\"table\"] = \"kamstrup\"\n import_lo, data_lbls = kl.get_historic_data(config, telwerk=\"T1in\")\n import_hi, data_lbls = kl.get_historic_data(config, telwerk=\"T2in\")\n export_lo, data_lbls = kl.get_historic_data(config, telwerk=\"T1out\")\n export_hi, data_lbls = kl.get_historic_data(config, telwerk=\"T2out\")\n # production data may not yet have caught up to the current hour\n if not (prod_lbls[-1] == data_lbls[-1]):\n opwekking = opwekking[:-1]\n np.append(opwekking, 0.0)\n return data_lbls, import_lo, import_hi, opwekking, export_lo, export_hi\n\n\ndef fetch_last_year(months_to_fetch):\n \"\"\"...\n \"\"\"\n global DATABASE\n config = kl.add_time_line({\"grouping\": \"%Y-%m\",\n \"period\": months_to_fetch,\n \"timeframe\": \"month\",\n \"database\": DATABASE,\n \"table\": \"production\",\n }\n )\n opwekking, prod_lbls = kl.get_historic_data(config,\n telwerk=\"energy\",\n from_start_of_year=True\n )\n config[\"table\"] = \"kamstrup\"\n import_lo, data_lbls = kl.get_historic_data(config,\n telwerk=\"T1in\",\n from_start_of_year=True\n )\n import_hi, data_lbls = kl.get_historic_data(config,\n telwerk=\"T2in\",\n from_start_of_year=True\n )\n export_lo, data_lbls = kl.get_historic_data(config,\n telwerk=\"T1out\",\n from_start_of_year=True\n )\n export_hi, data_lbls = kl.get_historic_data(config,\n telwerk=\"T2out\",\n from_start_of_year=True\n )\n # production data may not yet have caught up to the current hour\n if not (prod_lbls[-1] == data_lbls[-1]):\n opwekking = opwekking[:-1]\n np.append(opwekking, 0.0)\n return data_lbls, import_lo, import_hi, opwekking, export_lo, export_hi\n\n\ndef fetch_last_years(years_to_fetch):\n \"\"\"...\n \"\"\"\n global DATABASE\n config = kl.add_time_line({\"grouping\": \"%Y\",\n \"period\": years_to_fetch,\n \"timeframe\": \"year\",\n \"database\": DATABASE,\n \"table\": \"production\",\n }\n )\n opwekking, prod_lbls = kl.get_historic_data(config,\n telwerk=\"energy\",\n 
from_start_of_year=True\n )\n config[\"table\"] = \"kamstrup\"\n import_lo, data_lbls = kl.get_historic_data(config,\n telwerk=\"T1in\",\n from_start_of_year=True\n )\n import_hi, data_lbls = kl.get_historic_data(config,\n telwerk=\"T2in\",\n from_start_of_year=True\n )\n export_lo, data_lbls = kl.get_historic_data(config,\n telwerk=\"T1out\",\n from_start_of_year=True\n )\n export_hi, data_lbls = kl.get_historic_data(config,\n telwerk=\"T2out\",\n from_start_of_year=True\n )\n # production data may not yet have caught up to the current hour\n if not (prod_lbls[-1] == data_lbls[-1]):\n opwekking = opwekking[:-1]\n np.append(opwekking, 0.0)\n return data_lbls, import_lo, import_hi, opwekking, export_lo, export_hi\n\n\ndef plot_graph(output_file, data_tuple, plot_title, show_data=0):\n \"\"\"...\n \"\"\"\n data_lbls = data_tuple[0]\n import_lo = data_tuple[1]\n import_hi = data_tuple[2]\n opwekking = data_tuple[3]\n export_lo = data_tuple[4]\n export_hi = data_tuple[5]\n imprt = kl.contract(import_lo, import_hi)\n exprt = kl.contract(export_lo, export_hi)\n own_usage = kl.distract(opwekking, exprt)\n usage = kl.contract(own_usage, imprt)\n btm_hi = kl.contract(import_lo, own_usage)\n \"\"\"\n --- Start debugging:\n np.set_printoptions(precision=3)\n print(\"data_lbls: \", np.size(data_lbls), data_lbls[-5:])\n print(\" \")\n print(\"opwekking: \", np.size(opwekking), opwekking[-5:])\n print(\" \")\n print(\"export_hi: \", np.size(export_hi), export_hi[-5:])\n print(\"export_lo: \", np.size(export_lo), export_lo[-5:])\n print(\"exprt : \", np.size(exprt), exprt[-5:])\n print(\" \")\n print(\"import_hi: \", np.size(import_hi), import_hi[-5:])\n print(\"import_lo: \", np.size(import_lo), import_lo[-5:])\n print(\"imprt : \", np.size(imprt), imprt[-5:])\n print(\" \")\n print(\"own_usage: \", np.size(own_usage), own_usage[-5:])\n print(\"usage : \", np.size(usage), usage[-5:])\n print(\" \")\n print(\"btm_hi : \", np.size(btm_hi), btm_hi[-5:])\n --- End debugging.\n \"\"\"\n # Set the bar width\n bar_width = 0.75\n # Set the color alpha\n ahpla = 0.7\n # positions of the left bar-boundaries\n tick_pos = list(range(1, len(data_lbls) + 1))\n\n # Create the general plot and the bar\n plt.rc(\"font\", size=6.5)\n dummy, ax1 = plt.subplots(1, figsize=(10, 3.5))\n col_import = \"red\"\n col_export = \"blue\"\n col_usage = \"green\"\n\n # Create a bar plot of import_lo\n ax1.bar(tick_pos,\n import_hi,\n width=bar_width,\n label=\"Inkoop (normaal)\",\n alpha=ahpla,\n color=col_import,\n align=\"center\",\n bottom=btm_hi, # [sum(i) for i in zip(import_lo, own_usage)]\n )\n # Create a bar plot of import_hi\n ax1.bar(tick_pos,\n import_lo,\n width=bar_width,\n label=\"Inkoop (dal)\",\n alpha=ahpla * 0.5,\n color=col_import,\n align=\"center\",\n bottom=own_usage,\n )\n # Create a bar plot of own_usage\n ax1.bar(tick_pos,\n own_usage,\n width=bar_width,\n label=\"Eigen gebruik\",\n alpha=ahpla,\n color=col_usage,\n align=\"center\",\n )\n if show_data == 1:\n for i, v in enumerate(own_usage):\n ax1.text(tick_pos[i],\n 10,\n \"{:7.3f}\".format(v),\n {\"ha\": \"center\", \"va\": \"bottom\"},\n rotation=-90,\n )\n if show_data == 2:\n for i, v in enumerate(usage):\n ax1.text(tick_pos[i],\n 500,\n \"{:4.0f}\".format(v),\n {\"ha\": \"center\", \"va\": \"bottom\"},\n fontsize=12,\n )\n # Exports hang below the y-axis\n # Create a bar plot of export_lo\n ax1.bar(tick_pos,\n [-1 * i for i in export_lo],\n width=bar_width,\n label=\"Verkoop (dal)\",\n alpha=ahpla * 0.5,\n color=col_export,\n align=\"center\",\n )\n # 
Create a bar plot of export_hi\n ax1.bar(tick_pos,\n [-1 * i for i in export_hi],\n width=bar_width,\n label=\"Verkoop (normaal)\",\n alpha=ahpla,\n color=col_export,\n align=\"center\",\n bottom=[-1 * i for i in export_lo],\n )\n if show_data == 1:\n for i, v in enumerate(exprt):\n ax1.text(tick_pos[i],\n -10,\n \"{:7.3f}\".format(v),\n {\"ha\": \"center\", \"va\": \"top\"},\n rotation=-90,\n )\n if show_data == 2:\n for i, v in enumerate(exprt):\n ax1.text(tick_pos[i],\n -500,\n \"{:4.0f}\".format(v),\n {\"ha\": \"center\", \"va\": \"top\"},\n fontsize=12,\n )\n\n # Set Axes stuff\n ax1.set_ylabel(\"[kWh]\")\n if show_data == 0:\n y_lo = -1 * (max(exprt) + 1)\n y_hi = max(usage) + 1\n if y_lo > -1.5:\n y_lo = -1.5\n if y_hi < 1.5:\n y_hi = 1.5\n ax1.set_ylim([y_lo, y_hi])\n\n ax1.set_xlabel(\"Datetime\")\n ax1.grid(which=\"major\",\n axis=\"y\",\n color=\"k\",\n linestyle=\"--\",\n linewidth=0.5\n )\n ax1.axhline(y=0, color=\"k\")\n ax1.axvline(x=0, color=\"k\")\n # Set plot stuff\n plt.xticks(tick_pos, data_lbls, rotation=-60)\n plt.title(f\"{plot_title}\")\n plt.legend(loc=\"upper left\", ncol=5, framealpha=0.2)\n # Fit every nicely\n plt.xlim([min(tick_pos) - bar_width, max(tick_pos) + bar_width])\n plt.tight_layout()\n plt.savefig(fname=f\"{output_file}\", format=\"png\")\n\n\ndef main():\n \"\"\"\n This is the main loop\n \"\"\"\n global OPTION\n\n if OPTION.hours:\n plot_graph(constants.TREND['day_graph'],\n fetch_last_day(OPTION.hours),\n f\"Energietrend per uur afgelopen dagen ({dt.now().strftime('%d-%m-%Y %H:%M:%S')})\",\n )\n if OPTION.days:\n plot_graph(constants.TREND['month_graph'],\n fetch_last_month(OPTION.days),\n f\"Energietrend per dag afgelopen maand ({dt.now().strftime('%d-%m-%Y %H:%M:%S')})\",\n )\n if OPTION.months:\n plot_graph(constants.TREND['year_graph'],\n fetch_last_year(OPTION.months),\n f\"Energietrend per maand afgelopen jaren ({dt.now().strftime('%d-%m-%Y %H:%M:%S')})\",\n show_data=1,\n )\n if OPTION.years:\n plot_graph(constants.TREND['vsyear_graph'],\n fetch_last_years(OPTION.years),\n f\"Energietrend per jaar afgelopen jaren ({dt.now().strftime('%d-%m-%Y %H:%M:%S')})\",\n show_data=2,\n )\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Create a trendgraph\")\n parser.add_argument(\"-hr\",\n \"--hours\",\n type=int,\n help=\"create hour-trend for last <HOURS> hours\",\n )\n parser.add_argument(\"-d\",\n \"--days\",\n type=int,\n help=\"create day-trend for last <DAYS> days\"\n )\n parser.add_argument(\"-m\",\n \"--months\",\n type=int,\n help=\"number of months of data to use for the graph\",\n )\n parser.add_argument(\"-y\",\n \"--years\",\n type=int,\n help=\"number of months of data to use for the graph\",\n )\n OPTION = parser.parse_args()\n if OPTION.hours == 0:\n OPTION.hours = 50\n if OPTION.days == 0:\n OPTION.days = 50\n if OPTION.months == 0:\n OPTION.months = 38\n if OPTION.years == 0:\n OPTION.years = 6\n main()\n" ]
[ [ "matplotlib.pyplot.savefig", "matplotlib.pyplot.title", "matplotlib.pyplot.legend", "matplotlib.pyplot.subplots", "matplotlib.pyplot.rc", "matplotlib.pyplot.tight_layout", "numpy.append", "matplotlib.pyplot.xticks" ] ]
RacleRay/Project-Practice
[ "78d8a31dc4f4ffdf620521d375a475b46ea79cf7" ]
[ "yolov3/slim_infer_only/run_test.py" ]
[ "import numpy as np\r\nimport tensorflow as tf\r\nfrom PIL import Image, ImageDraw\r\n\r\nfrom yolov3 import yolo_v3\r\nfrom nms import non_max_suppression, detections_boxes\r\n\r\n\r\n# 加载权重\r\ndef load_weights(var_list, weights_file):\r\n with open(weights_file, \"rb\") as fp:\r\n _ = np.fromfile(fp, dtype=np.int32, count=5)\r\n weights = np.fromfile(fp, dtype=np.float32)\r\n\r\n ptr = 0\r\n i = 0\r\n assign_ops = []\r\n while i < len(var_list) - 1:\r\n var1 = var_list[i]\r\n var2 = var_list[i + 1]\r\n # 找到卷积项\r\n if 'Conv' in var1.name.split('/')[-2]:\r\n # 找到BN参数项\r\n if 'BatchNorm' in var2.name.split('/')[-2]:\r\n # 加载批量归一化参数\r\n gamma, beta, mean, var = var_list[i + 1:i + 5]\r\n batch_norm_vars = [beta, gamma, mean, var]\r\n for var in batch_norm_vars:\r\n shape = var.shape.as_list()\r\n num_params = np.prod(shape)\r\n var_weights = weights[ptr:ptr + num_params].reshape(shape)\r\n ptr += num_params\r\n assign_ops.append(tf.assign(var, var_weights, validate_shape=True))\r\n\r\n i += 4 # 已经加载了4个变量,指针移动4\r\n elif 'Conv' in var2.name.split('/')[-2]:\r\n bias = var2\r\n bias_shape = bias.shape.as_list()\r\n bias_params = np.prod(bias_shape)\r\n bias_weights = weights[ptr:ptr + bias_params].reshape(bias_shape)\r\n ptr += bias_params\r\n assign_ops.append(tf.assign(bias, bias_weights, validate_shape=True))\r\n\r\n i += 1\r\n\r\n shape = var1.shape.as_list()\r\n num_params = np.prod(shape)\r\n\r\n # 加载权重\r\n var_weights = weights[ptr:ptr + num_params].reshape((shape[3], shape[2], shape[0], shape[1]))\r\n var_weights = np.transpose(var_weights, (2, 3, 1, 0))\r\n ptr += num_params\r\n assign_ops.append(tf.assign(var1, var_weights, validate_shape=True))\r\n i += 1\r\n\r\n return assign_ops\r\n\r\n\r\n# 将结果显示在图片上\r\ndef draw_boxes(boxes, img, cls_names, detection_size):\r\n draw = ImageDraw.Draw(img)\r\n\r\n for cls, bboxs in boxes.items():\r\n color = tuple(np.random.randint(0, 256, 3))\r\n for box, score in bboxs:\r\n box = convert_to_original_size(box, np.array(detection_size), np.array(img.size))\r\n draw.rectangle(box, outline=color)\r\n draw.text(box[:2], '{} {:.2f}%'.format(cls_names[cls], score * 100), fill=color)\r\n print('{} {:.2f}%'.format(cls_names[cls], score * 100),box[:2])\r\n\r\ndef convert_to_original_size(box, size, original_size):\r\n ratio = original_size / size\r\n box = box.reshape(2, 2) * ratio\r\n return list(box.reshape(-1))\r\n\r\n\r\n# 加载数据集标签名称\r\ndef load_coco_names(file_name):\r\n names = {}\r\n with open(file_name) as f:\r\n for id, name in enumerate(f):\r\n names[id] = name\r\n return names\r\n\r\n\r\ndef main(size, conf_threshold, iou_threshold, input_img, output_img, class_names,\r\n weights_file):\r\n tf.reset_default_graph()\r\n img = Image.open(input_img)\r\n img_resized = img.resize(size=(size, size))\r\n classes = load_coco_names(class_names)\r\n\r\n inputs = tf.placeholder(tf.float32, [None, size, size, 3])\r\n\r\n with tf.variable_scope('detector'):\r\n detections = yolo_v3(inputs, len(classes), data_format='NHWC') # 定义网络结构\r\n load_ops = load_weights(tf.global_variables(scope='detector'), weights_file) # 加载权重\r\n\r\n boxes = detections_boxes(detections)\r\n with tf.Session() as sess:\r\n sess.run(load_ops)\r\n detected_boxes = sess.run(boxes, feed_dict={inputs: [np.array(img_resized, dtype=np.float32)]})\r\n\r\n filtered_boxes = non_max_suppression(detected_boxes,\r\n confidence_threshold=conf_threshold,\r\n iou_threshold=iou_threshold)\r\n\r\n draw_boxes(filtered_boxes, img, classes, (size, size))\r\n\r\n img.save(output_img)\r\n 
img.show()\r\n\r\n\r\nif __name__ == '__main__':\r\n imgsize = 416\r\n input_img ='data/timg.jpg'\r\n output_img = 'data/out.jpg'\r\n class_names = 'data/coco.names'\r\n weights_file = 'model/yolov3.weights'\r\n conf_threshold = 0.5 #置信度阈值\r\n iou_threshold = 0.4 #重叠区域阈值\r\n\r\n main(imgsize, conf_threshold, iou_threshold,\r\n input_img, output_img, class_names,\r\n weights_file)" ]
[ [ "numpy.array", "tensorflow.assign", "tensorflow.Session", "tensorflow.reset_default_graph", "tensorflow.global_variables", "tensorflow.variable_scope", "numpy.prod", "tensorflow.placeholder", "numpy.transpose", "numpy.fromfile", "numpy.random.randint" ] ]
SinestroEdmonce/tensorflow
[ "00befcdeb87f1fc490d247d127ee438f63fe3666" ]
[ "tensorflow/python/distribute/custom_training_loop_test.py" ]
[ "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for custom training loops.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom absl.testing import parameterized\nimport numpy as np\n\nfrom tensorflow.python import keras\nfrom tensorflow.python import tf2\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.distribute import combinations\nfrom tensorflow.python.distribute import reduce_util\nfrom tensorflow.python.distribute import strategy_combinations\nfrom tensorflow.python.eager import backprop\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.eager import test\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.util import nest\n\n\ndef get_dataset_from_tensor_slices(inp_array):\n dataset = dataset_ops.DatasetV2.from_tensor_slices(inp_array)\n # TODO(b/138326910): Remove Dataset V1 version once bug resolved.\n if not tf2.enabled():\n dataset = dataset_ops.Dataset.from_tensor_slices(inp_array)\n return dataset\n\n\nclass AssertFlattenedMixin(object):\n \"\"\"Mixin for specialized asserts.\"\"\"\n\n def assert_equal_flattened(self, expected_results, actual_results):\n \"\"\"Asserts that flattened results are equal.\n\n Due to the number of replicas in the strategy, the output may have a\n different structure and needs to be flattened for comparison.\n\n Args:\n expected_results: The results expected as a result of a computation.\n actual_results: The actual results of a computation.\n \"\"\"\n self.assertEqual(len(expected_results), len(actual_results))\n\n for i, expected_result in enumerate(expected_results):\n final_result = []\n actual_result = actual_results[i]\n for val in actual_result:\n final_result.extend(val.numpy())\n self.assertAllEqual(expected_result, final_result)\n\n\nclass InputIterationTest(test.TestCase, parameterized.TestCase,\n AssertFlattenedMixin):\n\n @combinations.generate(\n combinations.combine(\n distribution=strategy_combinations.all_strategies,\n mode=[\"eager\"]\n ))\n def testConstantNumpyInput(self, distribution):\n\n @def_function.function\n def run(x):\n\n def computation(x):\n return math_ops.square(x)\n\n outputs = distribution.experimental_local_results(\n distribution.experimental_run_v2(computation, args=(x,)))\n return outputs\n\n self.assertAllEqual(\n constant_op.constant(4., shape=(distribution.num_replicas_in_sync)),\n run(2.))\n\n @combinations.generate(\n combinations.combine(\n distribution=strategy_combinations.all_strategies,\n mode=[\"eager\"]\n ))\n def 
testStatefulExperimentalRunAlwaysExecute(self, distribution):\n with distribution.scope():\n v = variables.Variable(\n 0.0, aggregation=variables.VariableAggregation.MEAN)\n\n @def_function.function\n def train_step():\n\n def assign_add():\n v.assign_add(1.0)\n\n distribution.experimental_run_v2(assign_add)\n return array_ops.zeros([])\n\n train_step()\n self.assertAllEqual(1.0, v.numpy())\n\n @combinations.generate(\n combinations.combine(\n distribution=strategy_combinations.strategies_minus_tpu,\n mode=[\"eager\"]))\n def testFullEager(self, distribution):\n dataset = get_dataset_from_tensor_slices([5., 6., 7., 8.]).batch(2)\n\n def train_step(data):\n return math_ops.square(data)\n\n dist_dataset = distribution.experimental_distribute_dataset(dataset)\n results = []\n for x in dist_dataset:\n output = distribution.experimental_local_results(\n distribution.experimental_run_v2(train_step, args=(x,)))\n results.append(output)\n self.assert_equal_flattened([[25., 36.], [49., 64.]], results)\n\n @combinations.generate(\n combinations.combine(\n distribution=strategy_combinations.all_strategies,\n mode=[\"eager\"]\n ))\n def testStepInFunction(self, distribution):\n dataset = get_dataset_from_tensor_slices([5., 6., 7., 8.]).batch(2)\n\n @def_function.function\n def train_step(data):\n return math_ops.square(data)\n\n dist_dataset = distribution.experimental_distribute_dataset(dataset)\n results = []\n for x in dist_dataset:\n output = distribution.experimental_local_results(\n distribution.experimental_run_v2(train_step, args=(x,)))\n results.append(output)\n self.assert_equal_flattened([[25., 36.], [49., 64.]], results)\n\n @combinations.generate(\n combinations.combine(\n distribution=strategy_combinations.all_strategies,\n mode=[\"eager\"]\n ))\n def testRunInFunction(self, distribution):\n dataset = get_dataset_from_tensor_slices([5., 6., 7., 8.]).batch(2)\n\n def train_step(data):\n return math_ops.square(data)\n\n @def_function.function\n def f_train_step(input_data):\n return distribution.experimental_local_results(\n distribution.experimental_run_v2(train_step, args=(input_data,)))\n\n dist_dataset = distribution.experimental_distribute_dataset(dataset)\n results = []\n for x in dist_dataset:\n output = f_train_step(x)\n results.append(output)\n self.assert_equal_flattened([[25., 36.], [49., 64.]], results)\n\n @combinations.generate(\n combinations.combine(\n distribution=[\n strategy_combinations.mirrored_strategy_with_gpu_and_cpu,\n strategy_combinations.tpu_strategy\n ],\n mode=[\"eager\"]))\n def testNestedOutput(self, distribution):\n dataset = get_dataset_from_tensor_slices([0, 1, 2, 3]).batch(2)\n input_iterator = iter(distribution.experimental_distribute_dataset(dataset))\n\n @def_function.function\n def run(iterator):\n\n def computation(x):\n return [{\n \"a\": x - 1,\n \"b\": x + 1\n }]\n\n inputs = next(iterator)\n outputs = distribution.experimental_run_v2(computation, args=(inputs,))\n return nest.map_structure(distribution.experimental_local_results,\n outputs)\n\n results = run(input_iterator)\n for replica in range(distribution.num_replicas_in_sync):\n # The input dataset is range(4), so the replica id is same as input.\n self.assertAllEqual(results[0][\"a\"][replica], [replica - 1])\n self.assertAllEqual(results[0][\"b\"][replica], [replica + 1])\n\n @combinations.generate(\n combinations.combine(\n distribution=strategy_combinations.all_strategies,\n mode=[\"eager\"]\n ))\n def testRunInFunctionAutoGraphApplication(self, distribution):\n dataset = 
get_dataset_from_tensor_slices([5., 6., 7., 8.]).batch(2)\n\n def train_step(data):\n return math_ops.square(data)\n\n @def_function.function\n def f_train_step(input_data):\n return distribution.experimental_local_results(\n distribution.experimental_run_v2(train_step, args=(input_data,)))\n\n dist_dataset = distribution.experimental_distribute_dataset(dataset)\n results = []\n for x in dist_dataset:\n output = f_train_step(x)\n results.append(output)\n self.assert_equal_flattened([[25., 36.], [49., 64.]], results)\n\n @combinations.generate(\n combinations.combine(\n distribution=strategy_combinations.all_strategies,\n mode=[\"eager\"]\n ))\n def testDatasetIterationInFunction(self, distribution):\n with distribution.scope():\n a = variables.Variable(\n 1.0, aggregation=variables.VariableAggregation.ONLY_FIRST_REPLICA)\n\n def train_step(_):\n a.assign_add(1.0)\n\n @def_function.function\n def f_train_step(dist_dataset):\n number_of_steps = constant_op.constant(0.0)\n product_of_means = constant_op.constant(2.0)\n for x in dist_dataset: # loop with values modified each iteration\n number_of_steps += 1\n product_of_means *= math_ops.cast(\n distribution.reduce(\"MEAN\", x, axis=0), product_of_means.dtype)\n\n for y in dist_dataset: # loop with no intermediate state\n distribution.experimental_run_v2(train_step, args=(y,))\n\n return number_of_steps, product_of_means\n\n dataset = get_dataset_from_tensor_slices([5., 6., 7., 8.]).batch(2)\n dist_dataset = distribution.experimental_distribute_dataset(dataset)\n\n number_of_steps, product_of_means = f_train_step(dist_dataset)\n self.assertEqual(2, number_of_steps.numpy())\n self.assertNear((2 * (5+6)/2 * (7+8)/2), product_of_means.numpy(), 1e-3)\n\n # We set the initial value of `a` to 1 and iterate through the dataset 2\n # times(4/2 where 4 is the number of dataset elements and 2 is the batch\n # size). 
Hence the final result is 3.\n self.assertEqual(3.0, (a.numpy()))\n\n @combinations.generate(\n combinations.combine(\n distribution=strategy_combinations.all_strategies,\n mode=[\"eager\"]\n ))\n def testDatasetAssertWithDynamicBatch(self, distribution):\n # Regression test for github issue 33517.\n def step_fn(data):\n assert_op = control_flow_ops.Assert(math_ops.less_equal(\n math_ops.reduce_max(data), 100.), [data])\n with ops.control_dependencies([assert_op]):\n return math_ops.square(data)\n\n @def_function.function\n def train(dataset):\n results = []\n iterator = iter(dataset)\n # we iterate through the loop 5 times since we have 3 elements and a\n # global batch of 2.\n for _ in range(2):\n elem = next(iterator)\n output = distribution.experimental_local_results(\n distribution.experimental_run_v2(step_fn, args=(elem,)))\n results.append(output)\n return results\n\n dataset = dataset_ops.DatasetV2.from_tensor_slices([5., 6., 7.,]).batch(2)\n # TODO(b/138326910): Remove Dataset V1 version once bug resolved.\n if not tf2.enabled():\n dataset = dataset_ops.Dataset.from_tensor_slices([5., 6., 7.,]).batch(2)\n dist_dataset = distribution.experimental_distribute_dataset(dataset)\n results = train(dist_dataset)\n\n expected_results = [[25., 36.], [49.]]\n self.assertEqual(len(expected_results), len(results))\n\n # Need to expand results since output will be grouped differently depending\n # on the number of replicas.\n for i, expected_result in enumerate(expected_results):\n final_result = []\n actual_result = results[i]\n for val in actual_result:\n final_result.extend(val.numpy())\n self.assertAllEqual(expected_result, final_result)\n\n @combinations.generate(\n combinations.combine(\n distribution=strategy_combinations.multidevice_strategies,\n mode=[\"eager\"]\n ))\n def testDynamicShapes(self, distribution):\n dataset = get_dataset_from_tensor_slices([5., 6., 7.]).batch(4)\n input_iterator = iter(distribution.experimental_distribute_dataset(dataset))\n\n @def_function.function\n def run(iterator):\n def computation(x):\n return math_ops.reduce_mean(x)\n inputs = next(iterator)\n outputs = distribution.experimental_local_results(\n distribution.experimental_run_v2(computation, args=(inputs,)))\n return outputs\n\n # This assumes that there are exactly 2 replicas\n self.assertAllEqual([5.5, 7.], run(input_iterator))\n\n @combinations.generate(\n combinations.combine(\n distribution=strategy_combinations.multidevice_strategies,\n mode=[\"eager\"]\n ))\n def testDynamicShapesWithGetNextOutsideFunction(self, distribution):\n dataset = get_dataset_from_tensor_slices([5., 6., 7.]).batch(4)\n input_iterator = iter(distribution.experimental_distribute_dataset(dataset))\n\n @def_function.function\n def run(inputs):\n def computation(x):\n return math_ops.reduce_mean(x)\n outputs = distribution.experimental_local_results(\n distribution.experimental_run_v2(computation, args=(inputs,)))\n return outputs\n\n # This assumes that there are exactly 2 replicas\n self.assertAllEqual([5.5, 7.], run(next(input_iterator)))\n\n @combinations.generate(\n combinations.combine(\n distribution=strategy_combinations.multidevice_strategies,\n mode=[\"eager\"]\n ))\n def testStrategyReduceWithDynamicShapes(self, distribution):\n dataset = get_dataset_from_tensor_slices([5., 6., 7.]).batch(4)\n input_iterator = iter(distribution.experimental_distribute_dataset(dataset))\n\n @def_function.function\n def run(iterator):\n inputs = next(iterator)\n return distribution.reduce(reduce_util.ReduceOp.MEAN, inputs, 
axis=0)\n\n self.assertAllEqual(6., run(input_iterator))\n\n @combinations.generate(\n combinations.combine(\n distribution=strategy_combinations.multidevice_strategies,\n mode=[\"eager\"]\n ))\n def testStrategyReduceWithDynamicShapesRank2(self, distribution):\n dataset = get_dataset_from_tensor_slices(\n [[1., 1.], [1., 1.], [1., 1.]]).batch(4)\n input_iterator = iter(distribution.experimental_distribute_dataset(dataset))\n\n @def_function.function\n def run(iterator):\n inputs = next(iterator)\n return distribution.reduce(reduce_util.ReduceOp.MEAN, inputs, axis=0)\n\n self.assertAllEqual([1., 1.], run(input_iterator))\n\n @combinations.generate(\n combinations.combine(\n distribution=strategy_combinations.multidevice_strategies,\n mode=[\"eager\"]\n ))\n def testDynamicShapesWithSizeOp(self, distribution):\n dataset = get_dataset_from_tensor_slices([5., 6., 7.]).batch(4)\n input_iterator = iter(distribution.experimental_distribute_dataset(dataset))\n\n @def_function.function\n def run(inputs):\n def computation(x):\n return array_ops.size_v2(x)\n outputs = distribution.experimental_local_results(\n distribution.experimental_run_v2(computation, args=(inputs,)))\n return outputs\n\n # This assumes that there are exactly 2 replicas\n self.assertAllEqual([2, 1], run(next(input_iterator)))\n\n @combinations.generate(\n combinations.combine(\n distribution=strategy_combinations.multidevice_strategies,\n mode=[\"eager\"]\n ))\n def testDynamicShapesWithFirstReplicaNotMaximumShape(self, distribution):\n def dataset_fn(_):\n dataset1 = get_dataset_from_tensor_slices([[1., 2.], [1., 2.]])\n dataset2 = get_dataset_from_tensor_slices([[1., 2., 3.],\n [1., 2., 3.]])\n dataset = dataset1.concatenate(dataset2)\n dataset = dataset.batch(2, drop_remainder=True)\n return dataset\n\n input_iterator = iter(\n distribution.experimental_distribute_datasets_from_function(dataset_fn))\n\n @def_function.function\n def run(inputs):\n def computation(x):\n return math_ops.reduce_mean(x)\n outputs = distribution.experimental_local_results(\n distribution.experimental_run_v2(computation, args=(inputs,)))\n return outputs\n\n # This assumes that there are exactly 2 replicas\n self.assertAllEqual([1.5, 2.], run(next(input_iterator)))\n\n @combinations.generate(\n combinations.combine(\n distribution=strategy_combinations.all_strategies,\n mode=[\"eager\"]\n ))\n def testDatasetDistributeEvenlyDivisibleDrop(self, distribution):\n # If the batch size is evenly divisible by the number of workers and we set\n # drop_remainder=True on the dataset, then DistributedIterator will use a\n # different (and more efficient) code path which avoids some control flow\n # ops.\n dataset = get_dataset_from_tensor_slices([5., 6.]).batch(\n 2, drop_remainder=True)\n input_iterator = iter(distribution.experimental_distribute_dataset(dataset))\n\n data = next(input_iterator)\n\n expected_result = [5., 6.]\n final_result = []\n actual_result = distribution.experimental_local_results(data)\n for val in actual_result:\n final_result.extend(val)\n self.assertAllEqual(expected_result, final_result)\n\n @combinations.generate(\n combinations.combine(\n distribution=strategy_combinations.all_strategies,\n mode=[\"eager\"]\n ))\n def testDatasetDistributeNotDivisibleDrop(self, distribution):\n # If each batch is not evenly divisible by the number of workers,\n # the remainder will be dropped.\n dataset = get_dataset_from_tensor_slices([5., 6.]).batch(\n 1, drop_remainder=True)\n input_iterator = 
iter(distribution.experimental_distribute_dataset(dataset))\n\n data = next(input_iterator)\n\n expected_result = [5.]\n final_result = []\n actual_result = distribution.experimental_local_results(data)\n for val in actual_result:\n final_result.extend(val)\n self.assertAllEqual(expected_result, final_result)\n\n @combinations.generate(\n combinations.combine(\n distribution=strategy_combinations.all_strategies,\n mode=[\"eager\"]\n ))\n def testDatasetDistributeEvenlyDivisibleNoDrop(self, distribution):\n # Setting drop_remainder=False on the dataset causes DistributedIterator\n # to use get_next_as_optional(), even if the batched dataset is evenly\n # divisible by the number of workers.\n dataset = get_dataset_from_tensor_slices([5., 6.]).batch(\n 2, drop_remainder=False)\n input_iterator = iter(distribution.experimental_distribute_dataset(dataset))\n\n data = next(input_iterator)\n\n expected_result = [5., 6.]\n final_result = []\n actual_result = distribution.experimental_local_results(data)\n for val in actual_result:\n final_result.extend(val)\n self.assertAllEqual(expected_result, final_result)\n\n @combinations.generate(\n combinations.combine(\n distribution=strategy_combinations.all_strategies,\n mode=[\"eager\"]\n ))\n def testDatasetPartialBatchWithMixedOutputs(self, distribution):\n # Dynamic output size with a mix of static and dynamic outputs\n dataset = get_dataset_from_tensor_slices([5.]).batch(2)\n input_iterator = iter(distribution.experimental_distribute_dataset(dataset))\n\n @def_function.function\n def run(iterator):\n\n def computation(x):\n # Fixed size output with a dynamic sized output.\n return array_ops.zeros([3]), math_ops.square(x)\n\n return distribution.experimental_run_v2(\n computation, args=(next(iterator),))\n\n results = run(input_iterator)\n\n # First result is fixed for all replicas.\n for replica_id in range(distribution.num_replicas_in_sync):\n self.assertAllEqual([0., 0., 0.],\n distribution.experimental_local_results(\n results[0])[replica_id])\n # Only first replica has distributed dataset computation.\n self.assertAllEqual([25.],\n distribution.experimental_local_results(results[1])[0])\n # Other replicas have no distributed dataset computation.\n for replica_id in range(1, distribution.num_replicas_in_sync):\n self.assertAllEqual([],\n distribution.experimental_local_results(\n results[1])[replica_id])\n\n @combinations.generate(\n combinations.combine(\n distribution=strategy_combinations.all_strategies,\n mode=[\"eager\"]\n ))\n def testIterationInsideFunction(self, distribution):\n\n def step_fn(data):\n return math_ops.square(data)\n\n @def_function.function\n def train(dataset):\n results = []\n iterator = iter(dataset)\n # we iterate through the loop 2 times since we have 4 elements and a\n # global batch of 2.\n for _ in range(2):\n elem = next(iterator)\n output = distribution.experimental_local_results(\n distribution.experimental_run_v2(step_fn, args=(elem,)))\n results.append(output)\n return results\n\n dataset = get_dataset_from_tensor_slices([5., 6., 7., 8.]).batch(2)\n dist_dataset = distribution.experimental_distribute_dataset(dataset)\n results = train(dist_dataset)\n self.assert_equal_flattened([[25., 36.], [49., 64.]], results)\n\n @combinations.generate(\n combinations.combine(\n distribution=strategy_combinations.all_strategies,\n mode=[\"eager\"]\n ))\n def testIterationOutsideFunction(self, distribution):\n\n def train_step(data):\n return math_ops.square(data)\n\n @def_function.function\n def f_train_step(input_data):\n 
return distribution.experimental_local_results(\n distribution.experimental_run_v2(train_step, args=(input_data,)))\n\n dataset = get_dataset_from_tensor_slices([5., 6., 7., 8.]).batch(2)\n dist_dataset = distribution.experimental_distribute_dataset(dataset)\n iterator = iter(dist_dataset)\n results = []\n # we iterate through the loop 2 times since we have 4 elements and a\n # global batch of 2.\n for _ in range(2):\n output = f_train_step(next(iterator))\n results.append(output)\n self.assert_equal_flattened([[25., 36.], [49., 64.]], results)\n\n\nclass GradientTapeTest(test.TestCase, parameterized.TestCase,\n AssertFlattenedMixin):\n\n @combinations.generate(\n combinations.combine(\n distribution=strategy_combinations.all_strategies,\n mode=[\"eager\"]\n ))\n def testStepInFunctionGradient(self, distribution):\n dataset = get_dataset_from_tensor_slices([5., 6., 7., 8.]).batch(2)\n\n @def_function.function\n def train_step(x):\n def computation(x):\n return math_ops.square(x)\n with backprop.GradientTape() as tape:\n tape.watch(x) # Manually watch non-variable tensors.\n y = computation(x)\n grads = tape.gradient(y, x)\n return grads\n\n dist_dataset = distribution.experimental_distribute_dataset(dataset)\n results = []\n for x in dist_dataset:\n output = distribution.experimental_local_results(\n distribution.experimental_run_v2(train_step, args=(x,)))\n results.append(output)\n self.assert_equal_flattened([[10., 12.], [14., 16.]], results)\n\n @combinations.generate(\n combinations.combine(\n distribution=strategy_combinations.all_strategies,\n mode=[\"eager\"]\n ))\n def testRunInFunctionGradient(self, distribution):\n dataset = get_dataset_from_tensor_slices([5., 6., 7., 8.]).batch(2)\n\n @def_function.function\n def run(x):\n def train_step(x):\n def computation(x):\n return math_ops.square(x)\n with backprop.GradientTape() as tape:\n tape.watch(x) # Manually watch non-variable tensors.\n y = computation(x)\n grads = tape.gradient(y, x)\n return grads\n return distribution.experimental_local_results(\n distribution.experimental_run_v2(train_step, args=(x,)))\n\n dist_dataset = distribution.experimental_distribute_dataset(dataset)\n results = []\n for x in dist_dataset:\n output = run(x)\n results.append(output)\n self.assert_equal_flattened([[10., 12.], [14., 16.]], results)\n\n @combinations.generate(\n combinations.combine(\n distribution=strategy_combinations.all_strategies,\n mode=[\"eager\"],\n model_in_tf_function=[True, False]\n ))\n def testNestedFunction(self, distribution, model_in_tf_function):\n def model(x):\n return x * x\n\n if model_in_tf_function:\n model = def_function.function(model)\n\n with distribution.scope():\n x = variables.Variable(1.0)\n\n @def_function.function\n def train_step():\n def replica_step():\n with backprop.GradientTape() as tape:\n y = model(x)\n return tape.gradient(y, x)\n return distribution.experimental_run_v2(replica_step)\n\n grads = distribution.experimental_local_results(train_step())\n self.assertLen(grads, distribution.num_replicas_in_sync)\n self.assertTrue(all(g is not None for g in grads))\n\n\nclass KerasModelsTest(test.TestCase, parameterized.TestCase):\n\n @combinations.generate(\n combinations.combine(\n distribution=strategy_combinations.all_strategies,\n mode=[\"eager\"]\n ))\n def test_single_keras_layer_experimental_run(self, distribution):\n dataset = self._get_dataset()\n input_iterator = iter(distribution.experimental_distribute_dataset(dataset))\n\n with distribution.scope():\n model = keras.layers.Dense(4, 
name=\"dense\")\n\n @def_function.function\n def train_step(iterator):\n def step_fn(inputs):\n images, targets = inputs\n with backprop.GradientTape() as tape:\n outputs = model(images)\n loss = math_ops.reduce_sum(outputs - targets)\n grads = tape.gradient(loss, model.variables)\n return grads\n\n outputs = distribution.experimental_run_v2(\n step_fn, args=(next(iterator),))\n return nest.map_structure(distribution.experimental_local_results,\n outputs)\n\n train_step(input_iterator)\n\n @combinations.generate(\n combinations.combine(\n distribution=strategy_combinations.all_strategies,\n mode=[\"eager\"]\n ))\n def test_keras_model_creation_experimental_run(self, distribution):\n dataset = self._get_dataset()\n input_iterator = iter(distribution.experimental_distribute_dataset(dataset))\n\n with distribution.scope():\n model = self._get_model()\n\n @def_function.function\n def train_step(iterator):\n def step_fn(inputs):\n images, targets = inputs\n with backprop.GradientTape() as tape:\n outputs = model(images)\n loss = math_ops.reduce_sum(outputs - targets)\n grads = tape.gradient(loss, model.variables)\n return grads\n\n outputs = distribution.experimental_run_v2(\n step_fn, args=(next(iterator),))\n return nest.map_structure(distribution.experimental_local_results,\n outputs)\n\n train_step(input_iterator)\n\n @combinations.generate(\n combinations.combine(\n distribution=strategy_combinations.all_strategies,\n mode=[\"eager\"]\n ))\n def test_keras_model_optimizer_experimental_run(self, distribution):\n dataset = self._get_dataset()\n input_iterator = iter(distribution.experimental_distribute_dataset(dataset))\n\n with distribution.scope():\n model = self._get_model()\n optimizer = keras.optimizer_v2.rmsprop.RMSprop()\n\n @def_function.function\n def train_step(iterator):\n def step_fn(inputs):\n images, targets = inputs\n with backprop.GradientTape() as tape:\n outputs = model(images)\n loss = math_ops.reduce_sum(outputs - targets)\n grads = tape.gradient(loss, model.variables)\n optimizer.apply_gradients(zip(grads, model.variables))\n return loss\n\n outputs = distribution.experimental_run_v2(\n step_fn, args=(next(iterator),))\n return nest.map_structure(distribution.experimental_local_results,\n outputs)\n\n train_step(input_iterator)\n\n @combinations.generate(\n combinations.combine(\n distribution=strategy_combinations.all_strategies,\n mode=[\"eager\"]\n ))\n def test_keras_subclass_model_optimizer_experimental_run(self, distribution):\n def get_subclass_model():\n\n class KerasSubclassModel(keras.Model):\n\n def __init__(self):\n super(KerasSubclassModel, self).__init__()\n self.l = keras.layers.Dense(4, name=\"dense\")\n\n def call(self, x):\n return self.l(x)\n\n return KerasSubclassModel()\n dataset = self._get_dataset()\n input_iterator = iter(distribution.experimental_distribute_dataset(dataset))\n\n with distribution.scope():\n model = get_subclass_model()\n optimizer = keras.optimizer_v2.rmsprop.RMSprop()\n\n @def_function.function\n def train_step(iterator):\n def step_fn(inputs):\n images, targets = inputs\n with backprop.GradientTape() as tape:\n outputs = model(images)\n loss = math_ops.reduce_sum(outputs - targets)\n grads = tape.gradient(loss, model.variables)\n optimizer.apply_gradients(zip(grads, model.variables))\n return loss\n\n outputs = distribution.experimental_run_v2(\n step_fn, args=(next(iterator),))\n return nest.map_structure(distribution.experimental_local_results,\n outputs)\n\n train_step(input_iterator)\n\n @combinations.generate(\n 
combinations.combine(\n distribution=strategy_combinations.all_strategies,\n mode=[\"eager\"]\n ))\n def test_keras_model_optimizer_experimental_run_loop(self, distribution):\n dataset = self._get_dataset()\n input_iterator = iter(distribution.experimental_distribute_dataset(dataset))\n\n with distribution.scope():\n model = self._get_model()\n optimizer = keras.optimizer_v2.rmsprop.RMSprop()\n\n @def_function.function\n def train_step(iterator):\n def step_fn(inputs):\n images, targets = inputs\n with backprop.GradientTape() as tape:\n outputs = model(images)\n loss = math_ops.reduce_sum(outputs - targets)\n grads = tape.gradient(loss, model.variables)\n optimizer.apply_gradients(zip(grads, model.variables))\n return loss\n\n for _ in range(5):\n distribution.experimental_run_v2(step_fn, args=(next(iterator),))\n\n train_step(input_iterator)\n\n @combinations.generate(\n combinations.combine(\n distribution=strategy_combinations.all_strategies,\n mode=[\"eager\"]\n ))\n def test_lstm(self, distribution):\n\n batch_size = 32\n\n def create_lstm_model():\n model = keras.models.Sequential()\n # We only have LSTM variables so we can detect no gradient issues more\n # easily.\n model.add(\n keras.layers.LSTM(1, return_sequences=False, input_shape=(10, 1)))\n return model\n\n def create_lstm_data():\n seq_length = 10\n\n x_train = np.random.rand(batch_size, seq_length, 1).astype(\"float32\")\n y_train = np.random.rand(batch_size, 1).astype(\"float32\")\n return x_train, y_train\n\n x, y = create_lstm_data()\n dataset = dataset_ops.Dataset.from_tensor_slices((x, y))\n dataset = dataset.batch(batch_size, drop_remainder=True)\n input_iterator = iter(distribution.experimental_distribute_dataset(dataset))\n\n with distribution.scope():\n model = create_lstm_model()\n optimizer = keras.optimizer_v2.gradient_descent.SGD()\n\n @def_function.function\n def train_step(input_iterator):\n\n def step_fn(inputs):\n inps, targ = inputs\n with backprop.GradientTape() as tape:\n output = model(inps)\n loss = math_ops.reduce_mean(\n keras.losses.binary_crossentropy(\n y_true=targ, y_pred=output, from_logits=False))\n grads = tape.gradient(loss, model.variables)\n optimizer.apply_gradients(zip(grads, model.variables))\n return loss\n\n outputs = distribution.experimental_run_v2(\n step_fn, args=(next(input_iterator),))\n return distribution.experimental_local_results(outputs)\n\n train_step(input_iterator)\n\n @combinations.generate(\n combinations.combine(\n distribution=strategy_combinations.all_strategies, mode=[\"eager\"]))\n def test_nested_tf_functions(self, distribution):\n # The test builds two computations with keras layers, one with nested\n # tf.function, and the other without nested tf.function. 
We run these\n # computations independently on the model with same weights, and make sure\n # the variables are still the same after one training step.\n\n inputs = np.random.random((10, 3)).astype(np.float32)\n targets = np.ones((10, 4), dtype=np.float32)\n dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets)).repeat()\n dataset = dataset.batch(10, drop_remainder=True)\n input_iterator = iter(distribution.experimental_distribute_dataset(dataset))\n\n def get_model():\n x = keras.layers.Input(shape=(3,), name=\"input\")\n y = keras.layers.Dense(4, name=\"dense\")(x)\n model = keras.Model(x, y)\n return model\n\n with distribution.scope():\n model = get_model()\n optimizer = keras.optimizer_v2.gradient_descent.SGD(0.1, momentum=0.01)\n weights_file = os.path.join(self.get_temp_dir(), \".h5\")\n model.save_weights(weights_file)\n model2 = get_model()\n model2.load_weights(weights_file)\n\n # Make sure model and model2 variables are in sync when initialized.\n for model_v, model2_v in zip(model.variables, model2.variables):\n self.assertAllClose(model_v.numpy(), model2_v.numpy())\n\n def compute_loss(images, targets):\n outputs = model(images)\n return math_ops.reduce_sum(outputs - targets)\n\n @def_function.function\n def train_step_without_nested_tf_function(inputs):\n\n def step_fn(inputs):\n images, targets = inputs\n with backprop.GradientTape() as tape:\n loss = compute_loss(images, targets)\n grads = tape.gradient(loss, model.variables)\n optimizer.apply_gradients(zip(grads, model.variables))\n\n distribution.experimental_run_v2(step_fn, args=(inputs,))\n\n @def_function.function\n def compute_loss2(images, targets):\n outputs = model2(images)\n return math_ops.reduce_sum(outputs - targets)\n\n @def_function.function\n def train_step_with_nested_tf_function(inputs):\n\n def step_fn(inputs):\n images, targets = inputs\n with backprop.GradientTape() as tape:\n loss = compute_loss2(images, targets)\n grads = tape.gradient(loss, model2.variables)\n optimizer.apply_gradients(zip(grads, model2.variables))\n\n distribution.experimental_run_v2(step_fn, args=(inputs,))\n\n inputs = next(input_iterator)\n\n train_step_without_nested_tf_function(inputs)\n train_step_with_nested_tf_function(inputs)\n\n # Make sure model and model2 variables are still in sync.\n for model_v, model2_v in zip(model.variables, model2.variables):\n self.assertAllClose(model_v.numpy(), model2_v.numpy())\n\n def _get_dataset(self):\n inputs = np.zeros((10, 3), dtype=np.float32)\n targets = np.zeros((10, 4), dtype=np.float32)\n dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))\n dataset = dataset.repeat(100)\n dataset = dataset.batch(10, drop_remainder=True)\n return dataset\n\n def _get_model(self):\n x = keras.layers.Input(shape=(3,), name=\"input\")\n y = keras.layers.Dense(4, name=\"dense\")(x)\n model = keras.Model(x, y)\n return model\n\n\nclass KerasMetricsTest(test.TestCase, parameterized.TestCase):\n\n @combinations.generate(\n combinations.combine(\n distribution=strategy_combinations.all_strategies,\n mode=[\"eager\"]\n ))\n def test_multiple_keras_metrics_experimental_run(self, distribution):\n with distribution.scope():\n loss_metric = keras.metrics.Mean(\"loss\", dtype=np.float32)\n loss_metric_2 = keras.metrics.Mean(\"loss_2\", dtype=np.float32)\n\n @def_function.function\n def train_step():\n def step_fn():\n loss = constant_op.constant(5.0, dtype=np.float32)\n loss_metric.update_state(loss)\n loss_metric_2.update_state(loss)\n\n distribution.experimental_run_v2(step_fn)\n\n 
train_step()\n self.assertEqual(loss_metric.result().numpy(),\n loss_metric_2.result().numpy())\n self.assertEqual(loss_metric.result().numpy(), 5.0)\n\n\nif __name__ == \"__main__\":\n test.main()\n" ]
[ [ "tensorflow.python.ops.variables.Variable", "numpy.random.rand", "tensorflow.python.ops.math_ops.reduce_sum", "tensorflow.python.tf2.enabled", "tensorflow.python.keras.layers.Dense", "numpy.random.random", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.keras.models.Sequential", "tensorflow.python.keras.optimizer_v2.gradient_descent.SGD", "tensorflow.python.eager.test.main", "tensorflow.python.keras.optimizer_v2.rmsprop.RMSprop", "tensorflow.python.keras.layers.LSTM", "tensorflow.python.eager.backprop.GradientTape", "tensorflow.python.ops.array_ops.size_v2", "tensorflow.python.distribute.combinations.combine", "numpy.zeros", "tensorflow.python.eager.def_function.function", "tensorflow.python.ops.array_ops.zeros", "tensorflow.python.ops.math_ops.square", "tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices", "tensorflow.python.data.ops.dataset_ops.DatasetV2.from_tensor_slices", "tensorflow.python.keras.Model", "tensorflow.python.keras.losses.binary_crossentropy", "tensorflow.python.keras.layers.Input", "tensorflow.python.util.nest.map_structure", "tensorflow.python.ops.math_ops.reduce_mean", "tensorflow.python.ops.math_ops.reduce_max", "numpy.ones", "tensorflow.python.framework.constant_op.constant", "tensorflow.python.keras.metrics.Mean" ] ]
gate42qc/EM-and-DD
[ "2a37e5f9dbe59a7636c6876df88995f853fec941" ]
[ "helpers.py" ]
[ "from pyquil.noise import add_decoherence_noise\nfrom pyquil.gates import *\nfrom pyquil.quil import Program\nimport random\nrandom.seed()\nimport numpy as np\n\npi = np.pi\n\n\ndef get_one_q_circuit(q_index, depth):\n \"\"\"\n :param q_index: index of the qubit which the circuit acts on\n :depth: depth of the circuit\n :return: a program corresponding to a random U \n \"\"\"\n gate_set = [RX, RZ, T]\n instructions = []\n for i in range(depth):\n g = random.choice(gate_set)\n if g is T:\n instructions.append(RZ(pi/4,q_index))\n else:\n instructions.append(g(pi/2,q_index))\n \n return Program(instructions)\n\ndef get_two_q_circuit(q_index,n_cycles):\n \"\"\"\n :param q_index: indexes of the qubits which the circuit acts on\n :n_cycles: depth of the circuit\n :return: a program corresponding to a random U \n \"\"\"\n get_set = [RX, RZ, T]\n instructions = []\n #1. applying Hadamard's in native language\n instructions.extend([RZ(pi/2, q_index[0]),RX(pi/2, q_index[0]),RZ(pi/2, q_index[0])])\n instructions.extend([RZ(pi/2, q_index[1]),RX(pi/2, q_index[1]),RZ(pi/2, q_index[1])])\n #2. applying CZ followed by 1 qubit gates \n for i in range(n_cycles):\n instructions.append(CZ(q_index[0],q_index[1]))\n for idx in (q_index):\n g = random.choice(get_set)\n if g is T:\n instructions.append(RZ(pi/4,idx))\n else:\n instructions.append(g(pi/2,idx))\n \n return Program(instructions)\n\n\ndef add_pragma_block(program):\n inst = program.instructions\n new_inst = ['PRAGMA PRESERVE_BLOCK'] + inst + ['PRAGMA END_PRESERVE_BLOCK']\n return Program(new_inst)\n \ndef get_zx_DD_sequence(q_index, n):\n \"\"\"\n :param q_index: index(es) of qubit(s) for applying DD sequence\n :param n: number of sequence; each sequence is consisted of ZXZX pulses\n :return: program with DD sequence \n \"\"\"\n \n indexes = q_index\n if type(q_index) == int:\n q_index = [q_index]\n dd = [] \n for i, index in enumerate(q_index):\n dd.extend([RZ(pi, index),RX(pi,index), RZ(pi,index),RX(pi,index)] * n) #it can be modified to include buffer time (I gates)\n \n \n return Program(dd)\n\n\ndef get_xy_DD_sequence(q_index, n):\n \"\"\"\n :param q_index: index(es) of qubit(s) for applying DD sequence\n :param n: number of sequence; each sequence is consisted of XYXY (XY== RX(pi)RZ(pi)RX(pi)) pulses\n :return: program with DD sequence \n \"\"\"\n \n indexes = q_index\n if type(q_index) == int:\n q_index = [q_index]\n dd = [] \n for i, index in enumerate(q_index):\n dd.extend([RX(pi,index),RZ(pi, index),RX(pi,index),RX(pi,index), RZ(pi,index),RX(pi,index)] * n) \n \n \n return Program(dd)\n \ndef get_idle_sequence(q_index, n, nI = 4):\n \"\"\"\n :param q_index: index(es) of qubit(s) for applying DD sequence\n :param n: number of wait circuits; each circuit consists of nI identity gates \n :param nI: number of identity gates in wait circuit\n :return: program with wait sequence \n \"\"\"\n \n indexes = q_index\n if type(q_index) == int:\n q_index = [q_index]\n dd = [] \n for i, index in enumerate(q_index):\n dd.extend([I(index)] * (n * nI)) \n \n \n return Program(dd)\n\n\n# sampling programs with different gate times\ndef run_with_gate_time_sampling(cxn: QVMConnection,\n programs: Iterable[Tuple[float, Program]],\n program_modifier=None,\n trials=20000):\n records = []\n base = 50e-9\n gate_times = np.array([1, 2, 3, 4]) * base\n\n for param, program in programs:\n program = program.copy()\n ro = program.declare('ro', 'BIT', 2)\n for gate_time in gate_times:\n noisy = add_decoherence_noise(program, gate_time_1q=gate_time, gate_time_2q=3 * 
gate_time).inst([\n MEASURE(0, ro[0]),\n MEASURE(1, ro[1]),\n ])\n\n if program_modifier:\n noisy = program_modifier(noisy)\n\n bitstring = np.array(cxn.run(noisy, [0, 1], trials))\n z0, z1 = np.mean(bitstring, axis=0)\n zz = 1 - (np.sum(bitstring, axis=1) % 2).mean() * 2\n\n f0, f1 = (trials - np.sum(bitstring, axis=0)) / trials\n ff = np.sum(np.sum(bitstring, axis=1) == 0) / trials\n\n record = {\n 'z0': z0,\n 'z1': z1,\n 'zz': zz,\n 'f0': f0,\n 'f1': f1,\n 'ff': ff,\n 'param': param,\n 'noise_param': gate_time,\n }\n records += [record]\n\n return records\n\n\n# Computing mittigated values\ndef get_analyzed_and_mitigated(records):\n df_all = pd.DataFrame(records)\n\n noise_params = df_all['noise_param'].unique()\n\n qubits = 2\n mitigated = []\n\n for order in range(2, len(noise_params) + 1):\n matrix = noise_params[:order, np.newaxis] ** np.arange(order)\n\n mo = [[] for _ in range(qubits+1)]\n\n for param in df_all['param'].unique():\n df = df_all.query('{} == @{}'.format('param', 'param'))\n\n q1 = np.linalg.solve(matrix, df['z0'][:order])\n q2 = np.linalg.solve(matrix, df['z1'][:order])\n\n ff = np.linalg.solve(matrix, df['ff'][:order])\n\n mo[0] += [q1[0]] * len(df)\n mo[1] += [q2[0]] * len(df)\n mo[2] += [ff[0]] * len(df)\n\n mitigated += [mo]\n\n for order, o_values in enumerate(mitigated):\n for qubit, q_values in enumerate(o_values[:-1]):\n df_all.loc[:, 'm{}-{}'.format(qubit+1, order+1)] = np.array(q_values)\n df_all.loc[:, 'mf{}-{}'.format(qubit+1, order+1)] = 1 - np.array(q_values)\n df_all.loc[:, 'mfzz-{}'.format(order+1)] = np.array(o_values[-1])\n\n return df_all\n\n\n# appling DD to a program\ndef add_dd(program: Program):\n new_program = program.copy_everything_except_instructions()\n\n counts = [0, 0]\n for gate in program:\n try:\n if len(gate.qubits) > 1:\n if abs(counts[0] - counts[1]) >= 2:\n min_ind = int(counts[0] > counts[1])\n times = max(int(abs(counts[0] - counts[1])/4), 1)\n\n p = add_decoherence_noise(Program(get_dd_sec(min_ind)*times))\n\n new_program.inst(p)\n counts = [0, 0]\n else:\n counts[gate.qubits[0].index] += 1\n except AttributeError:\n pass\n\n new_program.inst(gate)\n return new_program\n\n\n# Generate Random cirquit\ndef two_qubit_circuit(length: int, qubit_one: int, qubit_two: int):\n \"\"\"\n genereates two qubit identity equal circuit with given length\n\n :param length: length of the circuit\n :param qubit_one: one of the qubits\n :param qubit_two: second qubit\n :return: pyquil Program\n \"\"\"\n\n p = Program()\n\n for j in range(int(length/2)):\n theta = 2 * np.pi * random.random()\n gate_list = [RZ(theta, qubit_one), RX(np.pi / 2, qubit_one), RX(- np.pi / 2, qubit_one),\n CZ(qubit_one, qubit_two),\n RZ(theta, qubit_two), RX(np.pi / 2, qubit_two), RX(- np.pi / 2, qubit_two), CZ(qubit_two, qubit_one)]\n new_gate = random.choice(gate_list)\n p.inst(new_gate)\n\n p += p.dagger()\n\n return Program('PRAGMA PRESERVE_BLOCK') + p + Program('PRAGMA END_PRESERVE_BLOCK')\n" ]
[ [ "numpy.array", "numpy.sum", "numpy.mean", "numpy.arange", "numpy.linalg.solve" ] ]
kubic71/RayS
[ "bc9836b4f4e15410368417e4754e5381067f145d" ]
[ "attack_natural.py" ]
[ "import argparse\nimport json\nimport numpy as np\nimport torch\nimport torchvision.models as models\n\nfrom dataset import load_mnist_test_data, load_cifar10_test_data, load_imagenet_test_data\nfrom general_torch_model import GeneralTorchModel\n\nfrom arch import mnist_model\nfrom arch import cifar_model\n\nfrom RayS_Single import RayS\n\n\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Hard Label Attacks')\n parser.add_argument('--dataset', default='imagenet', type=str,\n help='Dataset')\n parser.add_argument('--targeted', default='0', type=str,\n help='targeted or untargeted')\n parser.add_argument('--norm', default='linf', type=str,\n help='Norm for attack, linf only')\n parser.add_argument('--num', default=10000, type=int,\n help='Number of samples to be attacked from test dataset.')\n parser.add_argument('--query', default=10000, type=int,\n help='Maximum queries for the attack')\n parser.add_argument('--batch', default=1, type=int,\n help='attack batch size.')\n parser.add_argument('--epsilon', default=0.05, type=float,\n help='attack strength')\n parser.add_argument('--early', default='1', type=str,\n help='early stopping (stop attack once the adversarial example is found)')\n args = parser.parse_args()\n\n targeted = True if args.targeted == '1' else False\n early_stopping = False if args.early == '0' else True\n order = 2 if args.norm == 'l2' else np.inf\n\n print(args)\n\n if args.dataset == 'mnist':\n model = mnist_model.MNIST().cuda()\n model = torch.nn.DataParallel(model, device_ids=[0])\n model.load_state_dict(torch.load('model/mnist_gpu.pt'))\n test_loader = load_mnist_test_data(args.batch)\n torch_model = GeneralTorchModel(model, n_class=10, im_mean=None, im_std=None)\n elif args.dataset == 'cifar':\n model = cifar_model.CIFAR10().cuda()\n model = torch.nn.DataParallel(model, device_ids=[0])\n model.load_state_dict(torch.load('model/cifar10_gpu.pt'))\n test_loader = load_cifar10_test_data(args.batch)\n torch_model = GeneralTorchModel(model, n_class=10, im_mean=None, im_std=None)\n elif args.dataset == 'resnet':\n model = models.__dict__[\"resnet50\"](pretrained=True).cuda()\n model = torch.nn.DataParallel(model, device_ids=[0])\n test_loader = load_imagenet_test_data(args.batch)\n torch_model = GeneralTorchModel(model, n_class=1000, im_mean=[0.485, 0.456, 0.406],\n im_std=[0.229, 0.224, 0.225])\n elif args.dataset == 'inception':\n model = models.__dict__[\"inception_v3\"](pretrained=True).cuda()\n model = torch.nn.DataParallel(model, device_ids=[0])\n test_loader = load_imagenet_test_data(args.batch)\n torch_model = GeneralTorchModel(model, n_class=1000, im_mean=[0.485, 0.456, 0.406],\n im_std=[0.229, 0.224, 0.225])\n else:\n print(\"Invalid dataset\")\n exit(1)\n\n \n attack = RayS(torch_model, order=order, epsilon=args.epsilon, early_stopping=early_stopping)\n \n stop_dists = []\n stop_queries = []\n asr = []\n np.random.seed(0)\n seeds = np.random.randint(10000, size=10000)\n count = 0\n for i, (xi, yi) in enumerate(test_loader):\n xi, yi = xi.cuda(), yi.cuda()\n\n if count == args.num:\n break\n\n if torch_model.predict_label(xi) != yi:\n continue\n\n np.random.seed(seeds[i])\n\n target = np.random.randint(torch_model.n_class) * torch.ones(yi.shape,\n dtype=torch.long).cuda() if targeted else None\n while target and torch.sum(target == yi) > 0:\n print('re-generate target label')\n target = np.random.randint(torch_model.n_class) * torch.ones(len(xi), dtype=torch.long).cuda()\n\n adv, queries, dist, succ = attack(xi, yi, target=target, 
seed=seeds[i],\n query_limit=args.query)\n # print(queries, dist, succ)\n if succ:\n stop_queries.append(queries)\n if dist.item() < np.inf:\n stop_dists.append(dist.item())\n elif early_stopping == False:\n if dist.item() < np.inf:\n stop_dists.append(dist.item())\n\n asr.append(succ.item())\n\n count += 1\n\n print(\"index: {:4d} avg dist: {:.4f} avg queries: {:.4f} asr: {:.4f} \\n\"\n .format(i,\n np.mean(np.array(stop_dists)),\n np.mean(np.array(stop_queries)),\n np.mean(np.array(asr))\n ))\n\n\n name = args.dataset + '_' + args.alg + '_' + args.norm + '_query' + str(args.query) + '_eps' + str(\n args.epsilon) + '_early' + args.early\n summary_txt = 'distortion: ' + str(np.mean(np.array(stop_dists))) + ' queries: ' + str(\n np.mean(np.array(stop_queries))) + ' succ rate: ' + str(np.mean(np.array(asr)))\n with open(name + '_summary' + '.txt', 'w') as f:\n json.dump(summary_txt, f)\n \n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.array", "numpy.random.seed", "torch.ones", "numpy.random.randint", "torch.load", "torch.nn.DataParallel", "torch.sum" ] ]
jacobkimmel/non-parametric-transformers
[ "5c94a76e2006a4ae949dc337c0db808a234970c2" ]
[ "baselines/utils/baseline_hyper_tuner.py" ]
[ "import functools\nfrom copy import deepcopy\nfrom pprint import pprint\nfrom tempfile import TemporaryDirectory\n\nimport numpy as np\nimport wandb\nfrom numba import njit\nfrom scipy.stats import rankdata\nfrom sklearn.metrics import (\n make_scorer, r2_score, mean_squared_error, accuracy_score, log_loss)\nfrom sklearn.neighbors import KNeighborsTransformer\nfrom sklearn.pipeline import Pipeline\n\nfrom baselines.sklearn_models import LARGE_DATASETS, MEDIUM_DATASETS\nfrom baselines.utils.hyper_tuning_utils import (\n add_baseline_random_state, get_label_log_loss_metric)\n\n\"\"\"\nAUROC computation by William Wu. 15x faster than sklearn.metrics.roc_auc_score\nfor a reasonably sized array \n(<< 1 million; at 1M it is 1.5x slower than sklearn).\nhttps://www.kaggle.com/c/riiid-test-answer-prediction/discussion/208031\n\"\"\"\n\n\n@njit\ndef _auc(actual, pred_ranks):\n actual = np.asarray(actual)\n pred_ranks = np.asarray(pred_ranks)\n n_pos = np.sum(actual)\n n_neg = len(actual) - n_pos\n return (np.sum(pred_ranks[actual==1]) - n_pos*(n_pos+1)/2) / (n_pos*n_neg)\n\n\ndef auc(actual, predicted):\n # only class 1 preds for classification\n if predicted.shape[-1] == 2:\n predicted = predicted[:, 1]\n\n # return roc_auc_score(actual, predicted)\n pred_ranks = rankdata(predicted)\n return _auc(actual, pred_ranks)\n\n\ndef wrapped_partial(func, *args, **kwargs):\n \"\"\"Partial that propagates __name__ and __doc__.\"\"\"\n # louistiao.me/posts/adding-__name__-and~\n partial_func = functools.partial(func, *args, **kwargs)\n functools.update_wrapper(partial_func, func)\n return partial_func\n\n\ndef unstd_scorer(scorer, sigma):\n def new_scorer(*args, **kwargs):\n return sigma * scorer(*args, **kwargs)\n return new_scorer\n\n\n# Define scoring metrics for regression/classification\ndef get_scoring_metrics(sigma, labels=None):\n \"\"\"Need to redefine to work with AUROC removal.\"\"\"\n if labels is None:\n log_score = make_scorer(log_loss, needs_proba=True)\n auc_score = make_scorer(auc, needs_proba=True)\n else:\n # explicitly provide labels for dataset where y_true\n # may not always have all labels\n log_score = make_scorer(\n wrapped_partial(log_loss, labels=labels),\n needs_proba=True)\n auc_score = make_scorer(\n wrapped_partial(auc, labels=labels),\n needs_proba=True)\n\n scoring = {\n 'reg': {\n 'r2': make_scorer(r2_score),\n 'rmse': make_scorer(\n wrapped_partial(mean_squared_error, squared=False)),\n 'mse': make_scorer(\n wrapped_partial(mean_squared_error, squared=True)),\n 'rmse_unstd': make_scorer(\n unstd_scorer(\n wrapped_partial(mean_squared_error, squared=False),\n sigma)),\n 'mse_unstd': make_scorer(\n unstd_scorer(\n wrapped_partial(mean_squared_error, squared=True),\n sigma**2)),\n },\n 'class': {\n 'logloss': log_score,\n 'accuracy': make_scorer(accuracy_score),\n 'auroc': auc_score,\n }\n }\n return scoring\n\n\nMETRIC_NEEDS_PROBA = ['logloss', 'auroc']\n\n# Define \"refit\": the metric used to select the top performer\nREFIT = {\n 'reg': 'mse',\n # 'class': 'accuracy',\n 'class': 'logloss'\n}\n\n# Objective -- must match with the above refit\nOBJECTIVE = {\n 'reg': 'minimize',\n 'class': 'minimize'\n # 'class': 'maximize'\n}\n\n\nclass BaselineHyperTuner:\n def __init__(\n self, dataset, wandb_args, args, c, wandb_run,\n models_dict, hypers_dict, search_alg, verbose=1, n_jobs=-1):\n self.dataset = dataset\n self.wandb_args = wandb_args\n self.args = args\n self.c = c\n self.wandb_run = wandb_run\n self.models_dict = models_dict\n self.hypers_dict = hypers_dict\n 
self.search_alg = search_alg\n self.verbose = verbose\n self.n_jobs = n_jobs\n\n self.n_cv_splits = min(dataset.n_cv_splits, c.exp_n_runs)\n self.metadata = self.dataset.metadata # Will already be loaded\n\n # Parse metadata\n self.D = self.metadata['D']\n self.cat_target_cols = self.metadata['cat_target_cols']\n self.num_target_cols = self.metadata['num_target_cols']\n self.target_cols = self.cat_target_cols + self.num_target_cols\n\n # Store data for each model evaluated\n self.model_name = None\n self.model_classes = None\n\n # Store data for each CV split\n self.cv_index = None\n self.train_indices = None\n self.val_indices = None\n self.test_indices = None\n self.data_arrs = None\n self.non_target_cols = None\n self.X_train = None\n self.X_train_val = None\n self.X_val = None\n self.X_test = None\n\n # Store data for each target column (i.e. multitarget class/reg)\n self.y_train = None\n self.y_train_val = None\n self.y_test = None\n self.reg_class_mode = None\n self.model_class = None\n self.model_hypers = None\n self.cv_split = None # Iter: must be defined prior to sklearn tuning\n self.refit = None\n self.target_col = None\n self.scoring = None\n self.fitted_model = None\n self.hyper_run_dict = None\n self.aggregate_hyper_run_dicts = []\n\n # Needs to be set for poker-hand, which explicitly needs to\n # specify labels for the log loss at evaluation time, due to\n # an abnormally small val set (and rare classes).\n self.labels = None\n\n def run_hypertuning(self):\n\n if self.c.exp_batch_size != -1:\n raise Exception(\n f'Batch size {self.c.exp_batch_size} provided for baseline '\n f'hypertuning; invalid. Provide a full batch by not '\n f'specifying --exp_batch_size or providing '\n f'--exp_batch_size=-1.')\n\n for model_index, (model_name, model_classes) in enumerate(\n self.models_dict.items()):\n print('\\n------------------')\n print(f'Running prediction for model {model_name}.')\n\n # Set class model parameters\n self.model_name = model_name\n self.model_classes = model_classes\n\n # KNN on large graphs -- precompute\n if (self.model_name == 'KNN' and\n self.c.data_set in MEDIUM_DATASETS + LARGE_DATASETS):\n self.knn_precompute_graph = True\n assert not self.c.sklearn_val_final_fit\n print('Precomputing KNN Graph.')\n else:\n self.knn_precompute_graph = False\n\n # New wandb logger for each run\n if model_index > 0:\n self.wandb_run = wandb.init(**self.wandb_args)\n wandb.config.update(self.args)\n self.dataset.reset_cv_splits()\n self.aggregate_hyper_run_dicts = []\n\n self.add_model_params_to_config()\n\n for cv_index in range(self.n_cv_splits):\n print('CV Index: ', cv_index)\n\n # Load from ColumnEncodingDataset\n self.dataset.load_next_cv_split()\n\n self.cv_index = cv_index\n\n print(\n f'Train-test Split {cv_index + 1}/'\n f'{self.dataset.n_cv_splits}')\n\n if self.c.exp_n_runs < self.dataset.n_cv_splits:\n print(\n f'c.exp_n_runs = {self.c.exp_n_runs}. 
'\n f'Stopping at {self.c.exp_n_runs} splits.')\n\n self.run_cv_split_hypertuning()\n\n self.aggregate_results_from_splits()\n self.wandb_run.finish() # TODO: is this fixed now?\n\n def add_model_params_to_config(self):\n new_dict = {}\n new_dict['target_cols'] = self.target_cols\n new_dict['model_name'] = self.model_name\n new_dict['search_alg'] = self.c.sklearn_hyper_search\n wandb.config.update(new_dict)\n\n def run_cv_split_hypertuning(self):\n \"\"\"\n At this point, the CV split dataset has been loaded and we can\n parse it, running hypertuning.\n \"\"\"\n cv_dataset = self.dataset.cv_dataset\n self.train_indices, self.val_indices, self.test_indices = (\n tuple(cv_dataset['new_train_val_test_indices']))\n\n self.data_arrs = cv_dataset['data_arrs']\n self.non_target_cols = sorted(\n list(set(range(self.D)) - set(self.target_cols)))\n\n X = []\n\n for i, col in enumerate(self.data_arrs):\n if i in self.non_target_cols:\n col = col[:, :-1]\n\n if self.model_name == 'XGBoost':\n col = col.astype(np.float32)\n\n X.append(col)\n\n X = np.hstack(X)\n self.X_train = X[self.train_indices]\n self.X_train_val = X[self.train_indices + self.val_indices]\n self.X_val = X[self.val_indices]\n self.X_test = X[self.test_indices]\n\n self.run_class_reg_hyper_tuning()\n\n def run_class_reg_hyper_tuning(self):\n \"\"\"Wrapper: runs prediction over each numerical / categorical col.\"\"\"\n if self.num_target_cols:\n if len(self.num_target_cols) > 1:\n raise NotImplementedError\n # Build y with multiple targets\n\n # Wrap our predictive model\n\n # self.run_col_hyper_tuning(\n # target_col=self.num_target_cols, reg_class_mode='reg',\n # y=y_multitarget)\n else:\n num_col_index = self.num_target_cols[0]\n self.run_col_hyper_tuning(\n target_col=num_col_index, reg_class_mode='reg')\n\n if self.cat_target_cols:\n for class_col_index in self.cat_target_cols:\n self.run_col_hyper_tuning(\n target_col=class_col_index, reg_class_mode='class')\n\n def run_col_hyper_tuning(self, target_col, reg_class_mode,\n y: np.array = None):\n \"\"\"Column-specific prediction preprocessing.\"\"\"\n print(f'Running {reg_class_mode} on col {target_col}.')\n self.reg_class_mode = reg_class_mode\n wandb.config.update({'reg_class_mode': self.reg_class_mode})\n self.model_class = self.model_classes[reg_class_mode]\n self.model_hypers = self.hypers_dict[self.model_name][reg_class_mode]\n\n # For MLP, TabNet, we have different sweep sets based on size of\n # the data.\n if self.model_name in ['MLP', 'TabNet', 'KNN']:\n self.model_hypers = self.model_hypers(\n self.model_name, self.c.data_set)\n\n if not isinstance(self.model_hypers, list):\n self.model_hypers = [self.model_hypers]\n\n if self.model_name in ['TabNet', 'DKL']:\n if self.c.exp_device == 'cuda:0':\n device = ['cuda']\n else:\n device = ['cpu']\n\n for config in self.model_hypers:\n config['device_name'] = device\n\n if self.model_name == 'TabNet':\n for config in self.model_hypers:\n cat_dims = self.dataset.cv_dataset['cat_dims']\n cat_features = self.dataset.metadata['cat_features']\n\n filtered_cat_dims = []\n filtered_cat_features = []\n\n # Assure target column is not included\n for i, cat_feature in enumerate(cat_features):\n if cat_feature in self.cat_target_cols:\n continue\n filtered_cat_dims.append(cat_dims[i])\n filtered_cat_features.append(cat_feature)\n\n config['cat_dims'] = [filtered_cat_dims]\n config['cat_idxs'] = [filtered_cat_features]\n\n if y is None:\n # Get column encoding\n y = self.data_arrs[target_col]\n\n if reg_class_mode == 'reg':\n # Exclude 
mask token for regression\n y = y[:, 0]\n else:\n y = np.argmax(y, axis=1)\n\n self.y_train = y[self.train_indices]\n self.y_train_val = y[self.train_indices + self.val_indices]\n self.y_val = y[self.val_indices]\n self.y_test = y[self.test_indices]\n\n compute_auroc = True\n\n # Done to avoid a rare\n # case in which y_true and y_pred contain a different\n # number of classes, which confuses sklearn\n # See https://github.com/scikit-learn/scikit-learn/issues/11777\n if self.reg_class_mode == 'class':\n class_labels = np.unique(y)\n num_class_labels = len(class_labels)\n if num_class_labels > 2:\n compute_auroc = False\n print('Disabling AUROC because multiclass.')\n labels = self.labels = np.sort(class_labels)\n if self.model_name == 'TabNet' and self.c.data_set == 'poker-hand':\n # Give TabNet an explicit labels argument\n for config in self.model_hypers:\n config['labels'] = [labels]\n else:\n labels = None\n else:\n labels = None\n\n # TODO: consider using the sklearn multitarget wrappers\n # (but may be extra)\n self.target_col = target_col\n sigma = self.dataset.cv_dataset['sigmas'][self.target_col]\n\n self.scoring = get_scoring_metrics(\n sigma=sigma,\n labels=labels)[reg_class_mode]\n\n if compute_auroc is False and self.reg_class_mode == 'class':\n del self.scoring['auroc']\n\n print(self.scoring)\n\n self.refit = REFIT[reg_class_mode]\n\n # Add random seed for specific\n self.model_hypers = add_baseline_random_state(\n self.model_hypers,\n seed=self.c.baseline_seed + self.cv_index)\n\n self.tune_fit_eval_model()\n\n def tune_fit_eval_model(self):\n kwargs = {}\n\n # We are using the best config reported in the TabNet paper, so we\n # can just skip tuning and go directly to the final fit.\n if self.c.data_set in LARGE_DATASETS and self.model_name in [\n 'TabNet', 'GradientBoosting']:\n print(\n f'Running {self.model_name} on dataset {self.c.data_set} -- skipping to '\n f'final train/eval with their reported/the best config.')\n best_params = self.model_hypers[0]\n best_params = {key: value[0] for key, value in best_params.items()}\n pprint(best_params)\n self.hyper_run_dict = {}\n else:\n cv_results = self.run_class_reg_cv_split()\n cv_results = self.clean_cv_results(cv_results)\n # print('Logging full cv_results: ')\n # print(f'\\t {cv_results}')\n print(cv_results.keys())\n best_params = self.log_top_model(cv_results)\n\n if self.knn_precompute_graph:\n # Remove 'knnmodel' prefix\n best_params = {\n key.split('__')[1]: value for key, value in\n best_params.items()}\n\n self.fit_eval_model(best_params, **kwargs)\n self.log_split_performance()\n\n def run_class_reg_cv_split(self):\n \"\"\"Run hyperparameter tuning for a particular model and column.\"\"\"\n pprint(self.model_hypers)\n\n if not isinstance(self.model_hypers, list):\n param_grid = [self.model_hypers]\n else:\n param_grid = self.model_hypers\n\n # n_jobs = -1 uses all possible cores\n cv_split = iter([(self.train_indices, self.val_indices)])\n\n refit = False\n if refit:\n raise ValueError(\n 'This is *not* supported right now. '\n 'If we want to enable this, we need to make sure that the '\n 'make_scorer() functions give the correct `greater_is_better` '\n 'for each score. 
However, this will flip the sign of the '\n 'score, which in turn means that *our* evaluation in `self.log'\n 'top_model()` wil fail!!')\n\n # Lets precompute the distance graph\n # Due to Tom Dupre la Tour - https://scikit-learn.org/dev/auto_example\n # s/neighbors/plot_caching_nearest_neighbors.html\n if self.knn_precompute_graph:\n assert len(param_grid) == 1\n assert param_grid[0]['weights'] == ['distance'], (\n 'This precomputation should only be done with distance '\n 'weighting.')\n assert param_grid[0]['p'] == [2], (\n 'This precomputation should only be done with L2 norm.')\n print(f'Precomputing KNN graph with dataset {self.c.data_set}.')\n tmpdir_path = f'sklearn_graph_cache_'\n\n graph_model = KNeighborsTransformer(\n n_neighbors=max(param_grid[0]['n_neighbors']))\n knn_model = self.model_class(metric='precomputed')\n param_grid = [{f'knnmodel__{key}': value\n for key, value in param_grid[0].items()}]\n\n with TemporaryDirectory(prefix=tmpdir_path) as tmpdir:\n model = Pipeline(\n steps=[('graph', graph_model),\n ('knnmodel', knn_model)],\n memory=tmpdir)\n clf = self.search_alg(\n estimator=model, param_grid=param_grid, cv=cv_split,\n scoring=self.scoring, refit=refit,\n verbose=self.verbose, n_jobs=self.n_jobs)\n x, y, kwargs = self.get_train_data()\n clf.fit(x, y, **kwargs)\n\n # # At this point, we have fit the CV folds and the graph model\n # # is using the tmpdir to store the graph. Retrieve it to\n # # be used with our final fit as follows.\n # knn_graph = model[0].fit_transform(X=x, y=y)\n\n # In all other cases, do normal tuning\n else:\n model = self.model_class()\n\n clf = self.search_alg(\n estimator=model, param_grid=param_grid, cv=cv_split,\n scoring=self.scoring, refit=refit,\n verbose=self.verbose, n_jobs=self.n_jobs)\n\n x, y, kwargs = self.get_train_data()\n\n clf.fit(x, y, **kwargs)\n\n return clf.cv_results_\n\n def get_train_data(self):\n kwargs = {}\n y = self.y_train_val\n\n if self.model_name in ['TabNet', 'DKL']:\n y_val = self.y_val\n\n if self.reg_class_mode == 'reg':\n y = y[:, np.newaxis]\n y_val = y_val[:, np.newaxis]\n\n kwargs['eval_set'] = [(self.X_val, y_val)]\n kwargs['eval_metric'] = [\n get_label_log_loss_metric(labels=self.labels)]\n\n return self.X_train_val, y, kwargs\n\n def get_final_fit_data(self):\n \"\"\"Data to be used in the final evaluation of the best\n performing hyperparameter configuration, on a particular\n cross-validation split.\n\n I.e., using this, we do not allow our model to retrain on val rows.\n \"\"\"\n kwargs = {}\n y = self.y_train\n\n if self.model_name in ['TabNet', 'DKL']:\n y_val = self.y_val\n\n if self.reg_class_mode == 'reg':\n y = y[:, np.newaxis]\n y_val = y_val[:, np.newaxis]\n\n kwargs['eval_set'] = [(self.X_val, y_val)]\n\n return self.X_train, y, kwargs\n\n def clean_cv_results(self, cv_results):\n # * Rename all occurrences of 'test' to 'val',\n # to avoid downstream confusion\n\n clean_results = {}\n for key, value in cv_results.items():\n if 'test' in key:\n val_key = key.replace('test', 'val')\n clean_results[val_key] = value\n else:\n clean_results[key] = value\n\n return clean_results\n\n def log_top_model(self, cv_results):\n\n # Print and log hyperparameter settings\n print(\n f'Evaluating top performing {self.reg_class_mode} '\n f'model with settings:')\n\n # This is \"mean\" just over that one train_val split\n eval_criteria = f'mean_val_{self.refit}'\n\n if OBJECTIVE[self.reg_class_mode] == 'minimize':\n best_model_index = np.argmin(cv_results[eval_criteria])\n else:\n best_model_index = 
np.argmax(cv_results[eval_criteria])\n\n best_hyper_settings = cv_results['params'][best_model_index]\n\n for hyper_name, hyper_setting in best_hyper_settings.items():\n if hyper_name in ['verbose', 'allow_writing_files']:\n continue\n\n print(f'{hyper_name}: {hyper_setting}')\n\n self.hyper_run_dict = dict(cv_index=self.cv_index)\n self.hyper_run_dict['best_hyper_settings'] = best_hyper_settings\n\n # best val performance\n for metric_name, scorer in self.scoring.items():\n val_metric_name = f'best_val_{metric_name}'\n self.hyper_run_dict[val_metric_name] = cv_results[\n f'mean_val_{metric_name}'][best_model_index]\n\n return best_hyper_settings\n\n def fit_eval_model(self, best_params, **kwargs):\n # train best model on train again\n model = self.model_class(**best_params)\n\n if self.c.sklearn_val_final_fit:\n print('Using val in final fit.')\n x, y, kwargs = self.get_train_data()\n print('x.shape', x.shape)\n print('y.shape', y.shape)\n else:\n print('Not using val in final fit.')\n x, y, kwargs = self.get_final_fit_data()\n print('x.shape', x.shape)\n print('y.shape', y.shape)\n\n model.fit(x, y, **kwargs)\n\n y_pred = model.predict(self.X_test)\n\n if self.reg_class_mode == 'class':\n y_pred_proba = model.predict_proba(self.X_test)\n # log test performance of that model\n for metric_name, scorer in self.scoring.items():\n if metric_name in METRIC_NEEDS_PROBA:\n y = y_pred_proba\n else:\n y = y_pred\n\n performance = scorer._score_func(self.y_test, y)\n test_metric_name = f'best_test_{metric_name}'\n print(test_metric_name, performance)\n self.hyper_run_dict[test_metric_name] = performance\n\n return 1\n\n def log_split_performance(self):\n print(\n f'Logged hyper tuning results for {self.model_name}, '\n f'dataset {self.c.data_set}, '\n f'cv split {self.cv_index}, '\n f'search alg {self.c.sklearn_hyper_search} to wandb.')\n\n self.aggregate_hyper_run_dicts.append(deepcopy(self.hyper_run_dict))\n\n def aggregate_results_from_splits(self):\n \"\"\"For each metric collect mean/std for val/test values.\"\"\"\n run_dicts = self.aggregate_hyper_run_dicts\n final_dict = {}\n\n for metric_name, scorer in self.scoring.items():\n val_test = [f'best_val_{metric_name}', f'best_test_{metric_name}']\n for metric_name in val_test:\n values = []\n for cv_index in range(self.cv_index + 1):\n values.append(run_dicts[cv_index][metric_name])\n final_dict[f'{metric_name}_mean'] = np.mean(values)\n final_dict[f'{metric_name}_stddev'] = np.std(values)\n\n # Compute standard error from RMSE std deviation metrics\n rmse_metric_prefixes = [\n 'best_val_rmse_', 'best_test_rmse_',\n 'best_val_rmse_unstd_', 'best_test_rmse_unstd_']\n for rmse_metric_prefix in rmse_metric_prefixes:\n rmse_stddev_metric = f'{rmse_metric_prefix}stddev'\n if rmse_stddev_metric in final_dict.keys():\n rmse_std_error_metric = f'{rmse_metric_prefix}stderr'\n final_dict[rmse_std_error_metric] = (\n final_dict[rmse_stddev_metric] /\n np.sqrt(self.cv_index + 1))\n\n wandb.run.summary.update(final_dict)\n" ]
[ [ "numpy.asarray", "numpy.argmin", "numpy.sum", "numpy.mean", "numpy.std", "numpy.argmax", "numpy.sort", "numpy.sqrt", "sklearn.metrics.make_scorer", "numpy.hstack", "scipy.stats.rankdata", "sklearn.pipeline.Pipeline", "numpy.unique" ] ]
zzyztyy/sami2py
[ "d36f7994ad0a7d2d01bd66f916697827ee380a9b" ]
[ "sami2py/utils.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright (C) 2017, JK & JH\n# Full license can be found in License.md\n# -----------------------------------------------------------------------------\n\"\"\" Wrapper for running sami2 model\n\nFunctions\n-------------------------------------------------------------------------------\ngenerate_path(tag, lon, year, day)\n Generates path to archived model runs based on input paramters.\n\nset_archive_dir(path=None, store=None)\n Allows user to specify the location where the model outputs will be stored\n\nget_unformatted_data(dat_dir, var_name, nz, nf, ni, nt, reshape=False)\n routine to interpret unformatted binary files created by the SAMI2 model\n-------------------------------------------------------------------------------\n\nModuleauthor\n-------------------------------------------------------------------------------\nJeff Klenzing (JK), 1 Dec 2017, Goddard Space Flight Center (GSFC)\n-------------------------------------------------------------------------------\n\"\"\"\n\nimport os\n\n\ndef generate_path(tag, lon, year, day, test=False):\n \"\"\"Creates a path based on run tag, date, and longitude\n\n Parameters\n ----------\n tag : (string)\n specifies name of model run\n lon : (int)\n longitude of model run\n year : (int)\n year of model run\n day : (int)\n day of year of model run\n test : (bool)\n If True, use directory for test data. If False, use archive_dir\n (default = False)\n\n Returns\n -------\n archive_path : (string)\n Complete path pointing to model archive for a given run\n\n Examples\n --------\n import sami2py\n sami2py.utils.set_archive_dir(path='path_name_here')\n path = sami2py.utils.generate_path(tag='run_name', lon=0, year=2012,\n day=210)\n Will return 'path_name_here/run_name/lon000/2012_210'\n \"\"\"\n\n if not isinstance(tag, str):\n raise TypeError\n\n if test:\n from sami2py import test_data_dir\n top_directory = test_data_dir\n else:\n from sami2py import archive_dir\n top_directory = archive_dir\n\n # Check if top_directory is empty string, ie, user has not specified\n # a directory through set_archive_dir\n if top_directory:\n str_fmt1 = 'lon{lon:03d}'\n str_fmt2 = '{year:4d}_{day:03d}'\n archive_path = os.path.join(top_directory, tag,\n str_fmt1.format(lon=lon),\n str_fmt2.format(year=year,\n day=day))\n else:\n raise NameError(''.join(('Archive Directory Not Specified: ',\n 'Run sami2py.utils.set_archive_dir')))\n\n return archive_path\n\n\ndef set_archive_dir(path=None, store=True):\n # type: (str, bool) -> None\n \"\"\"Set the top level directory sami2py uses to look for data and reload.\n\n Parameters\n ----------\n path : string\n valid path to directory sami2py uses to look for data\n store : bool\n if True, store data directory for future runs\n\n Examples\n --------\n Should be run upon first installation. 
Will prompt users if not run.\n import sami2py\n sami2py.utils.set_archive_dir(path='path_name_here')\n \"\"\"\n import sami2py\n\n path = os.path.expanduser(path)\n if os.path.isdir(path):\n if store:\n with open(os.path.join(sami2py.sami2py_dir, 'archive_path.txt'),\n 'w') as archive_file:\n archive_file.write(path)\n sami2py.archive_dir = path\n else:\n raise ValueError('Path does not lead to a valid directory.')\n\n\ndef get_unformatted_data(dat_dir, var_name, reshape=False, dim=(0, 0)):\n \"\"\"Routine to interpret unformatted binary files created by the SAMI2 model\n\n Parameters\n -----------\n data_dir : (str)\n directory where the SAMI2 data is stored\n var_name : (str)\n name of unformatted data variable to be loaded\n nz : (int)\n number of mesh points along the geomagnetic field line\n nf : (int)\n number of mesh points transverse to the geomagnetic field line i.e.\n number of field lines\n ni : (int)\n number of ion species\n nt : (int)\n number of time steps\n reshape : (bool)\n if true the data is reshaped by the mesh geometry\n\n Returns\n -----------\n float_data : (numpy.ndarray)\n unformatted data organized into a numpy array for handling in python\n \"\"\"\n import numpy as np\n\n binary_file = open(os.path.join(dat_dir, var_name + 'u.dat'), 'rb')\n float_data = np.fromfile(binary_file, dtype='float32')\n binary_file.close()\n\n if reshape:\n float_data = np.reshape(float_data, dim, order='F')\n return float_data[1:-1, :]\n else:\n return float_data[1:-1]\n" ]
[ [ "numpy.reshape", "numpy.fromfile" ] ]
liangs6212/BigDL
[ "3c89ff7e8bbdc713110536c18099506811cd2b3a" ]
[ "python/nano/test/pytorch/tests/test_quantize_inference.py" ]
[ "#\n# Copyright 2016 The BigDL Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nimport pytest\nimport os\nfrom unittest import TestCase\n\nimport torch\nfrom torch import nn\nfrom torch.utils.data import TensorDataset, DataLoader\n\nimport numpy as np\n\nfrom test.pytorch.utils._train_torch_lightning import create_data_loader, data_transform\nfrom bigdl.nano.pytorch.trainer import Trainer\nfrom bigdl.nano.pytorch.vision.models import vision\n\nbatch_size = 256\nnum_workers = 0\ndata_dir = os.path.join(os.path.dirname(__file__), \"../data\")\n\n\nclass ResNet18(nn.Module):\n def __init__(self, num_classes, pretrained=True, include_top=False, freeze=True):\n super().__init__()\n backbone = vision.resnet18(pretrained=pretrained, include_top=include_top, freeze=freeze)\n output_size = backbone.get_output_size()\n head = nn.Linear(output_size, num_classes)\n self.model = nn.Sequential(backbone, head)\n\n def forward(self, x):\n return self.model(x)\n\nclass TestQuantizeInference(TestCase):\n\n def test_quantized_model_inference(self):\n model = ResNet18(10, pretrained=False, include_top=False, freeze=True)\n loss = nn.CrossEntropyLoss()\n optimizer = torch.optim.Adam(model.parameters(), lr=0.01)\n trainer = Trainer(max_epochs=1)\n\n pl_model = Trainer.compile(model, loss, optimizer, onnx=True)\n train_loader = create_data_loader(data_dir, batch_size, \\\n num_workers, data_transform, subset=200)\n trainer.fit(pl_model, train_loader)\n pl_model = trainer.quantize(pl_model, train_loader)\n print(pl_model._quantized_model_up_to_date)\n\n for x, y in train_loader:\n quantized_res = pl_model.inference(x, backend=None, quantize=True).numpy() # quantized\n pl_model.eval(quantize=True)\n with torch.no_grad():\n forward_res = pl_model(x).numpy()\n assert pl_model._quantized_model_up_to_date is True # qmodel is up-to-date while inferencing\n np.testing.assert_almost_equal(quantized_res, forward_res, decimal=5) # same result\n \n trainer.fit(pl_model, train_loader)\n assert pl_model._quantized_model_up_to_date is False # qmodel is not up-to-date after training\n\n pl_model = trainer.quantize(pl_model, train_loader)\n assert pl_model._quantized_model_up_to_date is True # qmodel is up-to-date after building\n" ]
[ [ "torch.nn.Linear", "torch.nn.Sequential", "numpy.testing.assert_almost_equal", "torch.no_grad", "torch.nn.CrossEntropyLoss" ] ]
fastestimator/fastestimator
[ "a8ea30c5da2d92ff8aa0de0084d10c86fb8dfd10" ]
[ "apphub/instance_segmentation/solov2/solov2_tf.py" ]
[ "# Copyright 2021 The FastEstimator Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport os\nimport tempfile\n\nimport cv2\nimport numpy as np\nimport pycocotools.mask as mask_util\nimport tensorflow as tf\nimport tensorflow_addons as tfa\nfrom pycocotools.coco import COCO\nfrom pycocotools.cocoeval import COCOeval\nfrom scipy.ndimage.measurements import center_of_mass\nfrom tensorflow.keras import layers\n\nimport fastestimator as fe\nfrom fastestimator.dataset.data import mscoco\nfrom fastestimator.op.numpyop import Delete, NumpyOp\nfrom fastestimator.op.numpyop.meta import Sometimes\nfrom fastestimator.op.numpyop.multivariate import HorizontalFlip, LongestMaxSize, PadIfNeeded, Resize\nfrom fastestimator.op.numpyop.univariate import ReadImage\nfrom fastestimator.op.tensorop.loss import L2Regularizaton\nfrom fastestimator.op.tensorop.model import ModelOp, UpdateOp\nfrom fastestimator.op.tensorop.tensorop import LambdaOp, TensorOp\nfrom fastestimator.schedule import EpochScheduler, cosine_decay\nfrom fastestimator.trace.adapt import LRScheduler\nfrom fastestimator.trace.io import BestModelSaver\nfrom fastestimator.trace.trace import Trace\nfrom fastestimator.util import Suppressor, get_num_devices\n\n\ndef fpn(C2, C3, C4, C5):\n # lateral conv\n P5 = layers.Conv2D(256, kernel_size=1)(C5)\n P5_up = layers.UpSampling2D()(P5)\n P4 = layers.Conv2D(256, kernel_size=1)(C4)\n P4 = P4 + P5_up\n P4_up = layers.UpSampling2D()(P4)\n P3 = layers.Conv2D(256, kernel_size=1)(C3)\n P3 = P3 + P4_up\n P3_up = layers.UpSampling2D()(P3)\n P2 = layers.Conv2D(256, kernel_size=1)(C2)\n P2 = P2 + P3_up\n # fpn conv\n P5 = layers.Conv2D(256, kernel_size=3, padding=\"same\")(P5)\n P4 = layers.Conv2D(256, kernel_size=3, padding=\"same\")(P4)\n P3 = layers.Conv2D(256, kernel_size=3, padding=\"same\")(P3)\n P2 = layers.Conv2D(256, kernel_size=3, padding=\"same\")(P2)\n return P2, P3, P4, P5\n\n\ndef pad_with_coord(data):\n data_shape = tf.shape(data)\n batch_size, height, width = data_shape[0], data_shape[1], data_shape[2]\n x = tf.cast(tf.linspace(-1, 1, num=width), data.dtype)\n x = tf.tile(x[tf.newaxis, tf.newaxis, ..., tf.newaxis], [batch_size, height, 1, 1])\n y = tf.cast(tf.linspace(-1, 1, num=height), data.dtype)\n y = tf.tile(y[tf.newaxis, ..., tf.newaxis, tf.newaxis], [batch_size, 1, width, 1])\n data = tf.concat([data, x, y], axis=-1)\n return data\n\n\ndef conv_norm(x, filters, kernel_size=3, groups=32):\n x = layers.Conv2D(filters=filters, kernel_size=kernel_size, padding='same', use_bias=False)(x)\n x = tfa.layers.GroupNormalization(groups=groups, epsilon=1e-5)(x)\n return x\n\n\ndef solov2_head_model(stacked_convs=4, ch_in=258, ch_feature=512, ch_kernel_out=256, num_classes=80):\n inputs = layers.Input(shape=(None, None, ch_in))\n feature_kernel = inputs\n feature_cls = inputs[..., :-2]\n for _ in range(stacked_convs):\n feature_kernel = tf.nn.relu(conv_norm(feature_kernel, 
filters=ch_feature))\n feature_cls = tf.nn.relu(conv_norm(feature_cls, filters=ch_feature))\n feature_kernel = layers.Conv2D(filters=ch_kernel_out,\n kernel_size=3,\n padding='same',\n kernel_initializer=tf.random_normal_initializer(stddev=0.01))(feature_kernel)\n feature_cls = layers.Conv2D(filters=num_classes,\n kernel_size=3,\n padding='same',\n kernel_initializer=tf.random_normal_initializer(stddev=0.01),\n bias_initializer=tf.initializers.constant(np.log(1 / 99)))(feature_cls)\n return tf.keras.Model(inputs=inputs, outputs=[feature_kernel, feature_cls])\n\n\ndef solov2_head(P2, P3, P4, P5, num_classes=80):\n head_model = solov2_head_model(num_classes=num_classes)\n # applying maxpool first for P2\n P2 = layers.MaxPool2D()(P2)\n features = [P2, P3, P4, P5, P5]\n grid_sizes = [40, 36, 24, 16, 12]\n feat_kernel_list, feat_cls_list = [], []\n for feature, grid_size in zip(features, grid_sizes):\n feature = pad_with_coord(feature)\n feature = tf.image.resize(feature, size=(grid_size, grid_size))\n feat_kernel, feat_cls = head_model(feature)\n feat_kernel_list.append(feat_kernel)\n feat_cls_list.append(tf.sigmoid(feat_cls))\n return feat_cls_list, feat_kernel_list\n\n\ndef solov2_maskhead(P2, P3, P4, P5, mid_ch=128, out_ch=256):\n # first level\n P2 = tf.nn.relu(conv_norm(P2, filters=mid_ch))\n # second level\n P3 = tf.nn.relu(conv_norm(P3, filters=mid_ch))\n P3 = layers.UpSampling2D()(P3)\n # third level\n P4 = tf.nn.relu(conv_norm(P4, filters=mid_ch))\n P4 = layers.UpSampling2D()(P4)\n P4 = tf.nn.relu(conv_norm(P4, filters=mid_ch))\n P4 = layers.UpSampling2D()(P4)\n # top level, add coordinate\n P5 = tf.nn.relu(conv_norm(pad_with_coord(P5), filters=mid_ch))\n P5 = layers.UpSampling2D()(P5)\n P5 = tf.nn.relu(conv_norm(P5, filters=mid_ch))\n P5 = layers.UpSampling2D()(P5)\n P5 = tf.nn.relu(conv_norm(P5, filters=mid_ch))\n P5 = layers.UpSampling2D()(P5)\n seg_outputs = tf.nn.relu(conv_norm(P2 + P3 + P4 + P5, filters=out_ch, kernel_size=1))\n return seg_outputs\n\n\ndef solov2(input_shape=(None, None, 3), num_classes=80):\n inputs = tf.keras.Input(shape=input_shape)\n resnet50 = tf.keras.applications.ResNet50(weights=\"imagenet\", include_top=False, input_tensor=inputs, pooling=None)\n assert resnet50.layers[38].name == \"conv2_block3_out\"\n C2 = resnet50.layers[38].output\n assert resnet50.layers[80].name == \"conv3_block4_out\"\n C3 = resnet50.layers[80].output\n assert resnet50.layers[142].name == \"conv4_block6_out\"\n C4 = resnet50.layers[142].output\n assert resnet50.layers[-1].name == \"conv5_block3_out\"\n C5 = resnet50.layers[-1].output\n P2, P3, P4, P5 = fpn(C2, C3, C4, C5)\n feat_seg = solov2_maskhead(P2, P3, P4, P5) # [B, h/4, w/4, 256]\n feat_cls_list, feat_kernel_list = solov2_head(P2, P3, P4, P5, num_classes=num_classes) # [B, grid, grid, 80], [B, grid, grid, 256]\n model = tf.keras.Model(inputs=inputs, outputs=[feat_seg, feat_cls_list, feat_kernel_list])\n return model\n\n\nclass MergeMask(NumpyOp):\n def forward(self, data, state):\n data = np.stack(data, axis=-1)\n return data\n\n\nclass GetImageSize(NumpyOp):\n def forward(self, data, state):\n height, width, _ = data.shape\n return np.array([height, width], dtype=\"int32\")\n\n\nclass Gt2Target(NumpyOp):\n def __init__(self,\n inputs,\n outputs,\n mode=None,\n num_grids=(40, 36, 24, 16, 12),\n scale_ranges=((1, 96), (48, 192), (96, 384), (192, 768), (384, 2048)),\n coord_sigma=0.05):\n super().__init__(inputs=inputs, outputs=outputs, mode=mode)\n self.num_grids = num_grids\n self.scale_ranges = scale_ranges\n 
self.coord_sigma = coord_sigma\n missing_category = [66, 68, 69, 71, 12, 45, 83, 26, 29, 30]\n category = [x for x in range(1, 91) if not x in missing_category]\n self.mapping = {k: v for k, v in zip(category, list(range(80)))}\n\n def forward(self, data, state):\n masks, bboxes = data\n bboxes = np.array(bboxes, dtype=\"float32\")\n masks = np.transpose(masks, [2, 0, 1]) # (H, W, #objects) -> (#objects, H, W)\n masks, bboxes = self.remove_empty_gt(masks, bboxes)\n # 91 classes -> 80 classes that starts from 1\n classes = np.array([self.mapping[int(x[-1])] + 1 for x in bboxes], dtype=np.int32)\n widths, heights = bboxes[:, 2], bboxes[:, 3]\n gt_match = [] # number of objects x (grid_idx, height_idx, width_idx, exist)\n for width, height, mask in zip(widths, heights, masks):\n object_match = []\n object_scale = np.sqrt(width * height)\n center_h, center_w = center_of_mass(mask)\n for grid_idx, ((lower_scale, upper_scale), num_grid) in enumerate(zip(self.scale_ranges, self.num_grids)):\n grid_matched = (object_scale >= lower_scale) & (object_scale <= upper_scale)\n if grid_matched:\n w_delta, h_delta = 0.5 * width * self.coord_sigma, 0.5 * height * self.coord_sigma\n coord_h, coord_w = int(center_h / mask.shape[0] * num_grid), int(center_w / mask.shape[1] * num_grid)\n # each object will have some additional area of effect\n top_box_extend = max(0, int((center_h - h_delta) / mask.shape[0] * num_grid))\n down_box_extend = min(num_grid - 1, int((center_h + h_delta) / mask.shape[0] * num_grid))\n left_box_extend = max(0, int((center_w - w_delta) / mask.shape[1] * num_grid))\n right_box_extend = min(num_grid - 1, int((center_w + w_delta) / mask.shape[1] * num_grid))\n # make sure the additional area of effect is at most 1 grid more\n top_box_extend = max(top_box_extend, coord_h - 1)\n down_box_extend = min(down_box_extend, coord_h + 1)\n left_box_extend = max(left_box_extend, coord_w - 1)\n right_box_extend = min(right_box_extend, coord_w + 1)\n object_match.extend([(grid_idx, y, x, 1) for y in range(top_box_extend, down_box_extend + 1)\n for x in range(left_box_extend, right_box_extend + 1)])\n gt_match.append(object_match)\n gt_match = self.pad_match(gt_match) #num_object x num_matches x [grid_idx, heihght_idx, width_idx, exist]\n return gt_match, masks, classes\n\n @staticmethod\n def pad_match(gt_match):\n max_num_matches = max([len(match) for match in gt_match])\n for match in gt_match:\n match.extend([(0, 0, 0, 0) for _ in range(max_num_matches - len(match))])\n return np.array(gt_match, dtype=\"int32\")\n\n @staticmethod\n def remove_empty_gt(masks, bboxes):\n num_objects = masks.shape[0]\n non_empty_mask = np.sum(masks.reshape(num_objects, -1), axis=1) > 0\n return masks[non_empty_mask], bboxes[non_empty_mask]\n\n\nclass Normalize(TensorOp):\n def __init__(self, inputs, outputs, mean, std, mode=None):\n super().__init__(inputs=inputs, outputs=outputs, mode=mode)\n self.mean = tf.convert_to_tensor(mean)\n self.std = tf.convert_to_tensor(std)\n\n def forward(self, data, state):\n data = (data / 255 - self.mean) / self.std\n return data\n\n\nclass Solov2Loss(TensorOp):\n def __init__(self, level, grid_dim, inputs, outputs, mode=None, num_class=80):\n super().__init__(inputs=inputs, outputs=outputs, mode=mode)\n self.level = level\n self.grid_dim = grid_dim\n self.num_class = num_class\n\n def forward(self, data, state):\n masks, classes, gt_match, feat_segs, feat_clss, kernels = data\n cls_loss, grid_object_maps = tf.map_fn(fn=lambda x: self.get_cls_loss(x[0], x[1], x[2]),\n elems=(classes, 
feat_clss, gt_match),\n fn_output_signature=(tf.float32, tf.float32))\n seg_loss = tf.map_fn(fn=lambda x: self.get_seg_loss(x[0], x[1], x[2], x[3]),\n elems=(masks, feat_segs, kernels, grid_object_maps),\n fn_output_signature=tf.float32)\n return cls_loss, seg_loss\n\n def get_seg_loss(self, mask, feat_seg, kernel, grid_object_map):\n indices = tf.where(grid_object_map[..., 0] > 0)\n object_indices = tf.cast(tf.gather_nd(grid_object_map, indices)[:, 1], tf.int32)\n mask_gt = tf.cast(tf.gather(mask, object_indices), kernel.dtype)\n active_kernel = tf.gather_nd(kernel, indices)\n feat_seg = tf.reshape(tf.transpose(feat_seg, perm=[2, 0, 1]),\n (tf.shape(kernel)[-1], -1)) # H/4,W/4,C->C,H/4,W/4\n seg_preds = tf.reshape(tf.matmul(active_kernel, feat_seg), tf.shape(mask_gt))\n loss = self.dice_loss(seg_preds, mask_gt)\n return loss\n\n @staticmethod\n def dice_loss(pred, gt):\n pred = tf.sigmoid(pred)\n a = tf.reduce_sum(pred * gt)\n b = tf.reduce_sum(pred * pred) + 0.001\n c = tf.reduce_sum(gt * gt) + 0.001\n dice = (2 * a) / (b + c)\n return 1 - tf.where(dice > 0, dice, 1)\n\n def get_cls_loss(self, cls_gt, feat_cls, match):\n cls_gt = tf.cast(cls_gt, feat_cls.dtype)\n match, cls_gt = match[cls_gt > 0], cls_gt[cls_gt > 0] # remove the padded object\n feat_cls_gts_raw = tf.map_fn(fn=lambda x: self.assign_cls_feat(x[0], x[1]),\n elems=(match, cls_gt),\n fn_output_signature=tf.float32)\n grid_object_map = self.reduce_to_single_grid(feat_cls_gts_raw)\n feat_cls_gts = tf.one_hot(tf.cast(grid_object_map[..., 0], tf.int32), depth=self.num_class + 1)[..., 1:]\n cls_loss = self.focal_loss(feat_cls, feat_cls_gts)\n return cls_loss, grid_object_map\n\n def reduce_to_single_grid(self, feat_cls_gts_raw):\n feat_cls_gts = tf.zeros((self.grid_dim, self.grid_dim), dtype=feat_cls_gts_raw.dtype)\n object_idx = tf.zeros((self.grid_dim, self.grid_dim), dtype=feat_cls_gts_raw.dtype)\n num_obj = tf.shape(feat_cls_gts_raw)[0]\n for idx in range(num_obj):\n classes = feat_cls_gts_raw[idx]\n indexes = tf.cast(tf.where(classes > 0, idx, 0), classes.dtype)\n object_idx = object_idx + tf.where(feat_cls_gts == 0, indexes, tf.zeros_like(indexes))\n feat_cls_gts = feat_cls_gts + tf.where(feat_cls_gts == 0, classes, tf.zeros_like(classes))\n grid_object_map = tf.stack([feat_cls_gts, object_idx], axis=-1)\n return grid_object_map\n\n @staticmethod\n def focal_loss(pred, gt, alpha=0.25, gamma=2.0):\n pred, gt = tf.reshape(pred, (-1, 1)), tf.reshape(gt, (-1, 1))\n anchor_obj_count = tf.cast(tf.math.count_nonzero(gt), pred.dtype)\n alpha_factor = tf.ones_like(gt) * alpha\n alpha_factor = tf.where(tf.equal(gt, 1), alpha_factor, 1 - alpha_factor)\n focal_weight = tf.where(tf.equal(gt, 1), 1 - pred, pred)\n focal_weight = alpha_factor * focal_weight**gamma / (anchor_obj_count + 1)\n cls_loss = tf.losses.BinaryCrossentropy(reduction='sum')(gt, pred, sample_weight=focal_weight)\n return cls_loss\n\n def assign_cls_feat(self, grid_match_info, cls_gt_obj):\n match_bool = tf.logical_and(tf.reduce_sum(grid_match_info, axis=-1) > 0, grid_match_info[:, 0] == self.level)\n grid_match_info = grid_match_info[match_bool]\n grid_indices = grid_match_info[:, 1:3]\n num_indices = tf.shape(grid_indices)[0]\n feat_cls_gt = tf.scatter_nd(grid_indices, tf.fill([num_indices], cls_gt_obj), (self.grid_dim, self.grid_dim))\n return feat_cls_gt\n\n\nclass CombineLoss(TensorOp):\n def forward(self, data, state):\n l_c1, l_s1, l_c2, l_s2, l_c3, l_s3, l_c4, l_s4, l_c5, l_s5 = data\n cls_losses = tf.reduce_sum(tf.stack([l_c1, l_c2, l_c3, l_c4, l_c5], 
axis=-1), axis=-1)\n seg_losses = tf.reduce_sum(tf.stack([l_s1, l_s2, l_s3, l_s4, l_s5], axis=-1), axis=-1)\n mean_cls_loss, mean_seg_loss = tf.reduce_mean(cls_losses), tf.reduce_mean(seg_losses) * 3\n return mean_cls_loss + mean_seg_loss, mean_cls_loss, mean_seg_loss\n\n\nclass PointsNMS(TensorOp):\n def forward(self, data, state):\n feat_cls_list = [self.points_nms(x) for x in data]\n return feat_cls_list\n\n @staticmethod\n def points_nms(x):\n x_max_pool = tf.nn.max_pool2d(x, ksize=2, strides=1, padding=[[0, 0], [1, 1], [1, 1], [0, 0]])[:, :-1, :-1, :]\n x = tf.where(tf.equal(x, x_max_pool), x, 0)\n return x\n\n\nclass Predict(TensorOp):\n def __init__(self, inputs, outputs, mode=None, score_threshold=0.1, segm_strides=(8.0, 8.0, 16.0, 32.0, 32.0)):\n super().__init__(inputs=inputs, outputs=outputs, mode=mode)\n self.score_threshold = score_threshold\n self.segm_strides = segm_strides\n\n def forward(self, data, state):\n feat_seg, feat_cls_list, feat_kernel_list = data\n strides = [tf.fill((tf.shape(x)[1] * tf.shape(x)[2], ), s) for s, x in zip(self.segm_strides, feat_cls_list)]\n batch_size, num_class = tf.shape(feat_cls_list[0])[0], tf.shape(feat_cls_list[0])[3]\n kernel_dim = tf.shape(feat_kernel_list[0])[-1]\n feat_cls = tf.concat([tf.reshape(x, (batch_size, -1, num_class)) for x in feat_cls_list], axis=1)\n feat_kernel = tf.concat([tf.reshape(x, (batch_size, -1, kernel_dim)) for x in feat_kernel_list], axis=1)\n strides = tf.concat(strides, axis=0)\n seg_preds, cate_scores, cate_labels = tf.map_fn(fn=lambda x: self.predict_sample(x[0], x[1], x[2], strides),\n elems=(feat_cls, feat_seg, feat_kernel),\n fn_output_signature=(tf.float32, tf.float32, tf.int32))\n return seg_preds, cate_scores, cate_labels\n\n def predict_sample(self, cate_preds, seg_preds, kernel_preds, strides):\n # first filter class prediction by score_threshold\n select_indices = tf.where(cate_preds > self.score_threshold)\n cate_labels = tf.cast(select_indices[:, 1], tf.int32)\n kernel_preds = tf.gather(kernel_preds, select_indices[:, 0])\n cate_scores = tf.gather_nd(cate_preds, select_indices)\n strides = tf.gather(strides, select_indices[:, 0])\n # next calculate the mask\n kernel_preds = tf.transpose(kernel_preds)[tf.newaxis, tf.newaxis, ...] 
# [k_h, k_w, c_in, c_out]\n seg_preds = tf.sigmoid(tf.nn.conv2d(seg_preds[tf.newaxis, ...], kernel_preds, strides=1, padding=\"VALID\"))[0]\n seg_preds = tf.transpose(seg_preds, perm=[2, 0, 1]) # [C, H, W]\n seg_masks = tf.where(seg_preds > 0.5, 1.0, 0.0)\n # then filter masks based on strides\n mask_sum = tf.reduce_sum(seg_masks, axis=[1, 2])\n select_indices = tf.where(mask_sum > strides)[:, 0]\n seg_preds, seg_masks = tf.gather(seg_preds, select_indices), tf.gather(seg_masks, select_indices)\n mask_sum = tf.gather(mask_sum, select_indices)\n cate_labels, cate_scores = tf.gather(cate_labels, select_indices), tf.gather(cate_scores, select_indices)\n # scale the category score by mask confidence then matrix nms\n mask_scores = tf.reduce_sum(seg_preds * seg_masks, axis=[1, 2]) / mask_sum\n cate_scores = cate_scores * mask_scores\n seg_preds, cate_scores, cate_labels = self.matrix_nms(seg_preds, seg_masks, cate_labels, cate_scores, mask_sum)\n return seg_preds, cate_scores, cate_labels\n\n @staticmethod\n def matrix_nms(seg_preds, seg_masks, cate_labels, cate_scores, mask_sum, pre_nms_k=500, post_nms_k=100):\n # first select top k category scores\n num_selected = tf.minimum(pre_nms_k, tf.shape(cate_scores)[0])\n indices = tf.argsort(cate_scores, direction='DESCENDING')[:num_selected]\n seg_preds, seg_masks = tf.gather(seg_preds, indices), tf.gather(seg_masks, indices)\n cate_labels, cate_scores = tf.gather(cate_labels, indices), tf.gather(cate_scores, indices)\n mask_sum = tf.gather(mask_sum, indices)\n # calculate iou between different masks\n seg_masks = tf.reshape(seg_masks, shape=(num_selected, -1))\n intersection = tf.matmul(seg_masks, seg_masks, transpose_b=True)\n mask_sum = tf.tile(mask_sum[tf.newaxis, ...], multiples=[num_selected, 1])\n union = mask_sum + tf.transpose(mask_sum) - intersection\n iou = intersection / union\n iou = tf.linalg.band_part(iou, 0, -1) - tf.linalg.band_part(iou, 0, 0) # equivalent of np.triu(diagonal=1)\n # iou decay and compensation\n labels_match = tf.tile(cate_labels[tf.newaxis, ...], multiples=[num_selected, 1])\n labels_match = tf.where(labels_match == tf.transpose(labels_match), 1.0, 0.0)\n labels_match = tf.linalg.band_part(labels_match, 0, -1) - tf.linalg.band_part(labels_match, 0, 0)\n decay_iou = iou * labels_match # iou with any object from same class\n compensate_iou = tf.reduce_max(decay_iou, axis=0)\n compensate_iou = tf.tile(compensate_iou[..., tf.newaxis], multiples=[1, num_selected])\n # matrix nms\n decay_coefficient = tf.reduce_min(tf.exp(-2 * decay_iou**2) / tf.exp(-2 * compensate_iou**2), axis=0)\n cate_scores = cate_scores * decay_coefficient\n cate_scores = tf.where(cate_scores >= 0.05, cate_scores, 0)\n num_selected = tf.minimum(post_nms_k, tf.shape(cate_scores)[0])\n # select the final predictions and pad output for batch shape consistency\n indices = tf.argsort(cate_scores, direction='DESCENDING')[:num_selected]\n seg_preds = tf.pad(tf.gather(seg_preds, indices), paddings=[[0, post_nms_k - num_selected], [0, 0], [0, 0]])\n cate_scores = tf.pad(tf.gather(cate_scores, indices), paddings=[[0, post_nms_k - num_selected]])\n cate_labels = tf.pad(tf.gather(cate_labels, indices), paddings=[[0, post_nms_k - num_selected]])\n return seg_preds, cate_scores, cate_labels\n\n\ndef lr_schedule_warmup(step, init_lr):\n if step < 1000:\n lr = init_lr / 1000 * step\n else:\n lr = init_lr\n return lr\n\n\nclass COCOMaskmAP(Trace):\n def __init__(self, data_dir, inputs=None, outputs=\"mAP\", mode=None):\n super().__init__(inputs=inputs, 
outputs=outputs, mode=mode)\n with Suppressor():\n self.coco_gt = COCO(os.path.join(data_dir.replace('val2017', 'annotations'), \"instances_val2017.json\"))\n missing_category = [66, 68, 69, 71, 12, 45, 83, 26, 29, 30]\n category = [x for x in range(1, 91) if not x in missing_category]\n self.mapping = {k: v for k, v in zip(list(range(80)), category)}\n\n def on_epoch_begin(self, data):\n self.results = []\n\n def on_batch_end(self, data):\n seg_preds, = data['seg_preds'].numpy(),\n cate_scores, cate_labels = data['cate_scores'].numpy(), data['cate_labels'].numpy()\n image_ids, imsizes = data['image_id'].numpy(), data['imsize'].numpy()\n for seg_pred, cate_score, cate_label, image_id, imsize in zip(seg_preds, cate_scores, cate_labels, image_ids, imsizes):\n # remove the padded data due to batching\n indices = cate_score > 0.01\n seg_pred, cate_score, cate_label = seg_pred[indices], cate_score[indices], cate_label[indices]\n if seg_pred.shape[0] == 0:\n continue\n seg_pred = np.transpose(seg_pred, axes=(1, 2, 0)) # [H, W, #objects]\n # remove the padded data due to image resize\n mask_h, mask_w, num_obj = seg_pred.shape\n image_h, image_w = 4 * mask_h, 4 * mask_w\n seg_pred = cv2.resize(seg_pred, (image_w, image_h))\n if num_obj == 1:\n seg_pred = seg_pred[..., np.newaxis] # when there's only single object, resize will remove the channel\n ori_h, ori_w = imsize\n scale_ratio = min(image_h / ori_h, image_w / ori_w)\n pad_h, pad_w = image_h - scale_ratio * ori_h, image_w - scale_ratio * ori_w\n h_start, h_end = round(pad_h / 2), image_h - round(pad_h / 2)\n w_start, w_end = round(pad_w / 2), image_w - round(pad_w / 2)\n seg_pred = seg_pred[h_start:h_end, w_start:w_end, :]\n # now reshape to original shape\n seg_pred = cv2.resize(seg_pred, (ori_w, ori_h))\n if num_obj == 1:\n seg_pred = seg_pred[..., np.newaxis] # when there's only single object, resize will remove the channel\n seg_pred = np.transpose(seg_pred, [2, 0, 1]) # [#objects, H, W]\n seg_pred = np.uint8(np.where(seg_pred > 0.5, 1, 0))\n for seg, score, label in zip(seg_pred, cate_score, cate_label):\n result = {\n \"image_id\": image_id,\n \"category_id\": self.mapping[label],\n \"score\": score,\n \"segmentation\": mask_util.encode(np.array(seg[..., np.newaxis], order='F'))[0]\n }\n self.results.append(result)\n return data\n\n def on_epoch_end(self, data):\n mAP = 0.0\n if self.results:\n with Suppressor():\n coco_results = self.coco_gt.loadRes(self.results)\n cocoEval = COCOeval(self.coco_gt, coco_results, 'segm')\n cocoEval.evaluate()\n cocoEval.accumulate()\n cocoEval.summarize()\n mAP = cocoEval.stats[0]\n data.write_with_log(self.outputs[0], mAP)\n\n\ndef get_estimator(data_dir=None,\n epochs=12,\n batch_size_per_gpu=4,\n im_size=1344,\n model_dir=tempfile.mkdtemp(),\n train_steps_per_epoch=None,\n eval_steps_per_epoch=None):\n assert im_size % 32 == 0, \"im_size must be a multiple of 32\"\n num_device = get_num_devices()\n train_ds, val_ds = mscoco.load_data(root_dir=data_dir, load_masks=True)\n batch_size = num_device * batch_size_per_gpu\n pipeline = fe.Pipeline(\n train_data=train_ds,\n eval_data=val_ds,\n test_data=val_ds,\n batch_size=batch_size,\n ops=[\n ReadImage(inputs=\"image\", outputs=\"image\"),\n MergeMask(inputs=\"mask\", outputs=\"mask\"),\n GetImageSize(inputs=\"image\", outputs=\"imsize\", mode=\"test\"),\n LongestMaxSize(max_size=im_size, image_in=\"image\", mask_in=\"mask\", bbox_in=\"bbox\", bbox_params=\"coco\"),\n PadIfNeeded(min_height=im_size,\n min_width=im_size,\n image_in=\"image\",\n 
mask_in=\"mask\",\n bbox_in=\"bbox\",\n bbox_params=\"coco\",\n border_mode=cv2.BORDER_CONSTANT,\n value=0),\n Sometimes(HorizontalFlip(image_in=\"image\", mask_in=\"mask\", bbox_in=\"bbox\", bbox_params=\"coco\",\n mode=\"train\")),\n Resize(height=im_size // 4, width=im_size // 4, image_in='mask'), # downscale mask for memory efficiency\n Gt2Target(inputs=(\"mask\", \"bbox\"), outputs=(\"gt_match\", \"mask\", \"classes\")),\n Delete(keys=\"bbox\"),\n Delete(keys=\"image_id\", mode=\"!test\")\n ],\n pad_value=0,\n num_process=8 * num_device)\n init_lr = 1e-2 / 16 * batch_size\n model = fe.build(model_fn=lambda: solov2(input_shape=(im_size, im_size, 3)),\n optimizer_fn=lambda: tf.optimizers.SGD(learning_rate=init_lr, momentum=0.9))\n network = fe.Network(ops=[\n Normalize(inputs=\"image\", outputs=\"image\", mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),\n ModelOp(model=model, inputs=\"image\", outputs=(\"feat_seg\", \"feat_cls_list\", \"feat_kernel_list\")),\n LambdaOp(fn=lambda x: x, inputs=\"feat_cls_list\", outputs=(\"cls1\", \"cls2\", \"cls3\", \"cls4\", \"cls5\")),\n LambdaOp(fn=lambda x: x, inputs=\"feat_kernel_list\", outputs=(\"k1\", \"k2\", \"k3\", \"k4\", \"k5\")),\n Solov2Loss(0, 40, inputs=(\"mask\", \"classes\", \"gt_match\", \"feat_seg\", \"cls1\", \"k1\"), outputs=(\"l_c1\", \"l_s1\")),\n Solov2Loss(1, 36, inputs=(\"mask\", \"classes\", \"gt_match\", \"feat_seg\", \"cls2\", \"k2\"), outputs=(\"l_c2\", \"l_s2\")),\n Solov2Loss(2, 24, inputs=(\"mask\", \"classes\", \"gt_match\", \"feat_seg\", \"cls3\", \"k3\"), outputs=(\"l_c3\", \"l_s3\")),\n Solov2Loss(3, 16, inputs=(\"mask\", \"classes\", \"gt_match\", \"feat_seg\", \"cls4\", \"k4\"), outputs=(\"l_c4\", \"l_s4\")),\n Solov2Loss(4, 12, inputs=(\"mask\", \"classes\", \"gt_match\", \"feat_seg\", \"cls5\", \"k5\"), outputs=(\"l_c5\", \"l_s5\")),\n CombineLoss(inputs=(\"l_c1\", \"l_s1\", \"l_c2\", \"l_s2\", \"l_c3\", \"l_s3\", \"l_c4\", \"l_s4\", \"l_c5\", \"l_s5\"),\n outputs=(\"total_loss\", \"cls_loss\", \"seg_loss\")),\n L2Regularizaton(inputs=\"total_loss\", outputs=\"total_loss_l2\", model=model, beta=1e-5, mode=\"train\"),\n UpdateOp(model=model, loss_name=\"total_loss_l2\"),\n PointsNMS(inputs=\"feat_cls_list\", outputs=\"feat_cls_list\", mode=\"test\"),\n Predict(inputs=(\"feat_seg\", \"feat_cls_list\", \"feat_kernel_list\"),\n outputs=(\"seg_preds\", \"cate_scores\", \"cate_labels\"),\n mode=\"test\")\n ])\n train_steps_epoch = int(np.ceil(len(train_ds) / batch_size))\n lr_schedule = {\n 1:\n LRScheduler(model=model, lr_fn=lambda step: lr_schedule_warmup(step, init_lr=init_lr)),\n 2:\n LRScheduler(\n model=model,\n lr_fn=lambda step: cosine_decay(step,\n cycle_length=train_steps_epoch * (epochs - 1),\n init_lr=init_lr,\n min_lr=init_lr / 100,\n start=train_steps_epoch + 1))\n }\n traces = [\n EpochScheduler(lr_schedule),\n COCOMaskmAP(data_dir=val_ds.root_dir,\n inputs=(\"seg_preds\", \"cate_scores\", \"cate_labels\", \"image_id\", \"imsize\"),\n mode=\"test\"),\n BestModelSaver(model=model, save_dir=model_dir, metric=\"total_loss\")\n ]\n estimator = fe.Estimator(pipeline=pipeline,\n network=network,\n epochs=epochs,\n traces=traces,\n monitor_names=(\"cls_loss\", \"seg_loss\", \"total_loss\"),\n train_steps_per_epoch=train_steps_per_epoch,\n eval_steps_per_epoch=eval_steps_per_epoch)\n return estimator\n" ]
[ [ "tensorflow.exp", "tensorflow.keras.applications.ResNet50", "tensorflow.losses.BinaryCrossentropy", "tensorflow.nn.conv2d", "tensorflow.matmul", "tensorflow.ones_like", "tensorflow.reshape", "tensorflow.math.count_nonzero", "numpy.where", "tensorflow.keras.Model", "tensorflow.zeros_like", "tensorflow.nn.max_pool2d", "tensorflow.stack", "tensorflow.tile", "scipy.ndimage.measurements.center_of_mass", "tensorflow.cast", "tensorflow.random_normal_initializer", "tensorflow.shape", "tensorflow.concat", "numpy.log", "tensorflow.sigmoid", "tensorflow.transpose", "tensorflow.keras.layers.Conv2D", "numpy.transpose", "numpy.sqrt", "tensorflow.keras.Input", "numpy.array", "tensorflow.zeros", "tensorflow.keras.layers.UpSampling2D", "tensorflow.where", "tensorflow.gather_nd", "tensorflow.keras.layers.MaxPool2D", "tensorflow.fill", "numpy.stack", "tensorflow.reduce_sum", "tensorflow.convert_to_tensor", "tensorflow.linspace", "tensorflow.keras.layers.Input", "tensorflow.argsort", "tensorflow.equal", "tensorflow.reduce_max", "tensorflow.gather", "tensorflow.image.resize", "tensorflow.reduce_mean", "tensorflow.optimizers.SGD", "tensorflow.linalg.band_part" ] ]
Baisal89/ds_8_lamdata
[ "67911b6f15ae6230a65c439a978303ac4b492075" ]
[ "lamdata_baisal89/__init__.py" ]
[ "\"\"\"\nlamdata - a collection of data science helper functions\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\n\n# sample code\nONES = pd.DataFrame(np.ones(10))\nZEROS = pd.DataFrame(np.zeros(50))\n\n#The reson that ONES and ZEROS are capitalized because python convetion\n# global variable are all caps\n" ]
[ [ "numpy.ones", "numpy.zeros" ] ]
YipengHu/MPHY0041
[ "6e9706eba2b9f9a2449539d7dea5f91dde807584" ]
[ "tutorials/synthesis/visualise.py" ]
[ "# This is part of the tutorial materials in the UCL Module MPHY0041: Machine Learning in Medical Imaging\n# run train_*.py before visualise the results\nimport os\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nPATH_TO_RESULT = 'result'\n\n# to plot example slices of segmentation results\nfor ext in [\"-tf.npy\",\"-pt.npy\"]: # find all npy files\n files = [f for f in sorted(os.listdir(PATH_TO_RESULT)) if f.endswith(ext)]\n if len(files)==0: continue\n for ii, filename in enumerate(files):\n images = np.load(os.path.join(PATH_TO_RESULT,filename))\n\n for n in range(int(images.shape[0]**(0.5))):\n if images.shape[0] % (n+1) == 0:\n nn = n+1\n\n images = np.reshape(images,(-1,images.shape[1]*nn,images.shape[2]))\n images = np.reshape(np.transpose(images,[0,2,1]),(-1,images.shape[1]))\n\n plt.figure()\n plt.imshow(images,cmap='gray')\n plt.axis('off')\n # plt.show()\n plt.savefig(os.path.join(PATH_TO_RESULT, '{}-{:03d}.jpg'.format(filename.split('.')[0],ii)),bbox_inches='tight')\n plt.close()\n\nprint('Plots saved: {}'.format(os.path.abspath(PATH_TO_RESULT)))\n" ]
[ [ "numpy.reshape", "matplotlib.pyplot.close", "matplotlib.pyplot.figure", "numpy.transpose", "matplotlib.pyplot.axis", "matplotlib.pyplot.imshow" ] ]
greatestgoat/kaggle_petfinder
[ "54ac4e43a3442cfec5a45a0eaced2e09738b08c0" ]
[ "kaggle_petfinder/main.py" ]
[ "from collections import Counter\nfrom functools import partial\nfrom math import sqrt\nfrom pathlib import Path\n\nimport lightgbm as lgb\nimport numpy as np\nimport pandas as pd\nimport scipy as sp\nfrom sklearn.decomposition import TruncatedSVD, NMF\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import KFold\nfrom sklearn.model_selection import StratifiedKFold\n\nfrom kaggle_petfinder.utils import is_script_running\n\nON_KAGGLE: bool = is_script_running()\nDATA_ROOT = Path('../input/petfinder-adoption-prediction'\n if ON_KAGGLE else '../resources/petfinder-adoption-prediction')\nEXTRA_DATA_ROOT = Path('../input/extract-image-features-from-pretrained-nn'\n if ON_KAGGLE else '../resources/extract-image-features-from-pretrained-nn')\n\n# basic datasets\ntrain = pd.read_csv(DATA_ROOT / 'train/train.csv')\ntest = pd.read_csv(DATA_ROOT / 'test/test.csv')\nsample_submission = pd.read_csv(DATA_ROOT / 'test/sample_submission.csv')\nlabels_breed = pd.read_csv(DATA_ROOT / 'breed_labels.csv')\nlabels_state = pd.read_csv(DATA_ROOT / 'color_labels.csv')\nlabels_color = pd.read_csv(DATA_ROOT / 'state_labels.csv')\n\n# extract datasets\n# https://www.kaggle.com/christofhenkel/extract-image-features-from-pretrained-nn\ntrain_img_features = pd.read_csv(\n EXTRA_DATA_ROOT / 'train_img_features.csv')\ntest_img_features = pd.read_csv(\n EXTRA_DATA_ROOT / 'test_img_features.csv')\n\n# img_features columns set names\ncol_names = [\"PetID\"] + [\"{}_img_feature\".format(_) for _ in range(256)]\ntrain_img_features.columns = col_names\ntest_img_features.columns = col_names\n\n\ndef agg_features(df_metadata, df_sentiment):\n # Extend aggregates and improve column naming\n aggregates = ['mean', \"median\", 'sum', \"var\", \"std\", \"min\", \"max\", \"nunique\"]\n\n metadata_desc = df_metadata.groupby(['PetID'])['metadata_annots_top_desc'].unique()\n metadata_desc = metadata_desc.reset_index()\n metadata_desc['metadata_annots_top_desc'] = metadata_desc['metadata_annots_top_desc'].apply(\n lambda x: ' '.join(x))\n\n prefix = 'metadata'\n metadata_gr = df_metadata.drop(['metadata_annots_top_desc'], axis=1)\n for i in metadata_gr.columns:\n if 'PetID' not in i:\n metadata_gr[i] = metadata_gr[i].astype(float)\n metadata_gr = metadata_gr.groupby(['PetID']).agg(aggregates)\n metadata_gr.columns = pd.Index(\n ['{}_{}_{}'.format(prefix, c[0], c[1].upper()) for c in metadata_gr.columns.tolist()])\n metadata_gr = metadata_gr.reset_index()\n\n sentiment_desc = df_sentiment.groupby(['PetID'])['sentiment_entities'].unique()\n sentiment_desc = sentiment_desc.reset_index()\n sentiment_desc['sentiment_entities'] = sentiment_desc['sentiment_entities'].apply(\n lambda x: ' '.join(x))\n\n prefix = 'sentiment'\n sentiment_gr = df_sentiment.drop(['sentiment_entities'], axis=1)\n for i in sentiment_gr.columns:\n if 'PetID' not in i:\n sentiment_gr[i] = sentiment_gr[i].astype(float)\n sentiment_gr = sentiment_gr.groupby(['PetID']).agg(aggregates)\n sentiment_gr.columns = pd.Index(['{}_{}_{}'.format(\n prefix, c[0], c[1].upper()) for c in sentiment_gr.columns.tolist()])\n sentiment_gr = sentiment_gr.reset_index()\n\n return sentiment_gr, metadata_gr, metadata_desc, sentiment_desc\n\n\ndef breed_features(df, _labels_breed):\n breed_main = df[['Breed1']].merge(_labels_breed, how='left', left_on='Breed1',\n right_on='BreedID', suffixes=('', '_main_breed'))\n breed_main = breed_main.iloc[:, 2:]\n breed_main = breed_main.add_prefix('main_breed_')\n\n 
breed_second = df[['Breed2']].merge(_labels_breed, how='left', left_on='Breed2',\n right_on='BreedID', suffixes=('', '_second_breed'))\n breed_second = breed_second.iloc[:, 2:]\n breed_second = breed_second.add_prefix('second_breed_')\n\n return breed_main, breed_second\n\n\ndef impact_coding(data, feature, target='y'):\n '''\n In this implementation we get the values and the dictionary as two different steps.\n This is just because initially we were ignoring the dictionary as a result variable.\n\n In this implementation the KFolds use shuffling. If you want reproducibility the cv\n could be moved to a parameter.\n '''\n n_folds = 20\n n_inner_folds = 10\n impact_coded = pd.Series()\n\n oof_default_mean = data[\n target].mean() # Gobal mean to use by default (you could further tune this)\n kf = KFold(n_splits=n_folds, shuffle=True)\n oof_mean_cv = pd.DataFrame()\n split = 0\n for infold, oof in kf.split(data[feature]):\n impact_coded_cv = pd.Series()\n kf_inner = KFold(n_splits=n_inner_folds, shuffle=True)\n inner_split = 0\n inner_oof_mean_cv = pd.DataFrame()\n oof_default_inner_mean = data.iloc[infold][target].mean()\n for infold_inner, oof_inner in kf_inner.split(data.iloc[infold]):\n # The mean to apply to the inner oof split (a 1/n_folds % based on the rest)\n oof_mean = data.iloc[infold_inner].groupby(by=feature)[target].mean()\n impact_coded_cv = impact_coded_cv.append(data.iloc[infold].apply(\n lambda x: oof_mean[x[feature]]\n if x[feature] in oof_mean.index\n else oof_default_inner_mean\n , axis=1))\n\n # Also populate mapping (this has all group -> mean for all inner CV folds)\n inner_oof_mean_cv = inner_oof_mean_cv.join(pd.DataFrame(oof_mean), rsuffix=inner_split,\n how='outer')\n inner_oof_mean_cv.fillna(value=oof_default_inner_mean, inplace=True)\n inner_split += 1\n\n # Also populate mapping\n oof_mean_cv = oof_mean_cv.join(pd.DataFrame(inner_oof_mean_cv), rsuffix=split, how='outer')\n oof_mean_cv.fillna(value=oof_default_mean, inplace=True)\n split += 1\n\n impact_coded = impact_coded.append(data.iloc[oof].apply(\n lambda x: inner_oof_mean_cv.loc[x[feature]].mean()\n if x[feature] in inner_oof_mean_cv.index\n else oof_default_mean\n , axis=1))\n\n return impact_coded, oof_mean_cv.mean(axis=1), oof_default_mean\n\n\ndef frequency_encoding(df, col_name):\n new_name = \"{}_counts\".format(col_name)\n new_col_name = \"{}_freq\".format(col_name)\n grouped = df.groupby(col_name).size().reset_index(name=new_name)\n df = df.merge(grouped, how=\"left\", on=col_name)\n df[new_col_name] = df[new_name] / df[new_name].count()\n del df[new_name]\n return df\n\n\n# FROM: https://www.kaggle.com/myltykritik/simple-lgbm-image-features\n\n# The following 3 functions have been taken from Ben Hamner's github repository\n# https://github.com/benhamner/Metrics\ndef confusion_matrix(rater_a, rater_b, min_rating=None, max_rating=None):\n \"\"\"\n Returns the confusion matrix between rater's ratings\n \"\"\"\n assert (len(rater_a) == len(rater_b))\n if min_rating is None:\n min_rating = min(rater_a + rater_b)\n if max_rating is None:\n max_rating = max(rater_a + rater_b)\n num_ratings = int(max_rating - min_rating + 1)\n conf_mat = [[0 for i in range(num_ratings)]\n for j in range(num_ratings)]\n for a, b in zip(rater_a, rater_b):\n conf_mat[a - min_rating][b - min_rating] += 1\n return conf_mat\n\n\ndef histogram(ratings, min_rating=None, max_rating=None):\n \"\"\"\n Returns the counts of each type of rating that a rater made\n \"\"\"\n if min_rating is None:\n min_rating = min(ratings)\n if 
max_rating is None:\n max_rating = max(ratings)\n num_ratings = int(max_rating - min_rating + 1)\n hist_ratings = [0 for x in range(num_ratings)]\n for r in ratings:\n hist_ratings[r - min_rating] += 1\n return hist_ratings\n\n\ndef quadratic_weighted_kappa(y, y_pred):\n \"\"\"\n Calculates the quadratic weighted kappa\n axquadratic_weighted_kappa calculates the quadratic weighted kappa\n value, which is a measure of inter-rater agreement between two raters\n that provide discrete numeric ratings. Potential values range from -1\n (representing complete disagreement) to 1 (representing complete\n agreement). A kappa value of 0 is expected if all agreement is due to\n chance.\n quadratic_weighted_kappa(rater_a, rater_b), where rater_a and rater_b\n each correspond to a list of integer ratings. These lists must have the\n same length.\n The ratings should be integers, and it is assumed that they contain\n the complete range of possible ratings.\n quadratic_weighted_kappa(X, min_rating, max_rating), where min_rating\n is the minimum possible rating, and max_rating is the maximum possible\n rating\n \"\"\"\n rater_a = y\n rater_b = y_pred\n min_rating = None\n max_rating = None\n rater_a = np.array(rater_a, dtype=int)\n rater_b = np.array(rater_b, dtype=int)\n assert (len(rater_a) == len(rater_b))\n if min_rating is None:\n min_rating = min(min(rater_a), min(rater_b))\n if max_rating is None:\n max_rating = max(max(rater_a), max(rater_b))\n conf_mat = confusion_matrix(rater_a, rater_b,\n min_rating, max_rating)\n num_ratings = len(conf_mat)\n num_scored_items = float(len(rater_a))\n\n hist_rater_a = histogram(rater_a, min_rating, max_rating)\n hist_rater_b = histogram(rater_b, min_rating, max_rating)\n\n numerator = 0.0\n denominator = 0.0\n\n for i in range(num_ratings):\n for j in range(num_ratings):\n expected_count = (hist_rater_a[i] * hist_rater_b[j]\n / num_scored_items)\n d = pow(i - j, 2.0) / pow(num_ratings - 1, 2.0)\n numerator += d * conf_mat[i][j] / num_scored_items\n denominator += d * expected_count / num_scored_items\n\n return (1.0 - numerator / denominator)\n\n\nclass OptimizedRounder(object):\n def __init__(self):\n self.coef_ = 0\n\n def _kappa_loss(self, coef, X, y):\n X_p = np.copy(X)\n for i, pred in enumerate(X_p):\n if pred < coef[0]:\n X_p[i] = 0\n elif pred >= coef[0] and pred < coef[1]:\n X_p[i] = 1\n elif pred >= coef[1] and pred < coef[2]:\n X_p[i] = 2\n elif pred >= coef[2] and pred < coef[3]:\n X_p[i] = 3\n else:\n X_p[i] = 4\n\n ll = quadratic_weighted_kappa(y, X_p)\n return -ll\n\n def fit(self, X, y):\n loss_partial = partial(self._kappa_loss, X=X, y=y)\n initial_coef = [0.5, 1.5, 2.5, 3.5]\n self.coef_ = sp.optimize.minimize(loss_partial, initial_coef, method='nelder-mead')\n\n def predict(self, X, coef):\n X_p = np.copy(X)\n for i, pred in enumerate(X_p):\n if pred < coef[0]:\n X_p[i] = 0\n elif pred >= coef[0] and pred < coef[1]:\n X_p[i] = 1\n elif pred >= coef[1] and pred < coef[2]:\n X_p[i] = 2\n elif pred >= coef[2] and pred < coef[3]:\n X_p[i] = 3\n else:\n X_p[i] = 4\n return X_p\n\n def coefficients(self):\n return self.coef_['x']\n\n\ndef rmse(actual, predicted):\n return sqrt(mean_squared_error(actual, predicted))\n\n\ndef train_lightgbm(X_train, X_test, params, n_splits, num_rounds, verbose_eval, early_stop):\n kfold = StratifiedKFold(n_splits=n_splits, random_state=1337)\n oof_train = np.zeros((X_train.shape[0]))\n oof_test = np.zeros((X_test.shape[0], n_splits))\n\n i = 0\n for train_index, valid_index in kfold.split(X_train, 
X_train['AdoptionSpeed'].values):\n X_tr = X_train.iloc[train_index, :]\n X_val = X_train.iloc[valid_index, :]\n\n y_tr = X_tr['AdoptionSpeed'].values\n X_tr = X_tr.drop(['AdoptionSpeed'], axis=1)\n\n y_val = X_val['AdoptionSpeed'].values\n X_val = X_val.drop(['AdoptionSpeed'], axis=1)\n\n print('\\ny_tr distribution: {}'.format(Counter(y_tr)))\n\n d_train = lgb.Dataset(X_tr, label=y_tr)\n d_valid = lgb.Dataset(X_val, label=y_val)\n watchlist = [d_train, d_valid]\n\n print('training LGB:')\n model = lgb.train(params,\n train_set=d_train,\n num_boost_round=num_rounds,\n valid_sets=watchlist,\n verbose_eval=verbose_eval,\n early_stopping_rounds=early_stop)\n\n val_pred = model.predict(X_val, num_iteration=model.best_iteration)\n test_pred = model.predict(X_test, num_iteration=model.best_iteration)\n\n oof_train[valid_index] = val_pred\n oof_test[:, i] = test_pred\n\n i += 1\n\n return oof_train, oof_test\n\n\ndef main():\n train_pet_ids = train.PetID.unique()\n test_pet_ids = test.PetID.unique()\n\n train_proc = train.copy()\n test_proc = test.copy()\n\n train_proc = pd.merge(train_proc, train_img_features, on=\"PetID\")\n test_proc = pd.merge(test_proc, test_img_features, on=\"PetID\")\n\n train_breed_main, train_breed_second = breed_features(train_proc, labels_breed)\n train_proc = pd.concat([train_proc, train_breed_main, train_breed_second], axis=1)\n\n test_breed_main, test_breed_second = breed_features(test_proc, labels_breed)\n test_proc = pd.concat([test_proc, test_breed_main, test_breed_second], axis=1)\n\n X = pd.concat([train_proc, test_proc], ignore_index=True, sort=False)\n column_types = X.dtypes\n\n int_cols = column_types[column_types == 'int']\n float_cols = column_types[column_types == 'float']\n cat_cols = column_types[column_types == 'object']\n\n X_temp = X.copy()\n\n text_columns = ['Description']\n categorical_columns = ['main_breed_BreedName', 'second_breed_BreedName']\n\n to_drop_columns = ['PetID', 'Name', 'RescuerID']\n\n rescuer_count = X.groupby(['RescuerID'])['PetID'].count().reset_index()\n rescuer_count.columns = ['RescuerID', 'RescuerID_COUNT']\n\n X_temp = X_temp.merge(rescuer_count, how='left', on='RescuerID')\n\n for i in categorical_columns:\n X_temp.loc[:, i] = pd.factorize(X_temp.loc[:, i])[0]\n\n X_text = X_temp[text_columns]\n\n for i in X_text.columns:\n X_text.loc[:, i] = X_text.loc[:, i].fillna('<MISSING>')\n\n n_components = 5\n text_features = []\n\n # Generate text features:\n for i in X_text.columns:\n # Initialize decomposition methods:\n print('generating features from: {}'.format(i))\n svd_ = TruncatedSVD(\n n_components=n_components, random_state=1337)\n nmf_ = NMF(\n n_components=n_components, random_state=1337)\n\n tfidf_col = TfidfVectorizer().fit_transform(X_text.loc[:, i].values)\n svd_col = svd_.fit_transform(tfidf_col)\n svd_col = pd.DataFrame(svd_col)\n svd_col = svd_col.add_prefix('SVD_{}_'.format(i))\n\n nmf_col = nmf_.fit_transform(tfidf_col)\n nmf_col = pd.DataFrame(nmf_col)\n nmf_col = nmf_col.add_prefix('NMF_{}_'.format(i))\n\n text_features.append(svd_col)\n text_features.append(nmf_col)\n\n # Combine all extracted features:\n text_features = pd.concat(text_features, axis=1)\n\n # Concatenate with main DF:\n X_temp = pd.concat([X_temp, text_features], axis=1)\n\n # Remove raw text columns:\n for i in X_text.columns:\n X_temp = X_temp.drop(i, axis=1)\n\n X_temp[\"name_length\"] = X_temp.Name[X_temp.Name.isnull()].map(lambda x: len(str(x)))\n X_temp[\"name_length\"] = X_temp.Name.map(lambda x: len(str(x)))\n X_temp = 
X_temp.drop(to_drop_columns, axis=1)\n\n # Split into train and test again:\n X_train = X_temp.loc[np.isfinite(X_temp.AdoptionSpeed), :]\n X_test = X_temp.loc[~np.isfinite(X_temp.AdoptionSpeed), :]\n\n # Remove missing target column from test:\n X_test = X_test.drop(['AdoptionSpeed'], axis=1)\n\n print('X_train shape: {}'.format(X_train.shape))\n print('X_test shape: {}'.format(X_test.shape))\n\n assert X_train.shape[0] == train.shape[0]\n assert X_test.shape[0] == test.shape[0]\n\n # Check if columns between the two DFs are the same:\n train_cols = X_train.columns.tolist()\n train_cols.remove('AdoptionSpeed')\n\n test_cols = X_test.columns.tolist()\n\n np.random.seed(13)\n\n categorical_features = [\"Type\", \"Breed1\", \"Breed2\", \"Color1\", \"Color2\", \"Color3\", \"State\"]\n\n impact_coding_map = {}\n for f in categorical_features:\n print(\"Impact coding for {}\".format(f))\n X_train[\n \"impact_encoded_{}\".format(f)], impact_coding_mapping, default_coding = impact_coding(\n X_train, f, target=\"AdoptionSpeed\")\n impact_coding_map[f] = (impact_coding_mapping, default_coding)\n mapping, default_mean = impact_coding_map[f]\n X_test[\"impact_encoded_{}\".format(f)] = X_test.apply(\n lambda x: mapping[x[f]] if x[f] in mapping\n else default_mean, axis=1)\n\n for cat in categorical_features:\n X_train = frequency_encoding(X_train, cat)\n X_test = frequency_encoding(X_test, cat)\n\n params = {'application': 'regression',\n 'boosting': 'gbdt',\n 'metric': 'rmse',\n 'num_leaves': 70,\n 'max_depth': 9,\n 'learning_rate': 0.01,\n 'bagging_fraction': 0.85,\n 'feature_fraction': 0.8,\n 'min_split_gain': 0.02,\n 'min_child_samples': 150,\n 'min_child_weight': 0.02,\n 'lambda_l2': 0.0475,\n 'verbosity': -1,\n 'data_random_seed': 17}\n\n # Additional parameters:\n early_stop = 500\n verbose_eval = 100\n num_rounds = 10000\n n_splits = 5\n\n oof_train, oof_test = train_lightgbm(X_train, X_test, params, n_splits, num_rounds,\n verbose_eval, early_stop)\n optR = OptimizedRounder()\n optR.fit(oof_train, X_train['AdoptionSpeed'].values)\n coefficients = optR.coefficients()\n pred_test_y_k = optR.predict(oof_train, coefficients)\n print(\"\\nValid Counts = \", Counter(X_train['AdoptionSpeed'].values))\n print(\"Predicted Counts = \", Counter(pred_test_y_k))\n print(\"Coefficients = \", coefficients)\n qwk = quadratic_weighted_kappa(X_train['AdoptionSpeed'].values, pred_test_y_k)\n print(\"QWK = \", qwk)\n\n # Manually adjusted coefficients:\n coefficients_ = coefficients.copy()\n\n coefficients_[0] = 1.645\n coefficients_[1] = 2.115\n coefficients_[3] = 2.84\n\n train_predictions = optR.predict(oof_train, coefficients_).astype(int)\n print('train pred distribution: {}'.format(Counter(train_predictions)))\n\n test_predictions = optR.predict(oof_test.mean(axis=1), coefficients_)\n print('test pred distribution: {}'.format(Counter(test_predictions)))\n\n # Generate submission:\n submission = pd.DataFrame(\n {'PetID': test['PetID'].values, 'AdoptionSpeed': test_predictions.astype(np.int32)})\n submission.head()\n submission.to_csv('submission.csv', index=False)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.array", "sklearn.metrics.mean_squared_error", "sklearn.model_selection.StratifiedKFold", "numpy.zeros", "pandas.merge", "pandas.concat", "pandas.DataFrame", "numpy.random.seed", "numpy.copy", "pandas.factorize", "sklearn.decomposition.NMF", "sklearn.feature_extraction.text.TfidfVectorizer", "pandas.Series", "sklearn.model_selection.KFold", "sklearn.decomposition.TruncatedSVD", "pandas.read_csv", "numpy.isfinite", "scipy.optimize.minimize" ] ]
maxim-borisyak/craynn
[ "fceabd33f5969033fb3605f894778c42c42f3e08" ]
[ "craynn/viz/img_watcher.py" ]
[ "import matplotlib.pyplot as plt\nimport numpy as np\n\n__all__ = [\n 'ImgWatcher', 'SImgWatcher'\n]\ntry:\n from IPython import display\nexcept ImportError:\n display = None\n\nclass ImgWatcher(object):\n def __init__(self,\n n_rows=3, img_size=(128, 128), cmap1=plt.cm.gray_r, cmap2=plt.cm.gray_r, fig_size=3,\n vmin=None, vmax=None):\n self.fig = plt.figure(figsize=(fig_size * 2 + 1, fig_size * n_rows + n_rows - 1))\n self.vmin = vmin\n self.vmax = vmax\n\n def add_image(j, cmap):\n ax = self.fig.add_subplot(n_rows, 2, j)\n ax.grid('off')\n im = ax.imshow(\n np.random.uniform(size=img_size), interpolation='None', cmap=cmap,\n vmin=vmin, vmax=vmax\n )\n cb = self.fig.colorbar(im)\n return im, cb\n\n self.first_column = [\n add_image(i * 2 + 1, cmap1)\n for i in range(n_rows)\n ]\n\n self.second_column = [\n add_image(i * 2 + 2, cmap2)\n for i in range(n_rows)\n ]\n\n def draw(self, imgs1, imgs2):\n for col, imgs in zip([self.first_column, self.second_column], [imgs1, imgs2]):\n for i, (im, cb) in enumerate(col):\n img = imgs[i]\n im.set_data(img)\n\n vmin = self.vmin if self.vmin is not None else np.min(img)\n vmax = self.vmax if self.vmax is not None else np.max(img)\n\n im.set_clim(vmin, vmax)\n cb.set_clim(vmin, vmax)\n cb.update_normal(im)\n\n self.fig.canvas.draw()\n\nclass SImgWatcher(object):\n def __init__(self,\n n_rows=3, img_size=(128, 128), cmap1=plt.cm.gray_r, cmap2=plt.cm.gray_r, fig_size=3,\n vmin=None, vmax=None):\n self.fig = plt.figure(figsize=(fig_size * 2 + 1, fig_size * n_rows + n_rows - 1))\n self.vmin = vmin\n self.vmax = vmax\n\n def add_image(j, cmap):\n ax = self.fig.add_subplot(n_rows, 2, j)\n ax.grid('off')\n im = ax.imshow(\n np.random.uniform(size=img_size), interpolation='None', cmap=cmap,\n vmin=vmin, vmax=vmax\n )\n cb = self.fig.colorbar(im)\n return im, cb\n\n self.first_column = [\n add_image(i * 2 + 1, cmap1)\n for i in range(n_rows)\n ]\n\n self.second_column = [\n add_image(i * 2 + 2, cmap2)\n for i in range(n_rows)\n ]\n\n def draw(self, imgs1, imgs2):\n display.clear_output(wait=True)\n \n for col, imgs in zip([self.first_column, self.second_column], [imgs1, imgs2]):\n for i, (im, cb) in enumerate(col):\n img = imgs[i]\n im.set_data(img)\n\n vmin = self.vmin if self.vmin is not None else np.min(img)\n vmax = self.vmax if self.vmax is not None else np.max(img)\n\n im.set_clim(vmin, vmax)\n cb.set_clim(vmin, vmax)\n cb.update_normal(im)\n\n display.display(self.fig)" ]
[ [ "numpy.max", "numpy.random.uniform", "numpy.min", "matplotlib.pyplot.figure" ] ]
lauracanalini/eddl
[ "c5efac642e8e1f99b31dfaaacd0a5a058b09923b" ]
[ "scripts/tests/py_onnx/pytorch/export_scripts/lstm_enc_dec_mnist_pytorch_export.py" ]
[ "from __future__ import print_function\nimport os\nimport argparse\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n # Encoder\n self.lstm_enc = nn.LSTM(28, 128)\n # Decoder\n self.lstm_dec = nn.LSTM(28, 128)\n self.dense = nn.Linear(128, 28)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x, x2):\n # Encoder\n x = x.permute(1, 0, 2)\n enc_out, hidden_enc = self.lstm_enc(x)\n # Decoder\n x2 = x2.permute(1, 0, 2)\n dec_out, hidden_dec = self.lstm_dec(x2, hidden_enc)\n x = self.dense(dec_out)\n out = self.sigmoid(x)\n out = out.permute(1, 0, 2)\n return out\n\n\ndef train(args, model, device, train_loader, optimizer, epoch):\n model.train()\n loss_acc = 0\n current_samples = 0\n for batch_idx, (data, target) in enumerate(train_loader):\n data = data.to(device)\n data_dec = torch.nn.functional.pad(data, (0, 0, 1, 0))[:,:-1,:] # Shifted data\n data_el_size = data_dec.size(1) * data.size(2) # 28 * 28 for mnist\n optimizer.zero_grad()\n output = model(data, data_dec)\n loss = F.mse_loss(output, data, reduction='sum') \n loss.backward()\n loss_acc += loss.item() / data_el_size\n current_samples += data.size(0)\n optimizer.step()\n if batch_idx % 10 == 0:\n print('\\rTrain Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), loss_acc / current_samples))\n\n\ndef test(model, device, test_loader):\n model.eval()\n test_loss = 0\n current_samples = 0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n data_dec = torch.nn.functional.pad(data, (0, 0, 1, 0))[:,:-1,:] # Shifted data\n data_el_size = data.size(1) * data.size(2) # 28 * 28 for mnist\n output = model(data, data_dec)\n test_loss += F.mse_loss(output, data, reduction='sum').item() / data_el_size\n current_samples += data.size(0)\n\n test_loss = test_loss / current_samples\n print(f'\\nTest set: Average loss: {test_loss:.4f}\\n')\n\n return test_loss\n\n\ndef main():\n # Training settings\n parser = argparse.ArgumentParser(description='PyTorch LSTM encoder-decoder MNIST Example')\n parser.add_argument('--batch-size', type=int, default=100, metavar='N',\n help='input batch size for training (default: 100)')\n parser.add_argument('--epochs', type=int, default=5, metavar='N',\n help='number of epochs to train (default: 5)')\n parser.add_argument('--lr', type=float, default=0.01, metavar='LR',\n help='learning rate (default: 0.01)')\n parser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\n parser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\n parser.add_argument('--output-path', type=str, default=\"onnx_models/lstm_enc_dec_mnist.onnx\",\n help='Output path to store the onnx file')\n parser.add_argument('--output-metric', type=str, default=\"\",\n help='Output file path to store the metric value obtained in test set')\n args = parser.parse_args()\n use_cuda = not args.no_cuda and torch.cuda.is_available()\n\n torch.manual_seed(args.seed)\n\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n kwargs = {'batch_size': args.batch_size}\n if use_cuda:\n kwargs.update({'num_workers': 2,\n 'pin_memory': True,\n 'shuffle': True})\n\n # Create data preprocessing functions\n class remove_ch(object):\n ''' Custom transform 
to preprocess data'''\n def __call__(self, img):\n return img.view((28, 28))\n\n transform=transforms.Compose([\n transforms.ToTensor(),\n remove_ch()\n ])\n\n # Prepare data generators\n dataset1 = datasets.MNIST('../data', train=True, download=True,\n transform=transform)\n dataset2 = datasets.MNIST('../data', train=False,\n transform=transform)\n train_loader = torch.utils.data.DataLoader(dataset1, drop_last=False, **kwargs)\n test_loader = torch.utils.data.DataLoader(dataset2, drop_last=False, **kwargs)\n\n model = Net().to(device)\n optimizer = optim.Adam(model.parameters(), lr=args.lr)\n\n # Train\n for epoch in range(1, args.epochs + 1):\n train(args, model, device, train_loader, optimizer, epoch)\n test_loss = test(model, device, test_loader)\n\n # In case of providing output metric file, store the test accuracy value\n if args.output_metric != \"\":\n with open(args.output_metric, 'w') as ofile:\n ofile.write(str(test_loss))\n\n # Save to ONNX file\n # Input Encoder\n dummy_input = torch.randn(args.batch_size, 28, 28, device=device)\n # Input decoder\n dummy_input2 = torch.randn(args.batch_size, 28, 28, device=device)\n torch.onnx._export(model, (dummy_input, dummy_input2), args.output_path, keep_initializers_as_inputs=True)\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.nn.Linear", "torch.device", "torch.nn.LSTM", "torch.onnx._export", "torch.nn.Sigmoid", "torch.no_grad", "torch.manual_seed", "torch.nn.functional.mse_loss", "torch.cuda.is_available", "torch.utils.data.DataLoader", "torch.nn.functional.pad", "torch.randn" ] ]
whatnick/datacube-wps
[ "753a8de5e3d16b9897adc3ace8deec985ca84448" ]
[ "datacube_wps/processes/fcdrill.py" ]
[ "from timeit import default_timer\nimport multiprocessing\n\nimport altair\nimport xarray\nimport numpy as np\n\nfrom dask.distributed import Client\n\nfrom datacube.storage.masking import make_mask\n\nfrom pywps import LiteralOutput, ComplexOutput\n\nfrom . import PolygonDrill, log_call, chart_dimensions, FORMATS\n\n\nclass FCDrill(PolygonDrill):\n SHORT_NAMES = ['BS', 'PV', 'NPV', 'Unobservable']\n LONG_NAMES = ['Bare Soil',\n 'Photosynthetic Vegetation',\n 'Non-Photosynthetic Vegetation',\n 'Unobservable']\n\n def output_formats(self):\n return [LiteralOutput(\"image\", \"Fractional Cover Drill Preview\"),\n LiteralOutput(\"url\", \"Fractional Cover Drill Chart\"),\n ComplexOutput('timeseries', 'Fractional Cover Polygon Drill Timeseries', supported_formats=[FORMATS['output_json']])]\n\n @log_call\n def process_data(self, data):\n wofs_mask_flags = [\n dict(dry=True),\n dict(terrain_or_low_angle=False, high_slope=False, cloud_shadow=False, cloud=False, sea=False)\n ]\n\n water = data.data_vars['water']\n data = data.drop(['water'])\n\n total = data.count(dim=['x', 'y'])\n total_valid = (data != -1).sum(dim=['x', 'y'])\n\n # TODO enable this check, investigate why it fails\n # if total_valid <= 0:\n # raise ProcessError('query returned no data')\n\n for m in wofs_mask_flags:\n mask = make_mask(water, **m)\n data = data.where(mask)\n\n total_invalid = (np.isnan(data)).sum(dim=['x', 'y'])\n not_pixels = total_valid - (total - total_invalid)\n\n # following robbi's advice, cast the dataset to a dataarray\n maxFC = data.to_array(dim='variable', name='maxFC')\n\n # turn FC array into integer only as nanargmax doesn't seem to handle floats the way we want it to\n FC_int = maxFC.astype('int16')\n\n # use numpy.nanargmax to get the index of the maximum value along the variable dimension\n # BSPVNPV=np.nanargmax(FC_int, axis=0)\n BSPVNPV = FC_int.argmax(dim='variable')\n\n FC_mask = xarray.ufuncs.isfinite(maxFC).all(dim='variable') # pylint: disable=no-member\n\n # #re-mask with nans to remove no-data\n BSPVNPV = BSPVNPV.where(FC_mask)\n\n FC_dominant = xarray.Dataset({\n 'BS': (BSPVNPV == 0).where(FC_mask),\n 'PV': (BSPVNPV == 1).where(FC_mask),\n 'NPV': (BSPVNPV == 2).where(FC_mask)\n })\n\n FC_count = FC_dominant.sum(dim=['x', 'y'])\n\n # Fractional cover pixel count method\n # Get number of FC pixels, divide by total number of pixels per polygon\n new_ds = xarray.Dataset({\n 'BS': (FC_count.BS / total_valid)['BS'] * 100,\n 'PV': (FC_count.PV / total_valid)['PV'] * 100,\n 'NPV': (FC_count.NPV / total_valid)['NPV'] * 100,\n 'Unobservable': (not_pixels / total_valid)['BS'] * 100\n })\n\n print('calling dask with', multiprocessing.cpu_count(), 'processes')\n dask_time = default_timer()\n with Client(threads_per_worker=1):\n new_ds = new_ds.compute()\n print('dask took', default_timer() - dask_time, 'seconds')\n print(new_ds)\n\n df = new_ds.to_dataframe()\n df = df.drop('spatial_ref', axis=1)\n df.reset_index(inplace=True)\n return df\n\n def render_chart(self, df):\n width, height = chart_dimensions(self.style)\n\n melted = df.melt('time', var_name='Cover Type', value_name='Area')\n melted = melted.dropna()\n\n style = self.style['table']['columns']\n\n chart = altair.Chart(melted,\n width=width,\n height=height,\n title='Percentage of Area - Fractional Cover')\n chart = chart.mark_area()\n chart = chart.encode(x='time:T',\n y=altair.Y('Area:Q', stack='normalize'),\n color=altair.Color('Cover Type:N',\n scale=altair.Scale(domain=self.SHORT_NAMES,\n range=[style[name]['chartLineColor']\n for 
name in self.LONG_NAMES])),\n tooltip=[altair.Tooltip(field='time', format='%d %B, %Y', title='Date', type='temporal'),\n 'Area:Q',\n 'Cover Type:N'])\n\n return chart\n\n def render_outputs(self, df, chart):\n return super().render_outputs(df, chart, is_enabled=True, name=\"FC\",\n header=self.LONG_NAMES)\n" ]
[ [ "numpy.isnan" ] ]
Yacnnn/MVGCCA
[ "84df55790257a489a4370ac4ce4d64724f517462" ]
[ "models/algebraic.py" ]
[ "import numpy as np\n###################### Models GPCA ###################### \n#GPCA #PCA if beta = 0 #LC if beta = 1\ndef gpca(X,W,beta,n_components):\n X = X - np.mean(X,axis=0)\n X = np.transpose(X)\n k = n_components\n XtX = np.transpose(X) @ X\n I = np.eye(XtX.shape[0])\n L = np.diag(np.sum(W,axis=0)) - W\n lambda_n = np.max(np.linalg.eigvals(XtX))\n epsilon_n = np.max(np.linalg.eigvals(L))\n G = (1-beta)*(I - XtX/lambda_n)+beta*L/epsilon_n \n g_eigvalues, g_eigvec = np.linalg.eig(G)\n g_eigvalues = np.real(g_eigvalues)\n g_eigvec = np.real(g_eigvec)\n increasing_order_eigvalues = np.argsort( g_eigvalues)\n Q = g_eigvec[:,increasing_order_eigvalues[:k]]\n U = X @ Q \n return Q, U\n\n###################### Models GMCCA ###################### \n#GMCCA #MCCA if gamma = 0\ndef gmmca(X,W,gamma,n_components):\n r = 1e-4\n try:\n nview = X.shape[0]\n except:\n nview = len(X)\n X = [ np.transpose( X[k] - np.mean(X[k],axis=0) )for k in range(nview) ]\n Xt = [np.transpose(x) for x in X ]\n inv_X_Xt = [np.linalg.inv(X[k] @ Xt[k] + r*np.eye(X[k].shape[0])) for k in range(len(X))]\n L = np.diag(np.sum(W,axis=0)) - W\n C = np.sum([ Xt[k] @ inv_X_Xt[k] @ X[k] for k in range(len(X))],axis=0) - gamma * L\n g_eigvalues, g_eigvec = np.linalg.eigh(C)\n decreasing_order_eigvalues = np.argsort( - g_eigvalues)\n St = g_eigvec[:,decreasing_order_eigvalues[:n_components]]\n U = [ inv_X_Xt[k] @ X[k] @ St for k in range(len(X))]\n Ux = [ Xt[k] @ U[k] for k in range(len(X))]\n return St, U, Ux\n" ]
[ [ "numpy.sum", "numpy.linalg.eigh", "numpy.real", "numpy.mean", "numpy.eye", "numpy.linalg.eig", "numpy.transpose", "numpy.argsort", "numpy.linalg.eigvals" ] ]
b-sherson/hs-logger
[ "537865e44c93a4d234c9a96e9ad784a735869bcc" ]
[ "hs-logger/drivers/HP34420A_HP34970A.py" ]
[ "import pyvisa as visa\nimport numpy as np\nimport json\nimport time\n\n\nclass HP34420A_HP34970A(object):\n def __init__(self, spec):\n rm = visa.ResourceManager()\n self.spec = spec\n self.hp420A = rm.open_resource(spec['port_bridge'])\n self.hp970a = rm.open_resource(spec['port_scanner'])\n\n def read_instrument(self, op_id):\n op = self.spec[\"operations\"][op_id]\n channel = op[\"channel\"]\n nplc = op[\"nplc\"]\n self.configure_nlpc(nplc)\n val = self.read_channel(channel)\n val = np.float64(val)\n val_trans = self.transform(val, op)\n return val, val_trans\n\n def transform(self, data, operation):\n # Bridge transform\n eqb = self.spec.get(\"bridge_transform\", [0, 0])\n x = eqb[0] + (1 + eqb[1]) * data\n # x = data\n eq = operation.get(\"transform_eq\", ['V', 0, 1, 0, 0])\n if eq[0] == 'T': # Callendar-Van Dusen equation\n if np.isnan(eq[1:4]).any() or np.isinf(eq[1:4]).any() or np.isnan(x) or np.isinf(x):\n transformed = float(\"NaN\")\n else:\n if x < eq[4]:\n fulltransformed = np.roots([eq[3], -100 * eq[3], eq[2], eq[1], (1 - (x / eq[4]))])\n else:\n fulltransformed = np.roots([eq[2], eq[1], (1 - (x / eq[4]))])\n transformed = float(\"inf\") # Create a maximum value\n for j in fulltransformed:\n if np.imag(j) == 0:\n if abs(j) < transformed:\n transformed = np.real(j)\n elif abs(j) == transformed and j > transformed:\n transformed = np.real(j)\n if np.isinf(transformed):\n print(\"Invalid Callendar–Van Dusen equation: No real solutions for\")\n print(\"R = {}, R0 = {}, A = {}, B = {}, C = {}\".format(x, eq[4], eq[1], eq[2], eq[3]))\n transformed = float(\"NaN\")\n elif eq[0] == 'V' or eq[0] == 'P':\n transformed = eq[1] + eq[2]*x + eq[3]*x**2 + eq[4]*x**3\n else:\n print(\"Transform form not recognised: {}\".format(eq[0]))\n raise ValueError\n # c = operation.get(\"transform_coeff\", None)\n # transformed = eval(eq)\n return transformed\n\n def read_channel(self, channel):\n self.switch_scanner_channel(channel)\n return self.read()\n\n def configure_nlpc(self, nplc):\n assert nplc in [0.02, 0.2, 1, 2, 10, 20, 100, 200, 'MIN', 'MAX']\n self.hp420A.write(\"VOLT:NPLC {}\".format(nplc))\n\n def read(self):\n return self.hp420A.query(\"READ?\")\n\n def write(self, arg):\n self.hp420A.write(arg)\n\n def switch_scanner_channel(self, channel):\n self.hp970a.write(\"MEAS:VOLT:DC? (@{})\".format(channel))\n val = self.hp970a.read()\n\n\ndef main():\n inst = HP34420A_HP34970A(json.load(open('../instruments/HP34420A_HP34970A.json')))\n print(inst.read_instrument('read_px120'))\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.isinf", "numpy.isnan", "numpy.roots", "numpy.real", "numpy.float64", "numpy.imag" ] ]
babypandas-dev/babypandas
[ "a1de7f4bf8ead007b03150a4ad28edf6be4aa14a" ]
[ "tests/test_df.py" ]
[ "import pytest\nimport doctest\nimport re\nimport numpy as np\nfrom numpy.testing import assert_array_equal\nimport babypandas.bpd as bpd\nimport pandas as pd\n\n#########\n# Utils #\n#########\n\[email protected](scope='function')\ndef dfs():\n inputs = []\n # df1 input: Strings and numbers DataFrame\n inputs.append({'data': {'letter': ['a', 'b', 'c'],\n 'count': [9,3,3],\n 'idx': [0,1,2]}})\n # df2 input: All numbers DataFrame\n inputs.append({'data': {'col1': [5, 2, 7, 5],\n 'col2': [2, 7, 1, 8],\n 'col3': [6, 6, 1, 3],\n 'col4': [5, 5, 5, 9]}})\n # df3 input: DataFrame with groups\n inputs.append({'data': {'name': ['dog', 'cat', 'pidgeon', 'chicken', 'snake'],\n 'kind': ['mammal', 'mammal', 'bird', 'bird', 'reptile']}})\n # df4 input: DataFrame for merge\n inputs.append({'data': {'kind': ['mammal', 'bird', 'reptile'],\n 'short': ['m', 'b', 'r']}})\n # df5 input: DataFrame for append\n inputs.append({'data': {'letter': ['d' ,'e'],\n 'count': [4, 1],\n 'idx': [3, 4]}})\n # df6 input: DataFrame for merge\n inputs.append({'data': {'kind': ['mammal', 'bird', 'reptile'],\n 'len': [6, 4, 7]}})\n\n dct = {}\n for key in range(len(inputs)):\n dct['df{}'.format(key + 1)] = (bpd.DataFrame(**inputs[key]), pd.DataFrame(**inputs[key]))\n return dct\n\ndef assert_df_equal(df, pdf, method=None, **kwargs):\n if method:\n df = getattr(df, method)(**kwargs)\n pdf = getattr(pdf, method)(**kwargs)\n\n assert (np.all(df.columns == pdf.columns)), 'Columns do not match'\n assert (np.all(df.index == pdf.index)), 'Indices do not match'\n assert (np.all(df.values == pdf.values)), 'Values do not match'\n\ndef assert_series_equal(ser, pser, method=None, **kwargs):\n if method:\n ser = getattr(ser, method)(**kwargs)\n pser = getattr(pser, method)(**kwargs)\n\n assert (np.all(ser.index == pser.index)), 'Indices do not match'\n assert (np.all(ser.values == pser.values)), 'Values do not match'\n\n#########\n# Tests #\n#########\n\ndef test_basic(dfs):\n for df, pdf in dfs.values():\n assert_df_equal(df, pdf)\n\ndef test_iloc(dfs):\n for df, pdf in dfs.values():\n assert_series_equal(df.iloc[0], pdf.iloc[0])\n\ndef test_take(dfs):\n for df, pdf in dfs.values():\n indices = np.random.choice(len(pdf), 2, replace=False)\n assert_df_equal(df, pdf, 'take', indices=indices)\n\n # Exceptions\n df1 = dfs['df1'][0]\n assert pytest.raises(TypeError, df1.take, indices=1)\n assert pytest.raises(ValueError, df1.take, indices=['foo'])\n assert pytest.raises(IndexError, df1.take, indices=np.arange(4))\n\ndef test_drop(dfs):\n for df, pdf in dfs.values():\n col = pdf.columns.to_series().sample()\n assert_df_equal(df, pdf, 'drop', columns=col)\n\n # Exceptions\n df1 = dfs['df1'][0]\n assert pytest.raises(TypeError, df1.drop, columns=0)\n assert pytest.raises(KeyError, df1.drop, columns=['count', 'foo'])\n\ndef test_sample(dfs):\n for df, pdf in dfs.values():\n assert_df_equal(df, pdf, 'sample', n=1, random_state=0)\n assert_df_equal(df, pdf, 'sample', n=2, random_state=50)\n\n # Exceptions\n df1 = dfs['df1'][0]\n assert pytest.raises(TypeError, df1.sample, n='foo')\n assert pytest.raises(TypeError, df1.sample, replace='foo')\n assert pytest.raises(TypeError, df1.sample, random_state='foo')\n assert pytest.raises(ValueError, df1.sample, n=8)\n\ndef test_get(dfs):\n for df, pdf in dfs.values():\n key = pdf.columns.to_series().sample(2).values\n assert_series_equal(df, pdf, 'get', key=key[0])\n assert_df_equal(df, pdf, 'get', key=key)\n\n # Exceptions\n df1 = dfs['df1'][0]\n assert pytest.raises(TypeError, df1.get, key=1)\n assert 
pytest.raises(KeyError, df1.get, key='foo')\n\ndef test_assign(dfs):\n df2, pdf2 = dfs['df2']\n assert_df_equal(df2, pdf2, 'assign', col5=[1, 1, 1, 1])\n\n # Exceptions\n assert pytest.raises(ValueError, df2.assign, col5=[1, 1, 1, 1], col6=[2, 2])\n assert pytest.raises(ValueError, df2.assign, col5=[1, 1])\n\ndef test_apply(dfs):\n df2, pdf2 = dfs['df2']\n f = lambda x: x + 2\n assert_df_equal(df2, pdf2, 'apply', func=f)\n\n # Exceptions\n assert pytest.raises(TypeError, df2.apply, func=2)\n assert pytest.raises(ValueError, df2.apply, func=f, axis=3)\n\ndef test_sort_values(dfs):\n for df, pdf in dfs.values():\n by = pdf.columns.to_series().sample().iloc[0]\n assert_df_equal(df, pdf, by=by)\n assert_df_equal(df, pdf, by=by, ascending=False)\n\n # Exceptions\n df1 = dfs['df1'][0]\n assert pytest.raises(TypeError, df1.sort_values, by=0)\n assert pytest.raises(KeyError, df1.sort_values, by='foo')\n assert pytest.raises(TypeError, df1.sort_values, by='count', ascending='foo')\n\ndef test_describe(dfs):\n for df, pdf in dfs.values():\n assert_df_equal(df, pdf, 'describe')\n\ndef test_groupby(dfs):\n df3, pdf3 = dfs['df3']\n gb = df3.groupby('kind')\n pgb = pdf3.groupby('kind')\n assert isinstance(gb, bpd.DataFrameGroupBy)\n assert_df_equal(gb.sum(), pgb.sum())\n\n # Exceptions\n assert pytest.raises(TypeError, df3.groupby, by=0)\n assert pytest.raises(KeyError, df3.groupby, by='foo')\n\ndef test_reset_index(dfs):\n for df, pdf in dfs.values():\n by = pdf.columns.to_series().sample().iloc[0]\n df = df.sort_values(by=by)\n pdf = pdf.sort_values(by=by)\n assert_df_equal(df, pdf, 'reset_index')\n assert_df_equal(df, pdf, 'reset_index', drop=True)\n\n # Exceptions\n df2 = dfs['df2'][0]\n assert pytest.raises(TypeError, df2.reset_index, drop='foo')\n\ndef test_set_index(dfs):\n for df, pdf in dfs.values():\n keys = pdf.columns.to_series().sample().iloc[0]\n assert_df_equal(df, pdf, 'set_index', keys=keys)\n assert_df_equal(df, pdf, 'set_index', keys=keys, drop=False)\n\n # Exceptions\n df2 = dfs['df2'][0]\n assert pytest.raises(TypeError, df2.set_index, keys=0)\n assert pytest.raises(KeyError, df2.set_index, keys='foo')\n assert pytest.raises(TypeError, df2.set_index, keys='col1', drop='foo')\n\ndef test_merge(dfs):\n df3, pdf3 = dfs['df3']\n df4, pdf4 = dfs['df4']\n assert_df_equal(df3.merge(df4), pdf3.merge(pdf4))\n\n # Exceptions\n assert pytest.raises(TypeError, df3.merge, right=np.array([1, 2, 3]))\n assert pytest.raises(ValueError, df3.merge, right=df4, how='on')\n assert pytest.raises(KeyError, df3.merge, right=df4, on='foo')\n assert pytest.raises(KeyError, df3.merge, right=df4, left_on='kind')\n assert pytest.raises(KeyError, df3.merge, right=df4, left_on='foo', right_on='kind')\n assert pytest.raises(KeyError, df3.merge, right=df4, left_on='kind', right_on='foo')\n\ndef test_merge_on_index(dfs):\n df3, pdf3 = dfs['df3']\n df4, pdf4 = dfs['df4']\n df4 = df4.set_index('kind')\n pdf4 = pdf4.set_index('kind')\n assert_df_equal(df3.merge(df4, left_on='kind', right_index=True), pdf3.merge(pdf4, left_on='kind', right_index=True))\n\ndef test_merge_on_both_index(dfs):\n df4, pdf4 = dfs['df4']\n df6, pdf6 = dfs['df6']\n df4 = df4.set_index('kind')\n pdf4 = pdf4.set_index('kind')\n df6 = df6.set_index('kind')\n pdf6 = pdf6.set_index('kind')\n assert_df_equal(df4.merge(df6, left_index=True, right_index=True), pdf4.merge(pdf6, left_index=True, right_index=True))\n\ndef test_append(dfs):\n df1, pdf1 = dfs['df1']\n df5, pdf5 = dfs['df5']\n assert_df_equal(df1.append(df5), pdf1.append(pdf5))\n 
assert_df_equal(df1.append(df5, ignore_index=True), pdf1.append(pdf5, ignore_index=True))\n\n # Exceptions\n assert pytest.raises(TypeError, df1.append, right=np.array([1, 2, 3]))\n assert pytest.raises(TypeError, df1.append, right=df5, ignore_index='foo')\n\ndef test_to_numpy(dfs):\n for df, pdf in dfs.values():\n assert isinstance(df.to_numpy(), np.ndarray)\n assert_array_equal(df.to_numpy(), pdf.to_numpy())\n\n\ndef test_indexing(dfs):\n # Check that boolean indexing works as expected.\n bp_df, df = dfs['df2']\n n, p = bp_df.shape\n col1_is_5 = bp_df.get('col1') == 5\n col4_is_5 = bp_df.get('col4') == 5\n # Simple indexing cases, Series, and array.\n for indexer in (col1_is_5, col4_is_5):\n for this_ind in (indexer, np.array(indexer)):\n indexed = bp_df[this_ind]\n assert indexed.shape[0] == np.count_nonzero(this_ind)\n assert list(indexed.index) == list(np.arange(n)[this_ind])\n # Sort Series index, and get the same output (because it depends on the\n # index).\n sorted_indexer = col1_is_5.sort_values()\n with pytest.warns(UserWarning): # Reindex generates warning.\n indexed = bp_df[sorted_indexer]\n assert indexed.shape[0] == 2\n assert list(indexed.index) == [0, 3]\n # Any other type of indexing generates an error\n for indexer in ('col2', ['col1', 'col2'], 2, slice(1, 3),\n (col1_is_5, 'col1')):\n with pytest.raises(IndexError):\n bp_df[indexer]\n" ]
[ [ "numpy.array", "numpy.count_nonzero", "pandas.DataFrame", "numpy.arange", "numpy.all" ] ]
heather999/gcr-catalogs
[ "4ca770e7e1b1bb846240b0ba2316ee0ebc4a65ab" ]
[ "GCRCatalogs/photoz_calibrate.py" ]
[ "\"\"\"\nPZCalibrate reference objects catalog reader\n\nThis reader was designed by Yao-Yuan Mao,\nbased a catalog of \"spectroscopic\" reference objects for use in cross-\ncorrelation redshifts provided by Chris Morrison, in Mar 2019.\n\"\"\"\n\nimport re\nimport os\n\nimport numpy as np\nfrom GCR import BaseGenericCatalog\n\nfrom .utils import first\n\n__all__ = ['PZCalibrateCatalog']\n\nFILE_PATTERN = r'z_(\\d)\\S+healpix_(\\d+)_pz_calib\\.npz$'\n\n\nclass PZCalibrateCatalog(BaseGenericCatalog):\n\n def _subclass_init(self, **kwargs):\n self.base_dir = kwargs['base_dir']\n self._filename_re = re.compile(kwargs.get('filename_pattern', FILE_PATTERN))\n self._healpix_pixels = kwargs.get('healpix_pixels')\n\n self._healpix_files = dict()\n for f in sorted(os.listdir(self.base_dir)):\n m = self._filename_re.match(f)\n if m is None:\n continue\n key = tuple(map(int, m.groups()))\n if self._healpix_pixels and key[1] not in self._healpix_pixels:\n continue\n self._healpix_files[key] = os.path.join(self.base_dir, f)\n\n self._native_filter_quantities = {'healpix_pixel', 'redshift_block_lower'}\n\n self._quantity_dict = {\n \"QSO\": \"Flag selecting QSOs by BlackHoleMass and EddingtonRatio. Objects have a mag/redshift \"\n \"distributions similar to those in DESI and are meant to be used as reference objects \"\n \"in cross-correlation redshift analyses.\",\n \"LRG\": \"Flag selecting LRGs by stellar mass. Objects have a mag/redshift \"\n \"distributions similar to those in DESI and are meant to be used as reference objects \"\n \"in cross-correlation redshift analyses.\",\n \"ELG\": \"Flag selecting ELGs by star formation rate. Objects have a mag/redshift \"\n \"distributions similar to those in DESI and are meant to be used as reference objects \"\n \"in cross-correlation redshift analyses.\",\n \"MagLim\": \"Flag selection all objects R<19.4. Objects have a mag/redshift \"\n \"distributions similar to those in DESI and are meant to be used as reference objects \"\n \"in cross-correlation redshift analyses.\",\n \"AllReferences\": \"Union of QSO, LRG, ELG, and MagLim flags. Objects have a mag/redshift \"\n \"distributions similar to those in DESI and are meant to be used as reference \"\n \"objects in cross-correlation redshift analyses.\",\n }\n \n self._quantity_modifiers = {q: q for q in self._quantity_dict}\n\n def _get_quantity_info_dict(self, quantity, default=None):\n \"\"\"Return a dictionary with descriptive information for a quantity\n\n Returned information includes a quantity description, quantity units, whether\n the quantity is defined in the DPDD, and if the quantity is available in GCRbase.\n\n Args:\n quantity (str): The quantity to return information for\n default (object): Value to return if no information is available (default None)\n\n Returns:\n String describing the quantity.\n \"\"\"\n return self._quantity_dict.get(quantity, default)\n\n def _generate_native_quantity_list(self):\n return list(np.load(first(self._healpix_files.values())).keys())\n\n def _iter_native_dataset(self, native_filters=None):\n for (zlo_this, hpx_this), file_path in self._healpix_files.items():\n d = {'healpix_pixel': hpx_this, 'redshift_block_lower': zlo_this}\n if native_filters is not None and not native_filters.check_scalar(d):\n continue\n yield np.load(file_path).__getitem__\n" ]
[ [ "numpy.load" ] ]
xiaxiaofu/PytorchToCaffe
[ "479a18308903e502f74dce6d2b4c8dede2ef62ee" ]
[ "pytorch_to_caffe.py" ]
[ "import torch\nimport torch.nn as nn\nfrom Caffe import caffe_net\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom Caffe import layer_param\nfrom torch.nn.modules.utils import _pair\nimport numpy as np\n\n\"\"\"\nHow to support a new layer type:\n layer_name=log.add_layer(layer_type_name)\n top_blobs=log.add_blobs(<output of that layer>)\n layer=caffe_net.Layer_param(xxx)\n <set layer parameters>\n [<layer.add_data(*datas)>]\n log.cnet.add_layer(layer)\n\"\"\"\n\n# TODO: support the inplace output of the layers\n\n\nNET_INITTED=False\nclass TransLog(object):\n def __init__(self):\n \"\"\"\n doing init() with inputs Variable before using it\n \"\"\"\n self.layers={}\n self.detail_layers={} \n self.detail_blobs={} \n self._blobs={}\n self._blobs_data=[]\n self.cnet=caffe_net.Caffemodel('')\n self.debug=True\n\n def init(self,inputs):\n \"\"\"\n :param inputs: is a list of input variables\n \"\"\"\n self.add_blobs(inputs)\n def add_layer(self,name='layer'):\n if name in self.layers:\n return self.layers[name]\n if name not in self.detail_layers.keys():\n self.detail_layers[name] =0\n self.detail_layers[name] +=1\n name='{}{}'.format(name,self.detail_layers[name])\n self.layers[name]=name\n if self.debug:\n print(\"{} was added to layers\".format(self.layers[name]))\n return self.layers[name]\n\n def add_blobs(self, blobs,name='blob',with_num=True):\n rst=[]\n for blob in blobs:\n self._blobs_data.append(blob) # to block the memory address be rewrited\n blob=int(id(blob))\n if name not in self.detail_blobs.keys():\n self.detail_blobs[name] =0\n self.detail_blobs[name] +=1 \n if with_num:\n rst.append('{}{}'.format(name,self.detail_blobs[name]))\n else:\n rst.append('{}'.format(name))\n if self.debug:\n print(\"{}:{} was added to blobs\".format(blob,rst[-1]))\n self._blobs[blob]=rst[-1]\n return rst\n def blobs(self, var):\n var=id(var)\n if self.debug:\n print(\"{}:{} getting\".format(var, self._blobs[var]))\n try:\n return self._blobs[var]\n except:\n print(\"WARNING: CANNOT FOUND blob {}\".format(var))\n return None\n\nlog=TransLog()\n\ndef _conv2d(raw,input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):\n x=raw(input,weight,bias,stride,padding,dilation,groups)\n name=log.add_layer(name='conv')\n log.add_blobs([x],name='conv_blob')\n layer=caffe_net.Layer_param(name=name, type='Convolution',\n bottom=[log.blobs(input)], top=[log.blobs(x)])\n layer.conv_param(x.size()[1],weight.size()[2:],stride=_pair(stride),\n pad=_pair(padding),dilation=_pair(dilation),bias_term=bias is not None)\n if bias is not None:\n layer.add_data(weight.cpu().data.numpy(),bias.cpu().data.numpy())\n else:\n layer.param.convolution_param.bias_term=False\n layer.add_data(weight.cpu().data.numpy())\n log.cnet.add_layer(layer)\n return x\n\ndef _linear(raw,input, weight, bias=None):\n x=raw(input,weight,bias)\n layer_name=log.add_layer(name='fc')\n top_blobs=log.add_blobs([x],name='fc_blob')\n layer=caffe_net.Layer_param(name=layer_name,type='InnerProduct',\n bottom=[log.blobs(input)],top=top_blobs)\n layer.fc_param(x.size()[1])\n if bias is not None:\n layer.add_data(weight.cpu().data.numpy(),bias.cpu().data.numpy())\n else:\n layer.add_data(weight.cpu().data.numpy())\n log.cnet.add_layer(layer)\n return x\n\ndef _split(raw,tensor, split_size, dim=0):\n # split in pytorch is slice in caffe\n x=raw(tensor, split_size, dim)\n layer_name=log.add_layer('split')\n top_blobs=log.add_blobs(x,name='split_blob')\n layer=caffe_net.Layer_param(name=layer_name, type='Slice',\n 
bottom=[log.blobs(tensor)], top=top_blobs)\n slice_num=int(np.floor(tensor.size()[dim]/split_size))\n slice_param=caffe_net.pb.SliceParameter(axis=dim,slice_point=[split_size*i for i in range(1,slice_num)])\n layer.param.slice_param.CopyFrom(slice_param)\n log.cnet.add_layer(layer)\n return x\n\n\ndef _pool(type,raw,input,x,kernel_size,stride,padding,ceil_mode):\n # TODO dilation,ceil_mode,return indices\n layer_name = log.add_layer(name='{}_pool'.format(type))\n top_blobs = log.add_blobs([x], name='{}_pool_blob'.format(type))\n layer = caffe_net.Layer_param(name=layer_name, type='Pooling',\n bottom=[log.blobs(input)], top=top_blobs)\n # TODO w,h different kernel, stride and padding\n # processing ceil mode\n layer.pool_param(kernel_size=kernel_size, stride=kernel_size if stride is None else stride,\n pad=padding, type=type.upper())\n log.cnet.add_layer(layer)\n if ceil_mode==False and stride is not None:\n oheight = (input.size()[2] - _pair(kernel_size)[0] + 2 * _pair(padding)[0]) % (_pair(stride)[0])\n owidth = (input.size()[3] - _pair(kernel_size)[1] + 2 * _pair(padding)[1]) % (_pair(stride)[1])\n if oheight!=0 or owidth!=0:\n caffe_out=raw(input, kernel_size, stride, padding, ceil_mode=True)\n print(\"WARNING: the output shape miss match at {}: \"\n \n \"input {} output---Pytorch:{}---Caffe:{}\\n\"\n \"This is caused by the different implementation that ceil mode in caffe and the floor mode in pytorch.\\n\"\n \"You can add the clip layer in caffe prototxt manually if shape mismatch error is caused in caffe. \".format(layer_name,input.size(),x.size(),caffe_out.size()))\n\ndef _max_pool2d(raw,input, kernel_size, stride=None, padding=0, dilation=1,\n ceil_mode=False, return_indices=False):\n x = raw(input, kernel_size, stride, padding, dilation,ceil_mode, return_indices)\n _pool('max',raw,input, x, kernel_size, stride, padding,ceil_mode)\n return x\n\ndef _avg_pool2d(raw,input, kernel_size, stride = None, padding = 0, ceil_mode = False, count_include_pad = True):\n x = raw(input, kernel_size, stride, padding, ceil_mode, count_include_pad)\n _pool('ave',raw,input, x, kernel_size, stride, padding,ceil_mode)\n return x\n\ndef _max(raw,*args):\n x=raw(*args)\n if len(args)==1:\n # TODO max in one tensor\n assert NotImplementedError\n else:\n bottom_blobs=[]\n for arg in args:\n bottom_blobs.append(log.blobs(arg))\n layer_name=log.add_layer(name='max')\n top_blobs=log.add_blobs([x],name='max_blob')\n layer=caffe_net.Layer_param(name=layer_name,type='Eltwise',\n bottom=bottom_blobs,top=top_blobs)\n layer.param.eltwise_param.operation =2\n log.cnet.add_layer(layer)\n return x\n\ndef _cat(raw,inputs, dim=0):\n x=raw(inputs, dim)\n bottom_blobs=[]\n for input in inputs:\n bottom_blobs.append(log.blobs(input))\n layer_name=log.add_layer(name='cat')\n top_blobs=log.add_blobs([x],name='cat_blob')\n layer=caffe_net.Layer_param(name=layer_name,type='Concat',\n bottom=bottom_blobs,top=top_blobs)\n layer.param.concat_param.axis =dim\n log.cnet.add_layer(layer)\n return x\n\ndef _dropout(raw,input,p=0.5, training=False, inplace=False):\n x=raw(input,p, training, inplace)\n bottom_blobs=[log.blobs(input)]\n layer_name=log.add_layer(name='dropout')\n top_blobs=log.add_blobs([x],name=bottom_blobs[0],with_num=False)\n layer=caffe_net.Layer_param(name=layer_name,type='Dropout',\n bottom=bottom_blobs,top=top_blobs)\n layer.param.dropout_param.dropout_ratio = p\n layer.param.include.extend([caffe_net.pb.NetStateRule(phase=0)]) # 1 for test, 0 for train\n log.cnet.add_layer(layer)\n return x\n\ndef 
_threshold(raw,input, threshold, value, inplace=False):\n # for threshold or relu\n if threshold==0 and value==0:\n x = raw(input,threshold, value, inplace)\n bottom_blobs=[log.blobs(input)]\n name = log.add_layer(name='relu')\n log.add_blobs([x], name='relu_blob')\n layer = caffe_net.Layer_param(name=name, type='ReLU',\n bottom=bottom_blobs, top=[log.blobs(x)])\n log.cnet.add_layer(layer)\n return x\n if value!=0:\n raise NotImplemented(\"value !=0 not implemented in caffe\")\n x=raw(input,input, threshold, value, inplace)\n bottom_blobs=[log.blobs(input)]\n layer_name=log.add_layer(name='threshold')\n top_blobs=log.add_blobs([x],name='threshold_blob')\n layer=caffe_net.Layer_param(name=layer_name,type='Threshold',\n bottom=bottom_blobs,top=top_blobs)\n layer.param.threshold_param.threshold = threshold\n log.cnet.add_layer(layer)\n return x\n\ndef _prelu(raw, input, weight):\n # for threshold or prelu\n x = raw(input, weight)\n bottom_blobs=[log.blobs(input)]\n name = log.add_layer(name='prelu')\n log.add_blobs([x], name='prelu_blob')\n layer = caffe_net.Layer_param(name=name, type='PReLU',\n bottom=bottom_blobs, top=[log.blobs(x)])\n if weight.size()[0]==1:\n layer.param.prelu_param.channel_shared=True\n layer.add_data(weight.cpu().data.numpy()[0])\n else:\n layer.add_data(weight.cpu().data.numpy())\n log.cnet.add_layer(layer)\n return x\n\ndef _softmax(raw, input, dim=None, _stacklevel=3):\n # for F.softmax\n x=raw(input, dim=dim)\n if dim is None:\n dim=F._get_softmax_dim('softmax', input.dim(), _stacklevel)\n bottom_blobs=[log.blobs(input)]\n name = log.add_layer(name='softmax')\n log.add_blobs([x], name='softmax_blob')\n layer = caffe_net.Layer_param(name=name, type='Softmax',\n bottom=bottom_blobs, top=[log.blobs(x)])\n layer.param.softmax_param.axis=dim\n log.cnet.add_layer(layer)\n return x\n\ndef _batch_norm(raw,input, running_mean, running_var, weight=None, bias=None,\n training=False, momentum=0.1, eps=1e-5):\n # because the runing_mean and runing_var will be changed after the _batch_norm operation, we first save the parameters\n running_mean_clone=running_mean.clone()\n running_var_clone=running_var.clone()\n x = raw(input, running_mean, running_var, weight, bias,\n training, momentum, eps)\n bottom_blobs = [log.blobs(input)]\n layer_name1 = log.add_layer(name='batch_norm')\n top_blobs = log.add_blobs([x], name='batch_norm_blob')\n layer1 = caffe_net.Layer_param(name=layer_name1, type='BatchNorm',\n bottom=bottom_blobs, top=top_blobs)\n layer1.batch_norm_param(1, eps=eps)\n layer1.add_data(running_mean_clone.cpu().numpy(), running_var_clone.cpu().numpy(), np.array([1.0]))\n log.cnet.add_layer(layer1)\n layer_name2 = log.add_layer(name='bn_scale')\n layer2 = caffe_net.Layer_param(name=layer_name2, type='Scale',\n bottom=top_blobs, top=top_blobs)#top_blobs\n layer2.param.scale_param.bias_term = True\n layer2.add_data(weight.cpu().data.numpy(), bias.cpu().data.numpy())\n log.cnet.add_layer(layer2)\n return x\n\n# ----- for Variable operations --------\n\ndef _view(input, *args):\n x=raw_view(input, *args)\n if not NET_INITTED:\n return x\n layer_name=log.add_layer(name='view')\n top_blobs=log.add_blobs([x],name='view_blob')\n layer=caffe_net.Layer_param(name=layer_name,type='Reshape',\n bottom=[log.blobs(input)],top=top_blobs)\n # TODO: reshpae added to nn_tools layer\n dims=list(args)\n dims[0]=0 # the first dim should be batch_size\n layer.param.reshape_param.shape.CopyFrom(caffe_net.pb.BlobShape(dim=dims))\n log.cnet.add_layer(layer)\n return x\n\ndef _add(input, *args):\n x = 
raw__add__(input, *args)\n if not NET_INITTED:\n return x\n layer_name = log.add_layer(name='add')\n top_blobs = log.add_blobs([x], name='add_blob')\n layer = caffe_net.Layer_param(name=layer_name, type='Eltwise',\n bottom=[log.blobs(input),log.blobs(args[0])], top=top_blobs)\n layer.param.eltwise_param.operation = 1 # sum is 1\n log.cnet.add_layer(layer)\n return x\n\ndef _iadd(input, *args):\n x = raw__iadd__(input, *args)\n if not NET_INITTED:\n return x\n x=x.clone()\n layer_name = log.add_layer(name='add')\n top_blobs = log.add_blobs([x], name='add_blob')\n layer = caffe_net.Layer_param(name=layer_name, type='Eltwise',\n bottom=[log.blobs(input),log.blobs(args[0])], top=top_blobs)\n layer.param.eltwise_param.operation = 1 # sum is 1\n log.cnet.add_layer(layer)\n return x\n\ndef _sub(input, *args):\n x = raw__sub__(input, *args)\n if not NET_INITTED:\n return x\n layer_name = log.add_layer(name='sub')\n top_blobs = log.add_blobs([x], name='sub_blob')\n layer = caffe_net.Layer_param(name=layer_name, type='Eltwise',\n bottom=[log.blobs(input),log.blobs(args[0])], top=top_blobs)\n layer.param.eltwise_param.operation = 1 # sum is 1\n layer.param.eltwise_param.coeff.extend([1.,-1.])\n log.cnet.add_layer(layer)\n return x\n\ndef _isub(input, *args):\n x = raw__isub__(input, *args)\n if not NET_INITTED:\n return x\n x=x.clone()\n layer_name = log.add_layer(name='sub')\n top_blobs = log.add_blobs([x], name='sub_blob')\n layer = caffe_net.Layer_param(name=layer_name, type='Eltwise',\n bottom=[log.blobs(input),log.blobs(args[0])], top=top_blobs)\n layer.param.eltwise_param.operation = 1 # sum is 1\n log.cnet.add_layer(layer)\n return x\n\ndef _mul(input, *args):\n x = raw__sub__(input, *args)\n if not NET_INITTED:\n return x\n layer_name = log.add_layer(name='mul')\n top_blobs = log.add_blobs([x], name='mul_blob')\n layer = caffe_net.Layer_param(name=layer_name, type='Eltwise',\n bottom=[log.blobs(input), log.blobs(args[0])], top=top_blobs)\n layer.param.eltwise_param.operation = 0 # product is 1\n log.cnet.add_layer(layer)\n return x\n\ndef _imul(input, *args):\n x = raw__isub__(input, *args)\n if not NET_INITTED:\n return x\n x = x.clone()\n layer_name = log.add_layer(name='mul')\n top_blobs = log.add_blobs([x], name='mul_blob')\n layer = caffe_net.Layer_param(name=layer_name, type='Eltwise',\n bottom=[log.blobs(input), log.blobs(args[0])], top=top_blobs)\n layer.param.eltwise_param.operation = 0 # product is 1\n layer.param.eltwise_param.coeff.extend([1., -1.])\n log.cnet.add_layer(layer)\n return x\n\nclass Rp(object):\n def __init__(self,raw,replace,**kwargs):\n # replace the raw function to replace function\n self.obj=replace\n self.raw=raw\n\n def __call__(self,*args,**kwargs):\n if not NET_INITTED:\n return self.raw(*args,**kwargs)\n out=self.obj(self.raw,*args,**kwargs)\n # if isinstance(out,Variable):\n # out=[out]\n return out\n\n\nF.conv2d=Rp(F.conv2d,_conv2d)\nF.linear=Rp(F.linear,_linear)\nF.max_pool2d=Rp(F.max_pool2d,_max_pool2d)\nF.avg_pool2d=Rp(F.avg_pool2d,_avg_pool2d)\nF.dropout=Rp(F.dropout,_dropout)\nF.threshold=Rp(F.threshold,_threshold)\nF.prelu=Rp(F.prelu,_prelu)\nF.batch_norm=Rp(F.batch_norm,_batch_norm)\nF.softmax=Rp(F.softmax,_softmax)\n\ntorch.split=Rp(torch.split,_split)\ntorch.max=Rp(torch.max,_max)\ntorch.cat=Rp(torch.cat,_cat)\n\n\n# TODO: other types of the view function\ntry:\n raw_view=Variable.view\n Variable.view=_view\n raw__add__=Variable.__add__\n Variable.__add__=_add\n raw__iadd__=Variable.__iadd__\n Variable.__iadd__=_iadd\n 
raw__sub__=Variable.__sub__\n Variable.__sub__=_sub\n raw__isub__=Variable.__isub__\n Variable.__isub__=_isub\n raw__mul__ = Variable.__mul__\n Variable.__mul__ = _mul\n raw__imul__ = Variable.__imul__\n Variable.__imul__ = _imul\nexcept:\n # for new version 0.4.0\n for t in [torch.Tensor]:\n raw_view = t.view\n t.view = _view\n raw__add__ = t.__add__\n t.__add__ = _add\n raw__iadd__ = t.__iadd__\n t.__iadd__ = _iadd\n raw__sub__ = t.__sub__\n t.__sub__ = _sub\n raw__isub__ = t.__isub__\n t.__isub__ = _isub\n raw__mul__ = t.__mul__\n t.__mul__=_mul\n raw__imul__ = t.__imul__\n t.__imul__ = _imul\n\n\ndef trans_net(net,input_var,name='NoNamePytorchModel'):\n print('Starting Transform, This will take a while')\n log.init([input_var])\n log.cnet.net.name=name\n log.cnet.net.input.extend([log.blobs(input_var)])\n log.cnet.net.input_dim.extend(input_var.size())\n global NET_INITTED\n NET_INITTED=True\n out = net.forward(input_var)\n print('Transform Completed')\n\ndef save_prototxt(save_name):\n log.cnet.save_prototxt(save_name)\n\ndef save_caffemodel(save_name):\n log.cnet.save(save_name)\n" ]
[ [ "numpy.array", "torch.nn.modules.utils._pair" ] ]
JayHeYang/hehuang_cup
[ "1941e351403f0f402b0bc69e2c3015cb59844834" ]
[ "utils/tools.py" ]
[ "\"\"\"\nSome utility functions\n\"\"\"\nimport numpy as np\n\nimport torch as t\nimport torch.nn.functional as F\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\n\n\ndef compute_batch_attributes_weights(Target):\n\n    counts = t.sum(Target, dim=0)\n\n    N = Target.size()[0] # size of the batch (batchsize)\n    zero_idx = counts == 0\n    counts[zero_idx] = 1\n\n    weights = counts / N\n\n    return weights\n\n\nimport random\n\ndef getRandomIndex(n=4000, x=1000):\n\t# index range is [0, n); randomly pick x distinct values\n    random.seed(0) # set the random seed\n    test_index = random.sample(range(n), x)\n\n    test_index = np.array(test_index)\n    # then subtract test_index from the full index to get train_index\n    train_index = np.delete(np.arange(n), test_index)\n    return train_index.tolist(), test_index.tolist()\n\n    \ndef compute_macro_f1():\n    \"\"\"\n    Compute the MacroF1 score from the result.csv file\n    \"\"\"\n    # \n    predict = pd.read_csv('results/result_myresnet.csv')\n    target = pd.read_csv('C:/code/hehuang_cup/train/train1A.csv').fillna(.0)\n    _, test_idx = getRandomIndex()\n    target = pd.DataFrame(target.iloc[test_idx])\n    target = target.reset_index(drop=True) # drop the original index and renumber\n    \n\n    # print(predict.head())\n    # print(target.head())\n    \n    pred_info = {'LongSleeve': 1e-3, 'ShortSleeve': 1e-3, 'NoSleeve': 1e-3, \n                 'Solidcolor': 1e-3, 'multicolour': 1e-3, 'lattice': 1e-3, \n                 'Short': 1e-3, 'Long': 1e-3, 'middle': 1e-3, 'Bald': 1e-3} # counts for each hard-label attribute\n    targ_info = {'LongSleeve': 1e-3, 'ShortSleeve': 1e-3, 'NoSleeve': 1e-3, \n                 'Solidcolor': 1e-3, 'multicolour': 1e-3, 'lattice': 1e-3, \n                 'Short': 1e-3, 'Long': 1e-3, 'middle': 1e-3, 'Bald': 1e-3}\n\n    for col_name in predict.columns.tolist()[:4]:\n        if col_name == 'name':\n            continue\n        elif col_name in ['upperLength', 'clothesStyles', 'hairStyles']:\n            pred_info.update(predict[col_name].value_counts().to_dict())\n            targ_info.update(target[col_name].value_counts().to_dict())\n        else:\n            pass\n    \n    acc_count = {'LongSleeve': 1e-3, 'ShortSleeve': 1e-3, 'NoSleeve': 1e-3, \n                 'Solidcolor': 1e-3, 'multicolour': 1e-3, 'lattice': 1e-3, \n                 'Short': 1e-3, 'Long': 1e-3, 'middle': 1e-3, 'Bald': 1e-3}\n\n    for col_name in predict.columns.tolist()[1:4]:\n        for ii in range(len(target[col_name])):\n            target_label = target[col_name][ii]\n            pred_label = predict[col_name][ii]\n\n            if target_label == pred_label:\n                acc_count[target_label] = acc_count.get(target_label, 0) + 1\n    \n\n    \n    \n    F1 = 0.0\n    count = 0\n    for k, v in acc_count.items():\n        if pred_info[k] == 1e-3 or targ_info[k] == 1e-3:\n            continue\n        p = v / pred_info[k]\n        r = v / targ_info[k]\n        F1 += 2 * p * r / (p + r)\n        count += 1\n        # print(F1)\n    \n    F1 /= count\n    # print(count)\n    # print(pred_info)\n    # print(targ_info)\n    # print(acc_count)\n    print('macroF1:', F1)\n\n\n    # get the soft labels\n    soft_pred = t.from_numpy(predict.iloc[:, 4:].to_numpy())\n    soft_targ = t.from_numpy(target.iloc[:, 4:].to_numpy())\n    loss = F.l1_loss(soft_pred, soft_targ, reduction='sum') / soft_targ.size(0)\n\n    print(loss.data)\n    \n\n\nif __name__ == '__main__':\n    compute_macro_f1()\n    # score = np.random.randn(10, 8)\n    # a = [0, 1, 1, 0, 0, 1, 0, 1]\n    # b = [1, 1, 1, 0, 1, 0, 1, 1]\n    # target = np.array([a, b, b, a, a, b, a, b, a, b]).reshape(10, 8)\n    #\n    # print(PR_curve(score, target, sigmoid=True))\n    # compute_attributes_weights()\n" ]
[ [ "numpy.array", "torch.nn.functional.l1_loss", "pandas.DataFrame", "numpy.arange", "pandas.read_csv", "torch.sum" ] ]
shashankcollab/Project3Group8Webpage
[ "890cd3bc06fb2d91823602ea8e652b730dbf53b8" ]
[ "render.py" ]
[ "from time import sleep, strftime, time\nimport matplotlib as mat\n#import matplotlib.animation as ani\nimport matplotlib.pyplot as plt\nfrom matplotlib.figure import Figure\nimport random\nfrom matplotlib import style\n\n \n\n\n#TEMPERATURE =20.0\n\n \n\n \n\n \n\n'''\ndef write_temp(temp):\n    with open(\"temp_log.csv\", \"a\") as log:\n        log.write(\"{0},{1}\\n\".format(strftime(\"%Y-%m-%d %H:%M:%S\"),str(temp)))\n'''\ndef data_collection():\n    x =[]\n    y1 =[]\n    y2 =[]\n    \n    for i in range(1,21):\n        y1.append(20.0 + random.random() * 15)\n        y2.append(60 + random.random() * 20)\n        x.append(time() * i)\n    \n    graph(x,y1,y2)\n    \ndef alert ():\n    Alert = ''\n    temp = 20.0 + random.random() * 15\n    hum = 60 + random.random() * 20\n    if temp > 30 and hum > 70.0:\n        Alert = ('Please Stay at Home!!')\n    else:\n        Alert = ('Enjoy your Ride!!')\n    return(Alert)\n\n \n\ndef graph(x,y1,y2):\n    fig = Figure()\n    #plt = fig.add_subplot()\n    mat.use('Agg')\n    title = alert()\n    plt.clf()\n    plt.scatter(x,y1)\n    \n    plt.plot(x,y1 , color = 'blue', label = 'Temperature')\n    plt.plot(x,y2 , color = 'green',label = 'Humidity')\n    plt.xlabel('Time in Seconds')\n    plt.ylabel('Weather condition')\n    plt.title(label = title)\n    plt.legend(loc = 'best')\n    plt.savefig('fig1.png')\n    #return fig\n    #plt.show()\n    \n\n \n\n\nwhile True:\n    \n    #write_temp(temp)\n    data_collection()\n    #graph(temp)\n    \n    plt.pause(2)\n    \n" ]
[ [ "matplotlib.use", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "matplotlib.pyplot.title", "matplotlib.pyplot.legend", "matplotlib.pyplot.scatter", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.pause", "matplotlib.figure.Figure", "matplotlib.pyplot.clf" ] ]
vishalbelsare/sporco
[ "afc3dae3ab81d84a23e8487812670ecb7457e869" ]
[ "tests/dictlrn/test_onlinecdl.py" ]
[ "from __future__ import division\nfrom builtins import object\n\nimport numpy as np\n\nfrom sporco.dictlrn import onlinecdl\n\n\n\nclass TestSet01(object):\n\n def setup_method(self, method):\n N = 16\n Nd = 5\n M = 4\n K = 3\n np.random.seed(12345)\n self.D0 = np.random.randn(Nd, Nd, M)\n self.S = np.random.randn(N, N, K)\n\n\n def test_01(self):\n lmbda = 1e-1\n opt = onlinecdl.OnlineConvBPDNDictLearn.Options(\n {'CBPDN': {'MaxMainIter': 10}})\n try:\n b = onlinecdl.OnlineConvBPDNDictLearn(self.D0, lmbda, opt=opt)\n for it in range(10):\n img_index = np.random.randint(0, self.S.shape[-1])\n b.solve(self.S[..., img_index])\n except Exception as e:\n print(e)\n assert 0\n\n\n def test_02(self):\n lmbda = 1e-1\n opt = onlinecdl.OnlineConvBPDNDictLearn.Options(\n {'CBPDN': {'MaxMainIter': 10}})\n try:\n b = onlinecdl.OnlineConvBPDNDictLearn(self.D0, lmbda, opt=opt)\n for it in range(10):\n img_index = np.random.randint(0, self.S.shape[-1])\n b.solve(self.S[..., [img_index]])\n except Exception as e:\n print(e)\n assert 0\n\n\n def test_03(self):\n lmbda = 1e-1\n W = np.random.randn(*self.S.shape[0:2])\n opt = onlinecdl.OnlineConvBPDNMaskDictLearn.Options(\n {'CBPDN': {'MaxMainIter': 10}})\n try:\n b = onlinecdl.OnlineConvBPDNMaskDictLearn(self.D0, lmbda, opt=opt)\n for it in range(10):\n img_index = np.random.randint(0, self.S.shape[-1])\n b.solve(self.S[..., img_index], W)\n except Exception as e:\n print(e)\n assert 0\n\n\n\nclass TestSet02(object):\n\n def setup_method(self, method):\n N = 16\n Nc = 3\n Nd = 5\n M = 4\n K = 3\n np.random.seed(12345)\n self.D0 = np.random.randn(Nd, Nd, Nc, M)\n self.S = np.random.randn(N, N, Nc, K)\n\n\n def test_01(self):\n lmbda = 1e-1\n opt = onlinecdl.OnlineConvBPDNDictLearn.Options(\n {'CBPDN': {'MaxMainIter': 10}})\n try:\n b = onlinecdl.OnlineConvBPDNDictLearn(self.D0, lmbda, opt=opt)\n for it in range(10):\n img_index = np.random.randint(0, self.S.shape[-1])\n b.solve(self.S[..., img_index])\n except Exception as e:\n print(e)\n assert 0\n\n\n def test_02(self):\n lmbda = 1e-1\n opts = onlinecdl.OnlineConvBPDNDictLearn.Options(\n {'CBPDN': {'MaxMainIter': 10, 'AutoRho': {'Enabled': False}}})\n try:\n b = onlinecdl.OnlineConvBPDNDictLearn(self.D0, lmbda, opt=opts)\n for it in range(10):\n img_index = np.random.randint(0, self.S.shape[-1])\n b.solve(self.S[..., [img_index]])\n except Exception as e:\n print(e)\n assert 0\n\n\n def test_03(self):\n lmbda = 1e-1\n W = np.random.randn(*self.S.shape[0:3])\n opt = onlinecdl.OnlineConvBPDNMaskDictLearn.Options(\n {'CBPDN': {'MaxMainIter': 10}})\n try:\n b = onlinecdl.OnlineConvBPDNMaskDictLearn(self.D0, lmbda, opt=opt)\n for it in range(10):\n img_index = np.random.randint(0, self.S.shape[-1])\n b.solve(self.S[..., img_index], W)\n except Exception as e:\n print(e)\n assert 0\n\n\n\nclass TestSet03(object):\n\n def setup_method(self, method):\n N = 16\n Nc = 3\n Nd = 5\n M = 4\n K = 3\n np.random.seed(12345)\n self.D0 = np.random.randn(Nd, Nd, 1, M)\n self.S = np.random.randn(N, N, Nc, K)\n\n\n def test_01(self):\n lmbda = 1e-1\n opt = onlinecdl.OnlineConvBPDNDictLearn.Options(\n {'CBPDN': {'MaxMainIter': 10}})\n try:\n b = onlinecdl.OnlineConvBPDNDictLearn(self.D0, lmbda, opt=opt)\n for it in range(10):\n img_index = np.random.randint(0, self.S.shape[-1])\n b.solve(self.S[..., img_index])\n except Exception as e:\n print(e)\n assert 0\n" ]
[ [ "numpy.random.seed", "numpy.random.randint", "numpy.random.randn" ] ]
showmonki/learn_notes
[ "8a416e0294170e242c40d16370e8f42ec9ae8582" ]
[ "CV/classification/COFFFEE_GROUP/Code/cnn_model.py" ]
[ "from tensorflow import keras\nfrom tensorflow.keras import backend as K\n\nclass t_model():\n\n def __init__(self,num_class,input_shape):\n self.input_shape = input_shape # xception at least 71x71\n self.num_cls = num_class\n # self.base_model = self.load_model()\n self.base_model1 = self.load_model1()\n\n def load_model(self):\n inputs = keras.Input(shape=self.input_shape, name = 'model_origin_input')\n K.set_learning_phase(0)\n base_model = keras.applications.Xception(weights='imagenet', include_top=False,input_tensor=inputs)\n base_model.trainable = False\n K.set_learning_phase(1)\n gmp = keras.layers.GlobalMaxPool2D(name='gmp')(base_model.output)\n # bn = keras.layers.BatchNormalization()(gmp)\n top_dropout_rate = 0.2\n # rld = keras.layers.Dense(16, activation='relu')(gmp)\n dp = keras.layers.Dropout(top_dropout_rate, name=\"top_dropout\")(gmp)\n outputs = keras.layers.Dense(self.num_cls, activation='softmax')(dp)\n model = keras.Model(inputs, outputs,name = 'new_model')\n\n model.compile(optimizer=keras.optimizers.Adam(),\n loss='categorical_crossentropy',\n # keras.losses.BinaryCrossentropy(from_logits=True),\n metrics=['accuracy'])\n return model\n\n def load_model1(self):\n base_model = keras.applications.Xception(weights='imagenet', include_top=False,input_shape=self.input_shape)\n base_model.trainable = False\n x = base_model.output\n x = keras.layers.GlobalMaxPool2D(name='gmp')(x)\n # x = keras.layers.Dense(30, activation='relu')(x)\n outputs = keras.layers.Dense(self.num_cls, activation='softmax')(x)\n model = keras.Model(inputs = base_model.inputs, outputs = outputs,name = 'top_model')\n\n model.compile(optimizer=keras.optimizers.Adam(),\n loss='categorical_crossentropy',\n # keras.losses.BinaryCrossentropy(from_logits=True),\n metrics=['accuracy'])\n return model\n\n\nclass t2():\n\n def __init__(self,num_classes,img_shape):\n self.input_shape = img_shape\n self.num_classes = num_classes\n self.base_model = self.load_model()\n\n def load_model(self):\n pretrain_model = keras.applications.InceptionResNetV2(include_top=False,input_shape=self.input_shape,weights='imagenet')\n pretrain_model.trainable = False\n x=pretrain_model.output\n x = keras.layers.GlobalMaxPool2D(name='gmp')(x)\n x = keras.layers.Dense(100, activation='softmax')(x)\n outputs = keras.layers.Dense(self.num_classes, activation='softmax')(x)\n model = keras.Model(inputs=pretrain_model.input, outputs=outputs, name='transfer_model')\n\n model.compile(optimizer=keras.optimizers.Adam(),\n loss='categorical_crossentropy',\n # keras.losses.BinaryCrossentropy(from_logits=True),\n metrics=['accuracy'])\n return model\n\n" ]
[ [ "tensorflow.keras.layers.GlobalMaxPool2D", "tensorflow.keras.applications.InceptionResNetV2", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Dropout", "tensorflow.keras.applications.Xception", "tensorflow.keras.Model", "tensorflow.keras.Input", "tensorflow.keras.backend.set_learning_phase", "tensorflow.keras.optimizers.Adam" ] ]
jtang10/PyTorch-ENet
[ "d407eb6444e12ca5dd0fbe60145ed17440d31db2" ]
[ "profiling.py" ]
[ "import argparse\nimport math\nimport os\nimport time\nfrom collections import OrderedDict\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torchvision.transforms as transforms\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import ScalarFormatter, MultipleLocator\nfrom PIL import Image\n\nfrom models.enet import ENet\nimport transforms as ext_transforms\nimport utils\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--test', action='store_false', help=\"If True, showcase how this model works; else, return latency\")\nparser.add_argument('--iter_batch', action='store_true', help=\"If True, iterate from batch size of 1 to maximum batch size; else, only run for current batch size\")\nparser.add_argument('--plot', action='store_true', help=\"If True, plot the latency result\")\nparser.add_argument('-i', '--iter', type=int, default=100, help=\"Number of iterations to run for latency\")\nparser.add_argument('-b', '--batch_size', type=int, default=1, help=\"Batch size for inference\")\nargs = parser.parse_args()\n\n# Set for GPU\ndevice = torch.device('cuda')\n\n# Load the sample data\ndata_dir = \"../data/cityscapes\"\nimage_path = \"berlin_000000_000019_leftImg8bit.png\"\nimage_path = os.path.join(data_dir, image_path)\nsample_image = Image.open(image_path)\nprint(\"Original sample image dimension:\", sample_image.size)\n\n\n\n# Preprocess the image per model requirement and load onto the GPU\nheight, width = 512, 1024\nimage_transform = transforms.Compose(\n [transforms.Resize((height, width)),\n transforms.ToTensor()])\nsample_image = image_transform(sample_image).to(device)\nprint(\"Preprocessed sample image dimension:\", sample_image.shape)\n\n\n# Load the required parameters for inference\ncolor_encoding = OrderedDict([\n ('unlabeled', (0, 0, 0)),\n ('road', (128, 64, 128)),\n ('sidewalk', (244, 35, 232)),\n ('building', (70, 70, 70)),\n ('wall', (102, 102, 156)),\n ('fence', (190, 153, 153)),\n ('pole', (153, 153, 153)),\n ('traffic_light', (250, 170, 30)),\n ('traffic_sign', (220, 220, 0)),\n ('vegetation', (107, 142, 35)),\n ('terrain', (152, 251, 152)),\n ('sky', (70, 130, 180)),\n ('person', (220, 20, 60)),\n ('rider', (255, 0, 0)),\n ('car', (0, 0, 142)),\n ('truck', (0, 0, 70)),\n ('bus', (0, 60, 100)),\n ('train', (0, 80, 100)),\n ('motorcycle', (0, 0, 230)),\n ('bicycle', (119, 11, 32))\n])\nnum_classes = len(color_encoding)\nmodel = ENet(num_classes).to(device)\n\n# Load the pre-trained weights\nmodel_path = \"./save/ENet_Cityscapes/ENet\"\ncheckpoint = torch.load(model_path)\nmodel.load_state_dict(checkpoint['state_dict'])\nprint('Model loaded successfully!')\n\n# Run the inference\n# If args.test, then showcase how this model works\nif not args.test:\n model.eval()\n sample_image = torch.unsqueeze(sample_image, 0)\n with torch.no_grad():\n output = model(sample_image)\n print(\"Model output dimension:\", output.shape)\n\n # Convert it to a single int using the indices where the maximum (1) occurs\n _, predictions = torch.max(output.data, 1)\n\n label_to_rgb = transforms.Compose([\n ext_transforms.LongTensorToRGBPIL(color_encoding),\n transforms.ToTensor()\n ])\n color_predictions = utils.batch_transform(predictions.cpu(), label_to_rgb)\n utils.imshow_batch(sample_image.data.cpu(), color_predictions)\n# Run several iterations for each batch size to determine the \nelse:\n model.eval()\n with torch.no_grad():\n if args.iter_batch:\n batch_size = [int(2**i) for i in range(int(math.log2(args.batch_size)+1))]\n else:\n batch_size = 
[args.batch_size]\n means = []\n stds = []\n percentile_90 = []\n percentile_99 = []\n fps = []\n for bs in batch_size:\n print(\"Batch size: {}\".format(bs))\n batched_image = torch.stack([sample_image]*bs, 0)\n latencies = np.zeros(args.iter)\n \n # Warm up round\n for _ in range(5):\n # start = time.time()\n output = model(batched_image)\n # end = time.time()\n # print(\"Cold start latency: {:.3f} ms\".format((end-start)*1000))\n for i in range(args.iter):\n start = time.time()\n output = model(batched_image)\n end = time.time()\n latencies[i] = end - start\n\n latencies.sort()\n mean_latency = np.mean(latencies) * 1000\n std_latency = np.std(latencies) * 1000\n p90 = latencies[int(args.iter * 0.9 - 1)] * 1000\n p99 = latencies[int(args.iter * 0.99 - 1)] * 1000\n # print(\"Latency Total: mean: {:.3f} ms, std: {:.3f} ms\".format(mean_latency, std_latency))\n print(\"Latency: mean: {:.3f}ms ({:.2f} FPS), std: {:.3f}ms, P90: {:.3f}ms, P99: {:.3f}ms\".format(\n mean_latency/bs, 1000/mean_latency*bs, std_latency/bs, p90/bs, p99/bs))\n means.append(mean_latency/bs)\n stds.append(std_latency/bs)\n fps.append(1000/mean_latency*bs)\n percentile_90.append(p90/bs)\n percentile_99.append(p99/bs)\n\n fig = plt.figure(figsize=(16, 9))\n fig.suptitle(\"PyTorch-ENet Latency Test on Cityscapes Dataset\", fontsize='xx-large', fontweight='bold')\n axs = fig.subplots(2, 1)\n axs[0].errorbar(batch_size, means, stds, c='b')\n axs[0].set_xlabel('Batch Size'); \n axs[0].set_ylabel('Latency (ms)', c='b')\n axs[0].set_ylim(0, 30)\n axs[0].set_xscale('log', basex=2); axs[0].xaxis.set_major_formatter(ScalarFormatter()); axs[0].set_xticks(batch_size)\n # axs[0].set_title(\"Latency vs Batch Size\")\n axs[0].grid(True)\n axs[0].yaxis.set_major_locator(MultipleLocator(5))\n axs[0].tick_params(axis='y', labelcolor='b')\n for x, y in zip(batch_size, means):\n axs[0].annotate('{:.1f}'.format(y), xy=(x, y))\n\n ax_fps = axs[0].twinx()\n ax_fps.plot(batch_size, fps, c='r', marker='o')\n # ax_fps.set_xlabel('Batch Size')\n ax_fps.set_ylabel('FPS', c='r')\n ax_fps.set_ylim(0, 150)\n ax_fps.yaxis.set_major_locator(MultipleLocator(30))\n ax_fps.tick_params(axis='y', labelcolor='r')\n # ax_fps.set_xscale('log', basex=2); ax_fps.xaxis.set_major_formatter(ScalarFormatter()); ax_fps.set_xticks(batch_size)\n # ax_fps.grid(True)\n for x, y in zip(batch_size, fps):\n ax_fps.annotate('{:.1f}'.format(y), xy=(x, y))\n \n labels = [str(bs) for bs in batch_size]\n x = np.arange(len(labels))\n print(labels, x)\n width = 0.2\n rects1 = axs[1].bar(x - width, means, width, label='mean')\n rects2 = axs[1].bar(x, percentile_90, width, label='P90')\n rects3 = axs[1].bar(x + width, percentile_99, width, label='P99')\n axs[1].set_ylabel('Latency (ms)')\n axs[1].set_xlabel('Batch Size')\n # axs[1].set_title(\"Latency across percentile\")\n # axs[1].set_xscale('log', basex=2); axs[0].xaxis.set_major_formatter(ScalarFormatter())\n axs[1].set_xticks(x)\n axs[1].set_xticklabels(labels)\n axs[1].set_ylim(0, 20)\n axs[1].legend()\n\n def autolabel(rects):\n \"\"\"Attach a text label above each bar in *rects*, displaying its height.\"\"\"\n for rect in rects:\n height = rect.get_height()\n axs[1].annotate('{:.1f}'.format(height),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom')\n\n autolabel(rects1)\n autolabel(rects2)\n autolabel(rects3)\n # fig.tight_layout()\n \n if args.plot:\n plt.show()\n else:\n plt.savefig('enet')\n" ]
[ [ "torch.device", "matplotlib.ticker.MultipleLocator", "torch.stack", "numpy.zeros", "torch.max", "matplotlib.pyplot.savefig", "torch.no_grad", "numpy.mean", "matplotlib.pyplot.figure", "torch.unsqueeze", "numpy.std", "matplotlib.pyplot.show", "torch.load", "matplotlib.ticker.ScalarFormatter" ] ]
HughPaynter/PyGRB
[ "2eaf834cf3c62a639a056285ca9518456daa4b7c" ]
[ "PyGRB/postprocess/make_evidence_tables.py" ]
[ "import numpy as np\nimport pandas as pd\nfrom abc import ABCMeta\n\nimport bilby\nfrom prettytable import PrettyTable\n\n\nfrom PyGRB.backend.makemodels import create_model_from_key\n\nclass EvidenceTables(object):\n \"\"\"\n Defines the :class:`~EvidenceTables` class of the *PyGRB* package.\n This is an abstract class that contains the private methods of the\n :class:`~.PulseFitter` class. These methods predominantly translate fitting\n parameters into labels for file or folder names and vice versa.\n \"\"\"\n\n def __init__(self):\n super(EvidenceTables, self).__init__()\n\n def get_evidence_from_models(self, model_dict, channels = None):\n \"\"\"\n A method to generate the evidence tables for the given channels\n for the specified models. Returns one table per channel. Creates both\n .txt tables and .tex tables.\n\n Parameters\n ----------\n model_dict : Dict.\n A dictionary of models to be evaluated, and model evidence tabulated.\n Each model will be a dict {}.\n channels : list.\n A list of integers. The channels to be evaluated.\n \"\"\"\n\n if type(channels) is not np.ndarray or not isinstance(channels, list):\n channels = [0, 1, 2, 3]\n\n keys = model_dict.keys()\n models = [model for key, model in model_dict.items()]\n\n self._evidence_table_to_latex(\n models = models, channels = channels, keys = keys)\n self._evidence_table_to_txt(\n models = models, channels = channels, keys = keys)\n\n def _evidence_table_to_txt(self, models, channels, keys):\n \"\"\"\n Returns the evidence tables as a single .txt file.\n See :meth:`~get_evidence_from_models`.\n \"\"\"\n self.tlabel = self._get_trigger_label()\n self._get_base_directory()\n directory = self.base_folder\n Z_file = f'{directory}/evidence_table_T{self.trigger}_nlive{self.nSamples}.txt'\n open(Z_file, 'w').close()\n for i in channels:\n # hrules = 1 puts a horizontal line between each table entry\n # which makes the table .rst interpretable\n x = PrettyTable(['Model', 'ln Z', 'error'], hrules = 1)\n x.align['Model'] = \"l\" # Left align models\n # One space between column edges and contents (default)\n x.padding_width = 1\n for k in range(len(models)):\n if 'name' not in [*models[k]]:\n models[k]['name'] = keys[k]\n self._setup_labels(models[k])\n result_label = f'{self.fstring}{self.clabels[i]}'\n open_result = f'{self.outdir}/{result_label}_result.json'\n try:\n result = bilby.result.read_in_result(filename=open_result)\n x.add_row([ models[k]['name'],\n f'{result.log_evidence:.2f}',\n f'{result.log_evidence_err:.2f}'])\n except:\n print(f'Could not find {open_result}')\n\n min_e = np.inf\n for row in x:\n row.border = False\n row.header = False\n e = float(row.get_string(fields=['ln Z']).strip())\n if e < min_e:\n min_e = e\n bayes_facs = []\n for row in x:\n row.border = False\n row.header = False\n e = float(row.get_string(fields=['ln Z']).strip())\n bayes_facs.append(f'{e - min_e:.2f}')\n x.add_column('ln BF', bayes_facs)\n # indentation should be the same as k loop\n # \\n padding makes that table .rst interpretable\n with open(Z_file, 'a') as w:\n w.write(f'Channel {i+1}\\n\\n')\n w.write(str(x))\n w.write('\\n\\n\\n')\n\n def _evidence_table_to_latex(self, models, channels, keys):\n \"\"\"\n Returns the evidence tables as a separate .tex files for each channel.\n See :meth:`~get_evidence_from_models`.\n \"\"\"\n self.tlabel = self._get_trigger_label()\n self._get_base_directory()\n directory = self.base_folder\n columns = ['Model', 'ln evidence', 'ln error', 'ln BF']\n index = np.arange(len(models))\n for i in 
channels:\n channel_df = pd.DataFrame(columns=columns, index = index)\n for k in range(len(models)):\n if 'name' not in [*models[k]]:\n models[k]['name'] = keys[k]\n self._setup_labels(models[k])\n result_label = f'{self.fstring}{self.clabels[i]}'\n open_result = f'{self.outdir}/{result_label}_result.json'\n try:\n result = bilby.result.read_in_result(filename=open_result)\n new_row = { 'Model' : [models[k]['name']],\n 'ln evidence' : [result.log_evidence],\n 'ln error' : [result.log_evidence_err],\n 'ln BF' : [result.log_evidence]\n }\n except:\n print(f'Could not find {open_result}')\n new_row = { 'Model' : [models[k]['name']]}\n df = pd.DataFrame(new_row, index = [k])\n channel_df.update(df)\n base_BF = channel_df['ln evidence'].min()\n for k in range(len(models)):\n channel_df.loc[[k], ['ln BF']] = channel_df.loc[[k], ['ln BF']] - base_BF\n print(channel_df.to_latex( index=False, float_format=\"{:0.2f}\".format))\n channel_df.to_latex(f'{directory}/BF_table_ch_{i+1}.tex',\n index=False, float_format=\"{:0.2f}\".format)\n" ]
[ [ "pandas.DataFrame" ] ]
gwdgithubnom/gjgr
[ "581957a296b13a4231ea1e67ec62083b7da445bf" ]
[ "content/test/faceswap/tools/sort.py" ]
[ "#!/usr/bin/env python3\n\"\"\"\nA tool that allows for sorting and grouping images in different ways.\n\"\"\"\nimport logging\nimport os\nimport sys\nimport operator\nfrom shutil import copyfile\n\nimport numpy as np\nimport cv2\nfrom tqdm import tqdm\n\n# faceswap imports\nfrom lib.cli import FullHelpArgumentParser\nfrom lib import Serializer\nfrom lib.faces_detect import DetectedFace\nfrom lib.multithreading import SpawnProcess\nfrom lib.queue_manager import queue_manager, QueueEmpty\nfrom lib.utils import cv2_read_img\nfrom lib.vgg_face2_keras import VGGFace2 as VGGFace\nfrom plugins.plugin_loader import PluginLoader\n\nfrom . import cli\n\nlogger = logging.getLogger(__name__) # pylint: disable=invalid-name\n\n\nclass Sort():\n \"\"\" Sorts folders of faces based on input criteria \"\"\"\n # pylint: disable=no-member\n def __init__(self, arguments):\n self.args = arguments\n self.changes = None\n self.serializer = None\n self.vgg_face = None\n\n def process(self):\n \"\"\" Main processing function of the sort tool \"\"\"\n\n # Setting default argument values that cannot be set by argparse\n\n # Set output dir to the same value as input dir\n # if the user didn't specify it.\n if self.args.output_dir is None:\n logger.verbose(\"No output directory provided. Using input dir as output dir.\")\n self.args.output_dir = self.args.input_dir\n\n # Assigning default threshold values based on grouping method\n if (self.args.final_process == \"folders\"\n and self.args.min_threshold < 0.0):\n method = self.args.group_method.lower()\n if method == 'face-cnn':\n self.args.min_threshold = 7.2\n elif method == 'hist':\n self.args.min_threshold = 0.3\n\n # Load VGG Face if sorting by face\n if self.args.sort_method.lower() == \"face\":\n self.vgg_face = VGGFace(backend=self.args.backend, loglevel=self.args.loglevel)\n\n # If logging is enabled, prepare container\n if self.args.log_changes:\n self.changes = dict()\n\n # Assign default sort_log.json value if user didn't specify one\n if self.args.log_file_path == 'sort_log.json':\n self.args.log_file_path = os.path.join(self.args.input_dir,\n 'sort_log.json')\n\n # Set serializer based on logfile extension\n serializer_ext = os.path.splitext(\n self.args.log_file_path)[-1]\n self.serializer = Serializer.get_serializer_from_ext(\n serializer_ext)\n\n # Prepare sort, group and final process method names\n _sort = \"sort_\" + self.args.sort_method.lower()\n _group = \"group_\" + self.args.group_method.lower()\n _final = \"final_process_\" + self.args.final_process.lower()\n self.args.sort_method = _sort.replace('-', '_')\n self.args.group_method = _group.replace('-', '_')\n self.args.final_process = _final.replace('-', '_')\n\n self.sort_process()\n\n def launch_aligner(self):\n \"\"\" Load the aligner plugin to retrieve landmarks \"\"\"\n out_queue = queue_manager.get_queue(\"out\")\n kwargs = {\"in_queue\": queue_manager.get_queue(\"in\"),\n \"out_queue\": out_queue}\n\n for plugin in (\"fan\", \"cv2_dnn\"):\n aligner = PluginLoader.get_aligner(plugin)(loglevel=self.args.loglevel)\n process = SpawnProcess(aligner.run, **kwargs)\n event = process.event\n process.start()\n # Wait for Aligner to take init\n # The first ever load of the model for FAN has reportedly taken\n # up to 3-4 minutes, hence high timeout.\n event.wait(300)\n\n if not event.is_set():\n if plugin == \"fan\":\n process.join()\n logger.error(\"Error initializing FAN. 
Trying CV2-DNN\")\n continue\n else:\n raise ValueError(\"Error inititalizing Aligner\")\n if plugin == \"cv2_dnn\":\n return\n\n try:\n err = None\n err = out_queue.get(True, 1)\n except QueueEmpty:\n pass\n if not err:\n break\n process.join()\n logger.error(\"Error initializing FAN. Trying CV2-DNN\")\n\n @staticmethod\n def alignment_dict(image):\n \"\"\" Set the image to a dict for alignment \"\"\"\n height, width = image.shape[:2]\n face = DetectedFace(x=0, w=width, y=0, h=height)\n face = face.to_bounding_box_dict()\n return {\"image\": image,\n \"detected_faces\": [face]}\n\n @staticmethod\n def get_landmarks(filename):\n \"\"\" Extract the face from a frame (If not alignments file found) \"\"\"\n image = cv2_read_img(filename, raise_error=True)\n queue_manager.get_queue(\"in\").put(Sort.alignment_dict(image))\n face = queue_manager.get_queue(\"out\").get()\n landmarks = face[\"landmarks\"][0]\n return landmarks\n\n def sort_process(self):\n \"\"\"\n This method dynamically assigns the functions that will be used to run\n the core process of sorting, optionally grouping, renaming/moving into\n folders. After the functions are assigned they are executed.\n \"\"\"\n sort_method = self.args.sort_method.lower()\n group_method = self.args.group_method.lower()\n final_method = self.args.final_process.lower()\n\n img_list = getattr(self, sort_method)()\n if \"folders\" in final_method:\n # Check if non-dissim sort method and group method are not the same\n if group_method.replace('group_', '') not in sort_method:\n img_list = self.reload_images(group_method, img_list)\n img_list = getattr(self, group_method)(img_list)\n else:\n img_list = getattr(self, group_method)(img_list)\n\n getattr(self, final_method)(img_list)\n\n logger.info(\"Done.\")\n\n # Methods for sorting\n def sort_blur(self):\n \"\"\" Sort by blur amount \"\"\"\n input_dir = self.args.input_dir\n\n logger.info(\"Sorting by blur...\")\n img_list = [[img, self.estimate_blur(img)]\n for img in\n tqdm(self.find_images(input_dir),\n desc=\"Loading\",\n file=sys.stdout)]\n logger.info(\"Sorting...\")\n\n img_list = sorted(img_list, key=operator.itemgetter(1), reverse=True)\n\n return img_list\n\n def sort_face(self):\n \"\"\" Sort by face similarity \"\"\"\n input_dir = self.args.input_dir\n\n logger.info(\"Sorting by face similarity...\")\n\n images = np.array(self.find_images(input_dir))\n preds = np.array([self.vgg_face.predict(cv2_read_img(img, raise_error=True))\n for img in tqdm(images, desc=\"loading\", file=sys.stdout)])\n logger.info(\"Sorting. 
Depending on ths size of your dataset, this may take a few \"\n \"minutes...\")\n indices = self.vgg_face.sorted_similarity(preds, method=\"ward\")\n img_list = images[indices]\n return img_list\n\n def sort_face_cnn(self):\n \"\"\" Sort by CNN similarity \"\"\"\n self.launch_aligner()\n input_dir = self.args.input_dir\n\n logger.info(\"Sorting by face-cnn similarity...\")\n img_list = []\n for img in tqdm(self.find_images(input_dir),\n desc=\"Loading\",\n file=sys.stdout):\n landmarks = self.get_landmarks(img)\n img_list.append([img, np.array(landmarks)\n if landmarks\n else np.zeros((68, 2))])\n\n queue_manager.terminate_queues()\n img_list_len = len(img_list)\n for i in tqdm(range(0, img_list_len - 1),\n desc=\"Sorting\",\n file=sys.stdout):\n min_score = float(\"inf\")\n j_min_score = i + 1\n for j in range(i + 1, len(img_list)):\n fl1 = img_list[i][1]\n fl2 = img_list[j][1]\n score = np.sum(np.absolute((fl2 - fl1).flatten()))\n\n if score < min_score:\n min_score = score\n j_min_score = j\n (img_list[i + 1],\n img_list[j_min_score]) = (img_list[j_min_score],\n img_list[i + 1])\n return img_list\n\n def sort_face_cnn_dissim(self):\n \"\"\" Sort by CNN dissimilarity \"\"\"\n self.launch_aligner()\n input_dir = self.args.input_dir\n\n logger.info(\"Sorting by face-cnn dissimilarity...\")\n\n img_list = []\n for img in tqdm(self.find_images(input_dir),\n desc=\"Loading\",\n file=sys.stdout):\n landmarks = self.get_landmarks(img)\n img_list.append([img, np.array(landmarks)\n if landmarks\n else np.zeros((68, 2)), 0])\n\n img_list_len = len(img_list)\n for i in tqdm(range(0, img_list_len - 1),\n desc=\"Sorting\",\n file=sys.stdout):\n score_total = 0\n for j in range(i + 1, len(img_list)):\n if i == j:\n continue\n fl1 = img_list[i][1]\n fl2 = img_list[j][1]\n score_total += np.sum(np.absolute((fl2 - fl1).flatten()))\n\n img_list[i][2] = score_total\n\n logger.info(\"Sorting...\")\n img_list = sorted(img_list, key=operator.itemgetter(2), reverse=True)\n\n return img_list\n\n def sort_face_yaw(self):\n \"\"\" Sort by yaw of face \"\"\"\n self.launch_aligner()\n input_dir = self.args.input_dir\n\n img_list = []\n for img in tqdm(self.find_images(input_dir),\n desc=\"Loading\",\n file=sys.stdout):\n landmarks = self.get_landmarks(img)\n img_list.append(\n [img, self.calc_landmarks_face_yaw(np.array(landmarks))])\n\n logger.info(\"Sorting by face-yaw...\")\n img_list = sorted(img_list, key=operator.itemgetter(1), reverse=True)\n\n return img_list\n\n def sort_hist(self):\n \"\"\" Sort by histogram of face similarity \"\"\"\n input_dir = self.args.input_dir\n\n logger.info(\"Sorting by histogram similarity...\")\n\n img_list = [\n [img, cv2.calcHist([cv2_read_img(img, raise_error=True)], [0], None, [256], [0, 256])]\n for img in\n tqdm(self.find_images(input_dir), desc=\"Loading\", file=sys.stdout)\n ]\n\n img_list_len = len(img_list)\n for i in tqdm(range(0, img_list_len - 1), desc=\"Sorting\",\n file=sys.stdout):\n min_score = float(\"inf\")\n j_min_score = i + 1\n for j in range(i + 1, len(img_list)):\n score = cv2.compareHist(img_list[i][1],\n img_list[j][1],\n cv2.HISTCMP_BHATTACHARYYA)\n if score < min_score:\n min_score = score\n j_min_score = j\n (img_list[i + 1],\n img_list[j_min_score]) = (img_list[j_min_score],\n img_list[i + 1])\n return img_list\n\n def sort_hist_dissim(self):\n \"\"\" Sort by histigram of face dissimilarity \"\"\"\n input_dir = self.args.input_dir\n\n logger.info(\"Sorting by histogram dissimilarity...\")\n\n img_list = [\n [img,\n cv2.calcHist([cv2_read_img(img, 
raise_error=True)], [0], None, [256], [0, 256]), 0]\n for img in\n tqdm(self.find_images(input_dir), desc=\"Loading\", file=sys.stdout)\n ]\n\n img_list_len = len(img_list)\n for i in tqdm(range(0, img_list_len), desc=\"Sorting\", file=sys.stdout):\n score_total = 0\n for j in range(0, img_list_len):\n if i == j:\n continue\n score_total += cv2.compareHist(img_list[i][1],\n img_list[j][1],\n cv2.HISTCMP_BHATTACHARYYA)\n\n img_list[i][2] = score_total\n\n logger.info(\"Sorting...\")\n img_list = sorted(img_list, key=operator.itemgetter(2), reverse=True)\n\n return img_list\n\n # Methods for grouping\n def group_blur(self, img_list):\n \"\"\" Group into bins by blur \"\"\"\n # Starting the binning process\n num_bins = self.args.num_bins\n\n # The last bin will get all extra images if it's\n # not possible to distribute them evenly\n num_per_bin = len(img_list) // num_bins\n remainder = len(img_list) % num_bins\n\n logger.info(\"Grouping by blur...\")\n bins = [[] for _ in range(num_bins)]\n idx = 0\n for i in range(num_bins):\n for _ in range(num_per_bin):\n bins[i].append(img_list[idx][0])\n idx += 1\n\n # If remainder is 0, nothing gets added to the last bin.\n for i in range(1, remainder + 1):\n bins[-1].append(img_list[-i][0])\n\n return bins\n\n def group_face_cnn(self, img_list):\n \"\"\" Group into bins by CNN face similarity \"\"\"\n logger.info(\"Grouping by face-cnn similarity...\")\n\n # Groups are of the form: group_num -> reference faces\n reference_groups = dict()\n\n # Bins array, where index is the group number and value is\n # an array containing the file paths to the images in that group.\n bins = []\n\n # Comparison threshold used to decide how similar\n # faces have to be to be grouped together.\n # It is multiplied by 1000 here to allow the cli option to use smaller\n # numbers.\n min_threshold = self.args.min_threshold * 1000\n\n img_list_len = len(img_list)\n\n for i in tqdm(range(0, img_list_len - 1),\n desc=\"Grouping\",\n file=sys.stdout):\n fl1 = img_list[i][1]\n\n current_best = [-1, float(\"inf\")]\n\n for key, references in reference_groups.items():\n try:\n score = self.get_avg_score_faces_cnn(fl1, references)\n except TypeError:\n score = float(\"inf\")\n except ZeroDivisionError:\n score = float(\"inf\")\n if score < current_best[1]:\n current_best[0], current_best[1] = key, score\n\n if current_best[1] < min_threshold:\n reference_groups[current_best[0]].append(fl1[0])\n bins[current_best[0]].append(img_list[i][0])\n else:\n reference_groups[len(reference_groups)] = [img_list[i][1]]\n bins.append([img_list[i][0]])\n\n return bins\n\n def group_face_yaw(self, img_list):\n \"\"\" Group into bins by yaw of face \"\"\"\n # Starting the binning process\n num_bins = self.args.num_bins\n\n # The last bin will get all extra images if it's\n # not possible to distribute them evenly\n num_per_bin = len(img_list) // num_bins\n remainder = len(img_list) % num_bins\n\n logger.info(\"Grouping by face-yaw...\")\n bins = [[] for _ in range(num_bins)]\n idx = 0\n for i in range(num_bins):\n for _ in range(num_per_bin):\n bins[i].append(img_list[idx][0])\n idx += 1\n\n # If remainder is 0, nothing gets added to the last bin.\n for i in range(1, remainder + 1):\n bins[-1].append(img_list[-i][0])\n\n return bins\n\n def group_hist(self, img_list):\n \"\"\" Group into bins by histogram \"\"\"\n logger.info(\"Grouping by histogram...\")\n\n # Groups are of the form: group_num -> reference histogram\n reference_groups = dict()\n\n # Bins array, where index is the group number and 
value is\n # an array containing the file paths to the images in that group\n bins = []\n\n min_threshold = self.args.min_threshold\n\n img_list_len = len(img_list)\n reference_groups[0] = [img_list[0][1]]\n bins.append([img_list[0][0]])\n\n for i in tqdm(range(1, img_list_len),\n desc=\"Grouping\",\n file=sys.stdout):\n current_best = [-1, float(\"inf\")]\n for key, value in reference_groups.items():\n score = self.get_avg_score_hist(img_list[i][1], value)\n if score < current_best[1]:\n current_best[0], current_best[1] = key, score\n\n if current_best[1] < min_threshold:\n reference_groups[current_best[0]].append(img_list[i][1])\n bins[current_best[0]].append(img_list[i][0])\n else:\n reference_groups[len(reference_groups)] = [img_list[i][1]]\n bins.append([img_list[i][0]])\n\n return bins\n\n # Final process methods\n def final_process_rename(self, img_list):\n \"\"\" Rename the files \"\"\"\n output_dir = self.args.output_dir\n\n process_file = self.set_process_file_method(self.args.log_changes,\n self.args.keep_original)\n\n # Make sure output directory exists\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n description = (\n \"Copying and Renaming\" if self.args.keep_original\n else \"Moving and Renaming\"\n )\n\n for i in tqdm(range(0, len(img_list)),\n desc=description,\n leave=False,\n file=sys.stdout):\n src = img_list[i] if isinstance(img_list[i], str) else img_list[i][0]\n src_basename = os.path.basename(src)\n\n dst = os.path.join(output_dir, '{:05d}_{}'.format(i, src_basename))\n try:\n process_file(src, dst, self.changes)\n except FileNotFoundError as err:\n logger.error(err)\n logger.error('fail to rename %s', src)\n\n for i in tqdm(range(0, len(img_list)),\n desc=description,\n file=sys.stdout):\n renaming = self.set_renaming_method(self.args.log_changes)\n fname = img_list[i] if isinstance(img_list[i], str) else img_list[i][0]\n src, dst = renaming(fname, output_dir, i, self.changes)\n\n try:\n os.rename(src, dst)\n except FileNotFoundError as err:\n logger.error(err)\n logger.error('fail to rename %s', format(src))\n\n if self.args.log_changes:\n self.write_to_log(self.changes)\n\n def final_process_folders(self, bins):\n \"\"\" Move the files to folders \"\"\"\n output_dir = self.args.output_dir\n\n process_file = self.set_process_file_method(self.args.log_changes,\n self.args.keep_original)\n\n # First create new directories to avoid checking\n # for directory existence in the moving loop\n logger.info(\"Creating group directories.\")\n for i in range(len(bins)):\n directory = os.path.join(output_dir, str(i))\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n description = (\n \"Copying into Groups\" if self.args.keep_original\n else \"Moving into Groups\"\n )\n\n logger.info(\"Total groups found: %s\", len(bins))\n for i in tqdm(range(len(bins)), desc=description, file=sys.stdout):\n for j in range(len(bins[i])):\n src = bins[i][j]\n src_basename = os.path.basename(src)\n\n dst = os.path.join(output_dir, str(i), src_basename)\n try:\n process_file(src, dst, self.changes)\n except FileNotFoundError as err:\n logger.error(err)\n logger.error(\"Failed to move '%s' to '%s'\", src, dst)\n\n if self.args.log_changes:\n self.write_to_log(self.changes)\n\n # Various helper methods\n def write_to_log(self, changes):\n \"\"\" Write the changes to log file \"\"\"\n logger.info(\"Writing sort log to: '%s'\", self.args.log_file_path)\n with open(self.args.log_file_path, 'w') as lfile:\n lfile.write(self.serializer.marshal(changes))\n\n def 
reload_images(self, group_method, img_list):\n \"\"\"\n Reloads the image list by replacing the comparative values with those\n that the chosen grouping method expects.\n :param group_method: str name of the grouping method that will be used.\n :param img_list: image list that has been sorted by one of the sort\n methods.\n :return: img_list but with the comparative values that the chosen\n grouping method expects.\n \"\"\"\n input_dir = self.args.input_dir\n logger.info(\"Preparing to group...\")\n if group_method == 'group_blur':\n temp_list = [[img, self.estimate_blur(cv2_read_img(img, raise_error=True))]\n for img in\n tqdm(self.find_images(input_dir),\n desc=\"Reloading\",\n file=sys.stdout)]\n elif group_method == 'group_face_cnn':\n self.launch_aligner()\n temp_list = []\n for img in tqdm(self.find_images(input_dir),\n desc=\"Reloading\",\n file=sys.stdout):\n landmarks = self.get_landmarks(img)\n temp_list.append([img, np.array(landmarks)\n if landmarks\n else np.zeros((68, 2))])\n elif group_method == 'group_face_yaw':\n self.launch_aligner()\n temp_list = []\n for img in tqdm(self.find_images(input_dir),\n desc=\"Reloading\",\n file=sys.stdout):\n landmarks = self.get_landmarks(img)\n temp_list.append(\n [img,\n self.calc_landmarks_face_yaw(np.array(landmarks))])\n elif group_method == 'group_hist':\n temp_list = [\n [img,\n cv2.calcHist([cv2_read_img(img, raise_error=True)], [0], None, [256], [0, 256])]\n for img in\n tqdm(self.find_images(input_dir),\n desc=\"Reloading\",\n file=sys.stdout)\n ]\n else:\n raise ValueError(\"{} group_method not found.\".format(group_method))\n\n return self.splice_lists(img_list, temp_list)\n\n @staticmethod\n def splice_lists(sorted_list, new_vals_list):\n \"\"\"\n This method replaces the value at index 1 in each sub-list in the\n sorted_list with the value that is calculated for the same img_path,\n but found in new_vals_list.\n\n Format of lists: [[img_path, value], [img_path2, value2], ...]\n\n :param sorted_list: list that has been sorted by one of the sort\n methods.\n :param new_vals_list: list that has been loaded by a different method\n than the sorted_list.\n :return: list that is sorted in the same way as the input sorted list\n but the values corresponding to each image are from new_vals_list.\n \"\"\"\n new_list = []\n # Make new list of just image paths to serve as an index\n val_index_list = [i[0] for i in new_vals_list]\n for i in tqdm(range(len(sorted_list)),\n desc=\"Splicing\",\n file=sys.stdout):\n current_img = sorted_list[i] if isinstance(sorted_list[i], str) else sorted_list[i][0]\n new_val_index = val_index_list.index(current_img)\n new_list.append([current_img, new_vals_list[new_val_index][1]])\n\n return new_list\n\n @staticmethod\n def find_images(input_dir):\n \"\"\" Return list of images at specified location \"\"\"\n result = []\n extensions = [\".jpg\", \".png\", \".jpeg\"]\n for root, _, files in os.walk(input_dir):\n for file in files:\n if os.path.splitext(file)[1].lower() in extensions:\n result.append(os.path.join(root, file))\n return result\n\n @staticmethod\n def estimate_blur(image_file):\n \"\"\"\n Estimate the amount of blur an image has with the variance of the Laplacian.\n Normalize by pixel number to offset the effect of image size on pixel gradients & variance\n \"\"\"\n image = cv2_read_img(image_file, raise_error=True)\n if image.ndim == 3:\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n blur_map = cv2.Laplacian(image, cv2.CV_32F)\n score = np.var(blur_map) / np.sqrt(image.shape[0] * 
image.shape[1])\n return score\n\n @staticmethod\n def calc_landmarks_face_pitch(flm):\n \"\"\" UNUSED - Calculate the amount of pitch in a face \"\"\"\n var_t = ((flm[6][1] - flm[8][1]) + (flm[10][1] - flm[8][1])) / 2.0\n var_b = flm[8][1]\n return var_b - var_t\n\n @staticmethod\n def calc_landmarks_face_yaw(flm):\n \"\"\" Calculate the amount of yaw in a face \"\"\"\n var_l = ((flm[27][0] - flm[0][0])\n + (flm[28][0] - flm[1][0])\n + (flm[29][0] - flm[2][0])) / 3.0\n var_r = ((flm[16][0] - flm[27][0])\n + (flm[15][0] - flm[28][0])\n + (flm[14][0] - flm[29][0])) / 3.0\n return var_r - var_l\n\n @staticmethod\n def set_process_file_method(log_changes, keep_original):\n \"\"\"\n Assigns the final file processing method based on whether changes are\n being logged and whether the original files are being kept in the\n input directory.\n Relevant cli arguments: -k, -l\n :return: function reference\n \"\"\"\n if log_changes:\n if keep_original:\n def process_file(src, dst, changes):\n \"\"\" Process file method if logging changes\n and keeping original \"\"\"\n copyfile(src, dst)\n changes[src] = dst\n\n else:\n def process_file(src, dst, changes):\n \"\"\" Process file method if logging changes\n and not keeping original \"\"\"\n os.rename(src, dst)\n changes[src] = dst\n\n else:\n if keep_original:\n def process_file(src, dst, changes): # pylint: disable=unused-argument\n \"\"\" Process file method if not logging changes\n and keeping original \"\"\"\n copyfile(src, dst)\n\n else:\n def process_file(src, dst, changes): # pylint: disable=unused-argument\n \"\"\" Process file method if not logging changes\n and not keeping original \"\"\"\n os.rename(src, dst)\n return process_file\n\n @staticmethod\n def set_renaming_method(log_changes):\n \"\"\" Set the method for renaming files \"\"\"\n if log_changes:\n def renaming(src, output_dir, i, changes):\n \"\"\" Rename files method if logging changes \"\"\"\n src_basename = os.path.basename(src)\n\n __src = os.path.join(output_dir,\n '{:05d}_{}'.format(i, src_basename))\n dst = os.path.join(\n output_dir,\n '{:05d}{}'.format(i, os.path.splitext(src_basename)[1]))\n changes[src] = dst\n return __src, dst\n else:\n def renaming(src, output_dir, i, changes): # pylint: disable=unused-argument\n \"\"\" Rename files method if not logging changes \"\"\"\n src_basename = os.path.basename(src)\n\n src = os.path.join(output_dir,\n '{:05d}_{}'.format(i, src_basename))\n dst = os.path.join(\n output_dir,\n '{:05d}{}'.format(i, os.path.splitext(src_basename)[1]))\n return src, dst\n return renaming\n\n @staticmethod\n def get_avg_score_hist(img1, references):\n \"\"\" Return the average histogram score between a face and\n reference image \"\"\"\n scores = []\n for img2 in references:\n score = cv2.compareHist(img1, img2, cv2.HISTCMP_BHATTACHARYYA)\n scores.append(score)\n return sum(scores) / len(scores)\n\n @staticmethod\n def get_avg_score_faces_cnn(fl1, references):\n \"\"\" Return the average CNN similarity score\n between a face and reference image \"\"\"\n scores = []\n for fl2 in references:\n score = np.sum(np.absolute((fl2 - fl1).flatten()))\n scores.append(score)\n return sum(scores) / len(scores)\n\n\ndef bad_args(args): # pylint: disable=unused-argument\n \"\"\" Print help on bad arguments \"\"\"\n PARSER.print_help()\n exit(0)\n\n\nif __name__ == \"__main__\":\n __WARNING_STRING = \"Important: face-cnn method will cause an error when \"\n __WARNING_STRING += \"this tool is called directly instead of through the \"\n __WARNING_STRING += \"tools.py 
command script.\"\n print(__WARNING_STRING)\n print(\"Images sort tool.\\n\")\n\n PARSER = FullHelpArgumentParser()\n SUBPARSER = PARSER.add_subparsers()\n SORT = cli.SortArgs(\n SUBPARSER, \"sort\", \"Sort images using various methods.\")\n PARSER.set_defaults(func=bad_args)\n ARGUMENTS = PARSER.parse_args()\n ARGUMENTS.func(ARGUMENTS)\n" ]
[ [ "numpy.array", "numpy.sqrt", "numpy.zeros", "numpy.var" ] ]
Simardeep27/pan-tensor
[ "1719d82fdedb5c7882699de193e01aa78c0d9f91" ]
[ "models/ffm.py" ]
[ "'''\nThis code is for FFM model in PAN.\n'''\nimport tensorflow as tf\n\n__all__ = ['FFM']\n\nclass FFM(tf.keras.Model):\n def __init__(self):\n super(FFM, self).__init__()\n\n def _upsample(self, x, size, scale=1):\n _, Hout, Wout, _ = size\n _, Hx, Wx, _ = x.shape\n return tf.keras.layers.UpSampling2D(size=(2*scale, 2*scale), interpolation='bilinear')(x)\n\n def call(self, f1_1, f2_1, f3_1, f4_1, f1_2, f2_2, f3_2, f4_2):\n f1 = f1_1 + f1_2\n f2 = f2_1 + f2_2\n f3 = f3_1 + f3_2\n f4 = f4_1 + f4_2\n f2 = self._upsample(f2, f1.shape, scale=1)\n f3 = self._upsample(f3, f1.shape, scale=2)\n f4 = self._upsample(f4, f1.shape, scale=4)\n f = tf.concat([f1, f2, f3, f4], 3)\n\n return f\n\n# unit testing\nif __name__ == '__main__':\n batch_size = 32\n Height = 640\n Width = 640\n Channel = 128\n f1_1 = tf.random.uniform(shape=[batch_size,Height//4,Width//4,Channel])\n f2_1 = tf.random.uniform(shape=[batch_size,Height//8,Width//8,Channel])\n f3_1 = tf.random.uniform(shape=[batch_size,Height//16,Width//16,Channel])\n f4_1 = tf.random.uniform(shape=[batch_size,Height//32,Width//32,Channel])\n\n f1_2 = tf.random.uniform(shape=[batch_size,Height//4,Width//4,Channel])\n f2_2 = tf.random.uniform(shape=[batch_size,Height//8,Width//8,Channel])\n f3_2 = tf.random.uniform(shape=[batch_size,Height//16,Width//16,Channel])\n f4_2 = tf.random.uniform(shape=[batch_size,Height//32,Width//32,Channel])\n\n ffm_model = FFM()\n f = ffm_model(f1_1, f2_1, f3_1, f4_1, f1_2, f2_2, f3_2, f4_2)\n print(\"FFM input layer 1 shape:\", f1_1.shape)\n print(\"FFM input layer 2 shape:\", f2_1.shape)\n print(\"FFM input layer 3 shape:\", f3_1.shape)\n print(\"FFM input layer 4 shape:\", f4_1.shape)\n print(\"FFM output shape:\", f.shape)" ]
[ [ "tensorflow.concat", "tensorflow.random.uniform", "tensorflow.keras.layers.UpSampling2D" ] ]
bernhardschaefer/yamlu
[ "14d621f9e17cdc7a652e0e5450eb4ce7b6b30f7c" ]
[ "tests/test_pytorch.py" ]
[ "import torch\n\nfrom yamlu.pytorch import isin, indices_to_mask\n\n\n# noinspection PyArgumentList\ndef test_indices_to_mask():\n indices = torch.LongTensor([0, 2, 3])\n mask_length = 5\n mask = indices_to_mask(indices, mask_length)\n assert torch.equal(mask, torch.BoolTensor([True, False, True, True, False]))\n\n\n# noinspection PyArgumentList\ndef test_isin():\n element = torch.LongTensor([0, 1, 3, 2, 1, 2])\n test_elements = torch.LongTensor([0, 1])\n res = isin(element, test_elements)\n assert res.tolist() == [1, 1, 0, 0, 1, 0]\n\n res = isin(element.to(torch.int), test_elements.to(torch.int))\n assert res.tolist() == [1, 1, 0, 0, 1, 0]\n\n res = isin(element, [0, 1])\n assert res.tolist() == [1, 1, 0, 0, 1, 0]\n" ]
[ [ "torch.LongTensor", "torch.BoolTensor" ] ]
Xtuden-com/tensor2robot
[ "a3674958a046de711e37445d39afd4e529d8dd09" ]
[ "preprocessors/abstract_preprocessor_test.py" ]
[ "# coding=utf-8\n# Copyright 2020 The Tensor2Robot Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for tensor2robot.preprocessors.abstract_preprocessor.\"\"\"\n\nfrom tensor2robot.preprocessors import abstract_preprocessor\nimport tensorflow.compat.v1 as tf\n\n\nclass AbstractPreprocessorTest(tf.test.TestCase):\n\n def test_init_abstract(self):\n with self.assertRaises(TypeError):\n abstract_preprocessor.AbstractPreprocessor()\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.compat.v1.test.main" ] ]
ZigmundRat/MPMD-AutoBedLevel-Cal
[ "7e3e416df2b3b9d5b8911f8eed11d4ae7f355b92" ]
[ "auto_cal_p5.py" ]
[ "#!/usr/bin/python\r\n\r\n# Updated version of the original script\r\n\r\n# Original Project Found here:\r\n# https://github.com/TechnoSwiss/MPMD-AutoBedLevel-Cal\r\n# This version was made to be compatible with Dennis Brown's G29 P5 Spreadsheet:\r\n# https://www.facebook.com/groups/mpminideltaowners/permalink/2186865287995612/\r\n# G29 P5 V4 Converted to manual probes for cross-firmware compatibility:\r\n# https://github.com/mcheah/Marlin4MPMD/wiki/Calibration#user-content-m665m666-delta-parameter-calibrations\r\n#\r\n# Full Instructions: https://www.facebook.com/groups/mpminideltaowners/permalink/2574670629215074/\r\n#\r\n# REQUIRES PYTHON3\r\n# REQUIRES SCIPY AND SERIAL\r\n#\r\n# sudo apt-get install python3-serial\r\n# sudo apt-get install python3-scipy\r\n#\r\n# Stock Firmware <=V41, V45 at 60 degrees C w/ Dennis's Defaults:\r\n# python3 auto_cal_p5.py -p /dev/ttyACM0 -ff 0 -tf 0 -r 63.5 -l 123.0 -s 57.14 -bt 60\r\n#\r\n# Stock V43 & V44 at 60 degrees C w/ Dennis's Defaults:\r\n# python3 auto_cal_p5.py -p /dev/ttyACM0 -ff 0 -tf 0 -r 63.5 -l 123.0 -s 114.28 -bt 60\r\n#\r\n# For Marlin, use the appropriate line for your stock firmware and replace \"-ff 0\" with \"-ff 1\"\r\n\r\nfrom serial import Serial, SerialException, PARITY_ODD, PARITY_NONE\r\nimport sys\r\nimport argparse\r\nimport traceback\r\nimport json\r\nimport statistics\r\nimport numpy as np\r\nfrom scipy.interpolate import griddata\r\n\r\n\r\n\r\ndef establish_serial_connection(port, speed=115200, timeout=10, writeTimeout=10000):\r\n # Hack for USB connection\r\n # There must be a way to do it cleaner, but I can't seem to find it\r\n try:\r\n temp = Serial(port, speed, timeout=timeout, writeTimeout=writeTimeout, parity=PARITY_ODD)\r\n if sys.platform == 'win32':\r\n temp.close()\r\n conn = Serial(port, speed, timeout=timeout, writeTimeout=writeTimeout, parity=PARITY_NONE)\r\n conn.setRTS(False)#needed on mac\r\n if sys.platform != 'win32':\r\n temp.close()\r\n return conn\r\n except SerialException as e:\r\n print (\"Could not connect to {0} at baudrate {1}\\nSerial error: {2}\".format(port, str(speed), e))\r\n return None\r\n except IOError as e:\r\n print (\"Could not connect to {0} at baudrate {1}\\nIO error: {2}\".format(port, str(speed), e))\r\n return None\r\n\r\ndef get_points(port):\r\n while True:\r\n out = port.readline().decode()\r\n if 'Bed ' in out:\r\n break\r\n\r\n return out.split(' ')\r\n\r\ndef get_current_values(port, firmFlag):\r\n # Replacing G29 P5 with manual probe points for cross-firmware compatibility\r\n # G28 ; home\r\n # G1 Z15 F6000; go to safe distance\r\n # Start Loop\r\n # G1 X## Y##; go to specified location\r\n # G30 ;probe bed for z values\r\n # G30 ;probe bed again for z values\r\n # End Loop\r\n # G28 ; return home\r\n \r\n # Initialize G29 P5 V4 Table\r\n number_cols = 7 \r\n number_rows = 21\r\n x_list = [None]*number_rows\r\n y_list = [None]*number_rows\r\n z1_list = [None]*number_rows\r\n z2_list = [None]*number_rows\r\n z_avg_list = [None]*number_rows\r\n dtap_list = [None]*number_rows\r\n dz_list = [None]*number_rows\r\n dz_test = [None]*number_rows\r\n \r\n # Define Table Indices\r\n ix = 0\r\n iy = 1\r\n iz1 = 2\r\n iz2 = 3\r\n izavg = 4 \r\n idtap = 5\r\n idz = 6\r\n \r\n # Assign X Coordinates (G29 P5)\r\n x_list[0] = -25\r\n x_list[1] = 0\r\n x_list[2] = 25\r\n x_list[3] = 50\r\n x_list[4] = 25\r\n x_list[5] = 0\r\n x_list[6] = -25\r\n x_list[7] = -50\r\n x_list[8] = -50\r\n x_list[9] = -25\r\n x_list[10] = 0\r\n x_list[11] = 25\r\n x_list[12] = 50\r\n x_list[13] = 50\r\n 
x_list[14] = 25\r\n x_list[15] = 0\r\n x_list[16] = -25\r\n x_list[17] = -50\r\n x_list[18] = -25\r\n x_list[19] = 0\r\n x_list[20] = 25\r\n \r\n # Assign Y Coordinates (G29 P5)\r\n y_list[0] = -50\r\n y_list[1] = -50\r\n y_list[2] = -50\r\n y_list[3] = -25\r\n y_list[4] = -25\r\n y_list[5] = -25\r\n y_list[6] = -25\r\n y_list[7] = -25\r\n y_list[8] = 0\r\n y_list[9] = 0\r\n y_list[10] = 0\r\n y_list[11] = 0\r\n y_list[12] = 0\r\n y_list[13] = 25\r\n y_list[14] = 25\r\n y_list[15] = 25\r\n y_list[16] = 25\r\n y_list[17] = 25\r\n y_list[18] = 50\r\n y_list[19] = 50\r\n y_list[20] = 50\r\n\r\n # Send Gcodes\r\n port.write(('G28\\n').encode()) # Home\r\n \r\n if firmFlag == 1: \r\n # Marlin\r\n port.write(('G1 Z15 F6000\\n').encode()) # Move to safe distance\r\n else:\r\n # Stock Firmware\r\n port.write(('G29 P5 V4\\n').encode())\r\n\r\n while True:\r\n out = port.readline().decode()\r\n #print(\"{0}\\n\".format(out))\r\n if 'G29 Auto Bed Leveling' in out:\r\n break\r\n \r\n # Loop through all \r\n for ii in range(len(x_list)):\r\n \r\n if firmFlag == 1: \r\n # Marlin\r\n \r\n # Move to desired position\r\n port.write(('G1 X{0} Y{1}\\n'.format(x_list[ii], y_list[ii])).encode()) \r\n #print('Sending G1 X{0} Y{1}\\n'.format(x_list[ii], y_list[ii]))\r\n \r\n # Probe Z values\r\n port.write(('G30\\n').encode())\r\n z_axis_1 = get_points(port)\r\n port.write(('G30\\n').encode())\r\n z_axis_2 = get_points(port)\r\n else:\r\n # Stock Firmware\r\n z_axis_1 = get_points(port)\r\n z_axis_2 = get_points(port)\r\n \r\n # Populate most of the table values\r\n z1_list[ii] = float(z_axis_1[6])\r\n z2_list[ii] = float(z_axis_2[6])\r\n z_avg_list[ii] = float(\"{0:.4f}\".format((z1_list[ii] + z2_list[ii]) / 2.0))\r\n dtap_list[ii] = z2_list[ii] - z1_list[ii]\r\n #print('Received: X:{0} X:{1} Y:{2} Y:{3} Z1:{4} Z2:{5}\\n\\n'.format(str(x_list[ii]), str(z_axis_1[2]), str(y_list[ii]), str(z_axis_1[4]), z1_list[ii], z2_list[ii]))\r\n \r\n # Find the Median Reference\r\n z_med = statistics.median(z_avg_list)\r\n \r\n # Calculate z diff\r\n for ii in range(len(x_list)):\r\n dz_list[ii] = z_avg_list[ii] - z_med\r\n \r\n # Empty out remaining lines for stock firmware\r\n if firmFlag == 0: \r\n for ii in range(6):\r\n out = port.readline().decode()\r\n \r\n return x_list, y_list, z1_list, z2_list, z_avg_list, dtap_list, dz_list\r\n\r\ndef xyz_list2array(xl,yl,zl):\r\n # Create Contour Lookup/Interpolation Function\r\n coord_xy_list = []\r\n coord_z_list = []\r\n for ii in range(len(xl)):\r\n coord_xy_list.append([float(xl[ii]),float(yl[ii])])\r\n coord_z_list.append(float(zl[ii]))\r\n \r\n # Convert to Array\r\n xy_array = np.array(coord_xy_list)\r\n z_array = np.array(coord_z_list)\r\n \r\n return xy_array, z_array\r\n\r\ndef linear_interp(x0, x1, z0, z1, xq):\r\n zq = z0 + (xq-x0)*(z1-z0)/(x1-x0)\r\n return zq\r\n \r\ndef calculate_contour(x_list, y_list, dz_list, runs, xhigh, yhigh, zhigh, minterp, tower_flag):\r\n \r\n # Redefine Lists\r\n x_list_new = x_list.copy()\r\n y_list_new = y_list.copy()\r\n dz_list_new = dz_list.copy()\r\n \r\n # Define contour boundaries and steps\r\n xmin = min(x_list_new)\r\n xmax = max(x_list_new)\r\n ymin = min(y_list_new)\r\n ymax = max(y_list_new)\r\n dprobe = 25.0; # Distance Betqeen Probe Points\r\n ngrid = 3.0 # Grid Cell Spacing for the Contour\r\n dx = dprobe/ngrid\r\n dy = dx\r\n nmax = int(round((ymax-ymin)/dy))\r\n \r\n # Create Contour Lookup/Interpolation Function\r\n coord_xy, coord_z = xyz_list2array(x_list_new,y_list_new,dz_list_new)\r\n \r\n # Copy Equations from 
Dennis's Spreadsheet and put them in the lookup grid\r\n # Put inside if statement incase we want to try other interpolation methods\r\n # Anything other than 1 simply uses Python's griddata with the probed points.\r\n if minterp == 1: \r\n \r\n # Fill in based on known values across the horizontal\r\n iside = -1\r\n nknown = int(round((ymax-ymin)/dprobe))\r\n for iy in range(nknown):\r\n # Current Fixed Value\r\n ytmp = ymin + float(iy)*dprobe\r\n y0 = ytmp\r\n y1 = ytmp\r\n yq = ytmp\r\n # Set Start/End indices for circular base\r\n if ytmp == ymin or ytmp == ymax:\r\n iStart = int(round(ngrid))\r\n iEnd = nmax-int(round(ngrid))\r\n else:\r\n iStart = 0\r\n iEnd = nmax-1\r\n # Loop through all x-values\r\n for ix in range(nmax): \r\n xq = xmin + float(ix)*dx\r\n #print(\"x={0} y={1} ix={2} iy={3} mod={4} iStart = {5} iEnd = {6}\\n\\n\".format(str(xq),str(yq),str(ix),str(iy),str(ix%ngrid),str(iStart),str(iEnd)))\r\n if ix >= iStart and ix <= iEnd:\r\n if int(round(ix%ngrid)) == 0: # Set Known Values\r\n if ix == iStart:\r\n x0 = xmin + dx*float(ix)\r\n x1 = x0 + dprobe\r\n #print(\"iStart={0}\\n\".format(str(iStart)))\r\n #print(\"Known Point x0 = {0}\".format(str(x0)))\r\n else:\r\n x0 = x1\r\n x1 = x0 + dprobe\r\n #print(\"Known Point\\n\")\r\n #print(\"Known Point x0 = {0}\".format(str(x0)))\r\n z0 = float(griddata(coord_xy, coord_z, (x0 , y0)))\r\n z1 = float(griddata(coord_xy, coord_z, (x1 , y1)))\r\n else: # Interpolate Between Known Values\r\n zq = linear_interp(x0, x1, z0, z1, xq)\r\n x_list_new.append(xq)\r\n y_list_new.append(yq)\r\n dz_list_new.append(zq)\r\n #print(\"Interp Test: {0} {1} {2}\\n\".format(str(xq),str(yq),str(zq)))\r\n #print(\"z0={0} z1={1}\".format(str(z0),str(z1)))\r\n #else:\r\n #print(\"Outside of grid\\n\")\r\n \r\n # Fill in based on known values across the vertical\r\n nknown = int(round((xmax-xmin)/dprobe))\r\n for ix in range(nknown):\r\n # Current Fixed Value\r\n xtmp = xmin + float(ix)*dprobe\r\n x0 = xtmp\r\n x1 = xtmp\r\n xq = xtmp\r\n # Set Start/End indices for circular base\r\n if xtmp == xmin or xtmp == xmax:\r\n iStart = int(round(ngrid))\r\n iEnd = nmax-int(round(ngrid))\r\n else:\r\n iStart = 0\r\n iEnd = nmax-1\r\n # Loop through all y-values\r\n for iy in range(nmax): \r\n yq = ymin + float(iy)*dy\r\n #print(\"x={0} y={1} ix={2} iy={3} mod={4} iStart = {5} iEnd = {6}\\n\\n\".format(str(xq),str(yq),str(ix),str(iy),str(ix%ngrid),str(iStart),str(iEnd)))\r\n if iy >= iStart and iy <= iEnd:\r\n if int(round(iy%ngrid)) == 0: # Set Known Values\r\n if iy == iStart:\r\n y0 = ymin + dy*float(iy)\r\n y1 = y0 + dprobe\r\n #print(\"iStart={0}\\n\".format(str(iStart)))\r\n #print(\"Known Point y0 = {0} y1 = {1} yq = {2}\".format(str(y0), str(y1), str(yq)))\r\n else:\r\n y0 = y1\r\n y1 = y0 + dprobe\r\n #print(\"Known Point\\n\")\r\n #print(\"Known Point y0 = {0} y1 = {1} yq = {2}\".format(str(y0), str(y1), str(yq)))\r\n z0 = float(griddata(coord_xy, coord_z, (x0 , y0)))\r\n #print(\"x0={0} y0={1} z0={2}\".format(str(x0), str(y0), str(z0)))\r\n z1 = float(griddata(coord_xy, coord_z, (x1 , y1)))\r\n #print(\"x1={0} y1={1} z1={2}\".format(str(x1), str(y1), str(z1)))\r\n else: # Interpolate Between Known Values\r\n zq = linear_interp(y0, y1, z0, z1, yq)\r\n x_list_new.append(xq)\r\n y_list_new.append(yq)\r\n dz_list_new.append(zq)\r\n #if xtmp == 0.0:\r\n #print(\"Interp Test: {0} {1} {2}\".format(str(xq),str(yq),str(zq)))\r\n #print(\"z0={0} z1={1}\\n\".format(str(z0),str(z1)))\r\n #else:\r\n #print(\"Outside of grid\\n\")\r\n \r\n \r\n # Manually set 
corner points\r\n # Top Left\r\n L6 = float(griddata(coord_xy, coord_z, (-50.0, 25.0)))\r\n O3 = float(griddata(coord_xy, coord_z, (-25.0, 50.0)))\r\n x_list_new.append(-50.0+dx)\r\n y_list_new.append(25.0+dy)\r\n dz_list_new.append((O3-L6)/3.0+L6)\r\n x_list_new.append(-50.0+2.0*dx)\r\n y_list_new.append(25.0+2.0*dy)\r\n dz_list_new.append((L6-O3)/3+O3)\r\n # Top Right\r\n X6 = float(griddata(coord_xy, coord_z, (50.0, 25.0)))\r\n U3 = float(griddata(coord_xy, coord_z, (25.0, 50.0)))\r\n x_list_new.append(50.0-dx)\r\n y_list_new.append(25.0+dy)\r\n dz_list_new.append((U3-X6)/3+X6)\r\n x_list_new.append(50.0-2.0*dx)\r\n y_list_new.append(25.0+2.0*dy)\r\n dz_list_new.append((X6-U3)/3+U3)\r\n # Bottom Right\r\n X12 = float(griddata(coord_xy, coord_z, (50.0, -25.0)))\r\n U15 = float(griddata(coord_xy, coord_z, (25.0, -50.0)))\r\n x_list_new.append(50.0-dx)\r\n y_list_new.append(-25.0-dy)\r\n dz_list_new.append((U15-X12)/3+X12)\r\n x_list_new.append(50.0-2.0*dx)\r\n y_list_new.append(-25.0-2.0*dy)\r\n dz_list_new.append((X12-U15)/3+U15)\r\n # Bottom Left\r\n L12 = float(griddata(coord_xy, coord_z, (-50.0, -25.0)))\r\n O15 = float(griddata(coord_xy, coord_z, (-25.0, -50.0)))\r\n x_list_new.append(-50.0+dx)\r\n y_list_new.append(-25.0-dy)\r\n dz_list_new.append((O15-L12)/3+L12)\r\n x_list_new.append(-50.0+2.0*dx)\r\n y_list_new.append(-25.0-2.0*dy)\r\n dz_list_new.append((L12-O15)/3+O15)\r\n \r\n # Reset gridddata arrays now that we're using calculated values\r\n coord_xy, coord_z = xyz_list2array(x_list_new,y_list_new,dz_list_new)\r\n \r\n # Fill in remaining points used in actual calculations\r\n \r\n # Tower X\r\n M9 = float(griddata(coord_xy, coord_z, (-50.0+dx, 0.0)))\r\n M12 = float(griddata(coord_xy, coord_z, (-50.0+dx, -25.0)))\r\n x_list_new.append(-50.0+dx)\r\n y_list_new.append(-25.0+dy)\r\n dz_list_new.append((M9-M12)/3.0+M12)\r\n #print(\"M9={0} M12={1} x={2} y={3} z={4}\".format(str(M9),str(M12),str(-50.0+dx),str(-25.0+dy),str((M9-M12)/3.0+M12)))\r\n \r\n # Tower Y\r\n W9 = float(griddata(coord_xy, coord_z, (50.0-dx, 0.0)))\r\n W12 = float(griddata(coord_xy, coord_z, (50.0-dx, -25.0)))\r\n x_list_new.append(50.0-dx)\r\n y_list_new.append(-25.0+dy)\r\n dz_list_new.append((W9-W12)/3.0+W12)\r\n #print(\"W9={0} W12={1} x={2} y={3} z={4}\".format(str(W9),str(W12),str(50.0-dx),str(-25.0+dy),str((W9-W12)/3.0+W12)))\r\n \r\n # Tower Z\r\n Q3 = float(griddata(coord_xy, coord_z, (0.0-dx, 50.0)))\r\n Q6 = float(griddata(coord_xy, coord_z, (0.0-dx, 25.0)))\r\n x_list_new.append(0.0-dx)\r\n y_list_new.append(50.0-dy)\r\n dz_list_new.append((Q6-Q3)/3.0+Q3)\r\n #print(\"Q3={0} Q6={1} x={2} y={3} z={4}\".format(str(Q3),str(Q6),str(0.0-dx),str(50.0-dy),str((Q6-Q3)/3.0+Q3)))\r\n S3 = float(griddata(coord_xy, coord_z, (0.0+dx, 50.0)))\r\n S6 = float(griddata(coord_xy, coord_z, (0.0+dx, 25.0)))\r\n x_list_new.append(0.0+dx)\r\n y_list_new.append(50.0-dy)\r\n dz_list_new.append((S6-S3)/3.0+S3)\r\n #print(\"S3={0} S6={1} x={2} y={3} z={4}\".format(str(Q3),str(Q6),str(0.0+dx),str(50.0-dy),str((S6-S3)/3.0+S3)))\r\n \r\n # Outside Ring\r\n # No additional points\r\n \r\n # Center\r\n Q9 = float(griddata(coord_xy, coord_z, (0.0-dx, 0.0)))\r\n S9 = float(griddata(coord_xy, coord_z, (0.0+dx, 0.0)))\r\n Q7 = (Q9-Q6)/3.0+Q6\r\n S7 = (S9-S6)/3.0+S6\r\n Q12 = float(griddata(coord_xy, coord_z, (0.0-dx, -25.0)))\r\n S12 = float(griddata(coord_xy, coord_z, (0.0+dx, -25.0)))\r\n x_list_new.append(0.0-dx)\r\n y_list_new.append(0.0+dy)\r\n dz_list_new.append((Q7-Q9)/2.0+Q9)\r\n x_list_new.append(0.0+dx)\r\n 
y_list_new.append(0.0+dy)\r\n dz_list_new.append((S7-S9)/2.0+S9)\r\n x_list_new.append(0.0-dx)\r\n y_list_new.append(0.0-dy)\r\n dz_list_new.append((Q12-Q9)/3.0+Q9)\r\n x_list_new.append(0.0+dx)\r\n y_list_new.append(0.0-dy)\r\n dz_list_new.append((S12-S9)/3+S9)\r\n \r\n # Convert final values to Array\r\n coord_xy, coord_z = xyz_list2array(x_list_new,y_list_new,dz_list_new)\r\n\r\n \r\n # North Tilt (opposite of LCD)\r\n x0 = xmin\r\n y0 = ymin/2\r\n ntower = float(griddata(coord_xy, coord_z, (x0, y0)))\r\n TN_list = [None]*5\r\n TN_list[0] = float(griddata(coord_xy, coord_z, (x0 , y0)))\r\n TN_list[1] = float(griddata(coord_xy, coord_z, (x0 , y0+dy)))\r\n TN_list[2] = float(griddata(coord_xy, coord_z, (x0+dx, y0)))\r\n TN_list[3] = float(griddata(coord_xy, coord_z, (x0+dx, y0+dy)))\r\n TN_list[4] = float(griddata(coord_xy, coord_z, (x0+dx, y0-dy)))\r\n #print(\"TN Values\\n\")\r\n #print(*TN_list, sep='\\n\\n')\r\n #print(\"\\n\")\r\n TN = float(statistics.mean(TN_list))\r\n \r\n # West Tilt (left of LCD)\r\n x0 = xmax\r\n y0 = ymin/2\r\n wtower = float(griddata(coord_xy, coord_z, (x0, y0)))\r\n TW_list = [None]*5\r\n TW_list[0] = float(griddata(coord_xy, coord_z, (x0 , y0)))\r\n TW_list[1] = float(griddata(coord_xy, coord_z, (x0 , y0+dy)))\r\n TW_list[2] = float(griddata(coord_xy, coord_z, (x0-dx, y0))) # problem\r\n TW_list[3] = float(griddata(coord_xy, coord_z, (x0-dx, y0+dy)))\r\n TW_list[4] = float(griddata(coord_xy, coord_z, (x0-dx, y0-dy)))\r\n #print(\"TW Values\\n\")\r\n #print(*TW_list, sep='\\n\\n')\r\n #print(\"\\n\")\r\n TW = float(statistics.mean(TW_list))\r\n \r\n # East Tilt (right of LCD)\r\n x0 = 0.0\r\n y0 = ymax\r\n etower = float(griddata(coord_xy, coord_z, (x0, y0)))\r\n TE_list = [None]*6\r\n TE_list[0] = float(griddata(coord_xy, coord_z, (x0-dx, y0)))\r\n TE_list[1] = float(griddata(coord_xy, coord_z, (x0 , y0)))\r\n TE_list[2] = float(griddata(coord_xy, coord_z, (x0+dx, y0)))\r\n TE_list[3] = float(griddata(coord_xy, coord_z, (x0-dx, y0-dy)))\r\n TE_list[4] = float(griddata(coord_xy, coord_z, (x0 , y0-dy)))\r\n TE_list[5] = float(griddata(coord_xy, coord_z, (x0+dx, y0-dy)))\r\n #print(\"TE Values\\n\")\r\n #print(*TE_list, sep='\\n\\n')\r\n #print(\"\\n\")\r\n TE = float(statistics.mean(TE_list))\r\n \r\n # Bowl Stats - Center\r\n x0 = 0.0\r\n y0 = 0.0\r\n BC_list = [None]*9\r\n BC_list[0] = float(griddata(coord_xy, coord_z, (x0-dx, y0+dy)))\r\n BC_list[1] = float(griddata(coord_xy, coord_z, (x0 , y0+dy)))\r\n BC_list[2] = float(griddata(coord_xy, coord_z, (x0+dx, y0+dy)))\r\n BC_list[3] = float(griddata(coord_xy, coord_z, (x0-dx, y0)))\r\n BC_list[4] = float(griddata(coord_xy, coord_z, (x0 , y0)))\r\n BC_list[5] = float(griddata(coord_xy, coord_z, (x0+dx, y0)))\r\n BC_list[6] = float(griddata(coord_xy, coord_z, (x0-dx, y0-dy)))\r\n BC_list[7] = float(griddata(coord_xy, coord_z, (x0 , y0-dy)))\r\n BC_list[8] = float(griddata(coord_xy, coord_z, (x0+dx, y0-dy)))\r\n #print(\"Bowl Center: \\n\")\r\n #print(*BC_list, sep='\\n\\n')\r\n #print(\"\\n\")\r\n BowlCenter = float(statistics.mean(BC_list))\r\n \r\n # Bowl Stats - Outside Ring\r\n OR_list = [None]*12\r\n # Left\r\n OR_list[0] = float(griddata(coord_xy, coord_z, (xmin, ymin/2.0)))\r\n OR_list[1] = float(griddata(coord_xy, coord_z, (xmin, 0.0)))\r\n OR_list[2] = float(griddata(coord_xy, coord_z, (xmin, ymax/2.0)))\r\n # Right\r\n OR_list[3] = float(griddata(coord_xy, coord_z, (xmax, ymin/2.0)))\r\n OR_list[4] = float(griddata(coord_xy, coord_z, (xmax, 0.0)))\r\n OR_list[5] = float(griddata(coord_xy, coord_z, 
(xmax, ymax/2.0)))\r\n # Top\r\n OR_list[6] = float(griddata(coord_xy, coord_z, (xmin/2.0, ymax)))\r\n OR_list[7] = float(griddata(coord_xy, coord_z, ( 0.0, ymax)))\r\n OR_list[8] = float(griddata(coord_xy, coord_z, (xmax/2.0, ymax)))\r\n # Bottom\r\n OR_list[9] = float(griddata(coord_xy, coord_z, (xmin/2.0, ymin)))\r\n OR_list[10] = float(griddata(coord_xy, coord_z, ( 0.0, ymin)))\r\n OR_list[11] = float(griddata(coord_xy, coord_z, (xmax/2.0, ymin)))\r\n BowlOR = float(statistics.median(OR_list))\r\n #print(\"Outer Ring Values: \\n\")\r\n #print(*OR_list, sep='\\n\\n')\r\n #print(\"\\n\")\r\n #print(\"BowlOR = {0:.4f}\".format(BowlOR))\r\n \r\n # Assign Towers to the current Tower X/Y/Z configuration (default is stock)\r\n TX = TN\r\n xtower = ntower\r\n TY = TW\r\n ytower = wtower\r\n TZ = TE\r\n ztower = etower\r\n if tower_flag == 1: \r\n TX = TE\r\n xtower = etower\r\n TY = TN\r\n ytower = ntower\r\n TZ = TW\r\n ztower = wtower\r\n elif tower_flag == 2: \r\n TX = TW\r\n xtower = wtower\r\n TY = TE\r\n ytower = etower\r\n TZ = TN\r\n ztower = ntower\r\n \r\n # Define Pass # according to the spreadsheet\r\n pass_num = runs - 1\r\n \r\n if pass_num == 0:\r\n # Check X Tower\r\n if xtower > ytower and xtower > ztower:\r\n xhigh[1] = 1\r\n \r\n # Check Y Tower\r\n if ytower > xtower and ytower > ztower:\r\n yhigh[1] = 1\r\n \r\n # Check Z Tower\r\n if ztower > xtower and ztower > ytower:\r\n zhigh[1] = 1\r\n \r\n # Save Values\r\n xhigh[0] = xhigh[1]\r\n yhigh[0] = yhigh[1]\r\n zhigh[0] = zhigh[1]\r\n else: \r\n xhigh[1] = 0\r\n yhigh[1] = 0\r\n zhigh[1] = 0\r\n\r\n # Calculate High Parameter\r\n iHighTower = -1\r\n if xhigh[0] == 1:\r\n THigh = TX\r\n iHighTower = 0\r\n elif yhigh[0] == 1:\r\n THigh = TY\r\n iHighTower = 1\r\n else:\r\n THigh = TZ\r\n iHighTower = 2\r\n \r\n # Return Results\r\n return TX, TY, TZ, THigh, BowlCenter, BowlOR, xhigh, yhigh, zhigh, iHighTower\r\n \r\n \r\ndef determine_error(TX, TY, TZ, THigh, BowlCenter, BowlOR):\r\n z_error = float(\"{0:.4f}\".format(TZ - THigh))\r\n x_error = float(\"{0:.4f}\".format(TX - THigh))\r\n y_error = float(\"{0:.4f}\".format(TY - THigh))\r\n c_error = float(\"{0:.4f}\".format(BowlCenter - BowlOR))\r\n print('Z-Error: ' + str(z_error) + ' X-Error: ' + str(x_error) + ' Y-Error: ' + str(y_error) + ' C-Error: ' + str(c_error) + '\\n')\r\n\r\n return z_error, x_error, y_error, c_error\r\n \r\n\r\ndef calibrate(port, z_error, x_error, y_error, c_error, trial_x, trial_y, trial_z, l_value, r_value, iHighTower, max_runs, runs):\r\n calibrated = True\r\n if abs(z_error) >= 0.02:\r\n if iHighTower == 2:\r\n new_z = float(\"{0:.4f}\".format(0.0))\r\n else:\r\n new_z = float(\"{0:.4f}\".format(z_error + trial_z))\r\n calibrated = False\r\n else:\r\n new_z = trial_z\r\n\r\n if abs(x_error) >= 0.02:\r\n if iHighTower == 0:\r\n new_x = float(\"{0:.4f}\".format(0.0))\r\n else:\r\n new_x = float(\"{0:.4f}\".format(x_error + trial_x))\r\n calibrated = False\r\n else:\r\n new_x = trial_x\r\n\r\n if abs(y_error) >= 0.02:\r\n if iHighTower == 1:\r\n new_y = float(\"{0:.4f}\".format(0.0))\r\n else:\r\n new_y = float(\"{0:.4f}\".format(y_error + trial_y))\r\n calibrated = False\r\n else:\r\n new_y = trial_y\r\n\r\n if abs(c_error) >= 0.02:\r\n new_r = float(\"{0:.4f}\".format(r_value - 4.0*c_error))\r\n calibrated = False\r\n else:\r\n new_r = r_value\r\n \r\n new_l = float(\"{0:.4f}\".format(1.5*(new_r-r_value) + l_value))\r\n\r\n # making sure I am sending the lowest adjustment value\r\n #diff = 100\r\n #for i in [new_z, new_x ,new_y]:\r\n # if 
abs(0-i) < diff:\r\n # diff = 0-i\r\n #new_z += diff\r\n #new_x += diff\r\n #new_y += diff\r\n\r\n if calibrated:\r\n print (\"Final values\\nM666 Z{0} X{1} Y{2} \\nM665 L{3} R{4}\".format(str(new_z),str(new_x),str(new_y),str(new_l),str(new_r)))\r\n else:\r\n set_M_values(port, new_z, new_x, new_y, new_l, new_r)\r\n\r\n return calibrated, new_z, new_x, new_y, new_l, new_r\r\n\r\ndef set_M_values(port, z, x, y, l, r):\r\n\r\n print (\"Setting values M666 X{0} Y{1} Z{2}, M665 L{3} R{4}\".format(str(x),str(y),str(z),str(l),str(r)))\r\n\r\n port.write(('M666 X{0} Y{1} Z{2}\\n'.format(str(x), str(y), str(z))).encode())\r\n out = port.readline().decode()\r\n port.write(('M665 L{0} R{1}\\n'.format(str(l),str(r))).encode())\r\n out = port.readline().decode()\r\n \r\ndef output_pass_text(runs, trial_x, trial_y, trial_z, l_value, r_value, iHighTower, x_list, y_list, z1_list, z2_list): \r\n\r\n # Get the pass number corresponding to Dennis's spreadsheet\r\n pass_num = int(runs-1)\r\n \r\n # Create the file\r\n file_object = open(\"auto_cal_p5_pass{0}.txt\".format(str(pass_num)), \"w\")\r\n \r\n # Output current pass values\r\n file_object.write(\"M666 X{0:.2f} Y{1:.2f} Z{2:.2f}\\r\\n\".format(float(trial_x), float(trial_y), float(trial_z))) \r\n file_object.write(\"M665 L{0:.4f} R{1:.4f}\\r\\n\".format(float(l_value), float(r_value))) \r\n file_object.write(\"\\r\\n\") \r\n \r\n # Highest Tower Value\r\n if int(iHighTower) == 0:\r\n file_object.write(\"Highest Tower: X\\r\\n\") \r\n elif int(iHighTower) == 1:\r\n file_object.write(\"Highest Tower: Y\\r\\n\") \r\n else: \r\n file_object.write(\"Highest Tower: Z\\r\\n\") \r\n \r\n # Output Grid Points\r\n file_object.write(\"\\r\\n\") \r\n file_object.write(\"\\r\\n\") \r\n file_object.write(\"< 01:02:03 PM: G29 Auto Bed Leveling\\r\\n\") \r\n for ii in range(len(x_list)):\r\n file_object.write(\"< 01:02:03 PM: Bed X: {0:.3f} Y: {1:.3f} Z: {2:.3f}\\r\\n\".format(float(x_list[ii]), float(y_list[ii]), float(z1_list[ii]))) \r\n file_object.write(\"< 01:02:03 PM: Bed X: {0:.3f} Y: {1:.3f} Z: {2:.3f}\\r\\n\".format(float(x_list[ii]), float(y_list[ii]), float(z2_list[ii]))) \r\n \r\n # Close file stream\r\n file_object.close() \r\n \r\n return\r\n\r\n\r\ndef run_calibration(port, firmFlag, trial_x, trial_y, trial_z, l_value, r_value, xhigh, yhigh, zhigh, max_runs, max_error, bed_temp, minterp, tower_flag, runs=0):\r\n runs += 1\r\n\r\n if runs > max_runs:\r\n sys.exit(\"Too many calibration attempts\")\r\n print('\\nCalibration pass {1}, run {2} out of {0}'.format(str(max_runs), str(runs-1), str(runs)))\r\n \r\n # Make sure the bed doesn't go cold\r\n if bed_temp >= 0: \r\n port.write('M140 S{0}\\n'.format(str(bed_temp)).encode())\r\n \r\n # Read G30 values and calculate values in columns B through H\r\n x_list, y_list, z1_list, z2_list, z_avg_list, dtap_list, dz_list = get_current_values(port, firmFlag)\r\n \r\n # Generate the P5 contour map\r\n TX, TY, TZ, THigh, BowlCenter, BowlOR, xhigh, yhigh, zhigh, iHighTower = calculate_contour(x_list, y_list, dz_list, runs, xhigh, yhigh, zhigh, minterp, tower_flag)\r\n \r\n # Output current pass results\r\n output_pass_text(runs, trial_x, trial_y, trial_z, l_value, r_value, iHighTower, x_list, y_list, z1_list, z2_list)\r\n \r\n # Output Debugging Info\r\n #file_object = open(\"debug_pass{0:d}.csv\".format(int(runs-1)), \"w\")\r\n #file_object.write(\"X,Y,Z1,Z2,Z avg,Tap diff,Z diff,TX,TY,TZ,THigh,BowlCenter,BowlOR\\r\\n\") \r\n #z_med = statistics.median(z_avg_list)\r\n #for ii in range(len(x_list)):\r\n # 
dz_list[ii] = z_avg_list[ii] - z_med\r\n # file_object.write(\"{0:.4f},{1:.4f},{2:.4f},{3:.4f},\".format(float(x_list[ii]),float(y_list[ii]),float(z1_list[ii]),float(z2_list[ii])))\r\n # file_object.write(\"{0:.4f},{1:.4f},{2:.4f},\".format(float(z_avg_list[ii]),float(dtap_list[ii]),float(dz_list[ii])))\r\n # file_object.write(\"{0:.4f},{1:.4f},{2:.4f},{3:.4f},{4:.4f},{5:.4f}\\r\\n\".format(float(TX),float(TY),float(TZ),float(THigh),float(BowlCenter),float(BowlOR)))\r\n #file_object.close() \r\n \r\n # Calculate Error\r\n z_error, x_error, y_error, c_error = determine_error(TX, TY, TZ, THigh, BowlCenter, BowlOR)\r\n \r\n if abs(max([z_error, x_error, y_error, c_error], key=abs)) > max_error and runs > 1:\r\n sys.exit(\"Calibration error on non-first run exceeds set limit\")\r\n\r\n calibrated, new_z, new_x, new_y, new_l, new_r = calibrate(port, z_error, x_error, y_error, c_error, trial_x, trial_y, trial_z, l_value, r_value, iHighTower, max_runs, runs)\r\n \r\n if calibrated:\r\n print (\"Calibration complete\")\r\n else:\r\n calibrated, new_z, new_x, new_y, new_l, new_r, xhigh, yhigh, zhigh = run_calibration(port, firmFlag, new_x, new_y, new_z, new_l, new_r, xhigh, yhigh, zhigh, max_runs, max_error, bed_temp, minterp, tower_flag, runs)\r\n\r\n return calibrated, new_z, new_x, new_y, new_l, new_r, xhigh, yhigh, zhigh\r\n\r\ndef main():\r\n # Default values\r\n max_runs = 14\r\n max_error = 1\r\n\r\n x0 = 0.0\r\n y0 = 0.0\r\n z0 = 0.0\r\n trial_z = x0\r\n trial_x = y0\r\n trial_y = z0\r\n r_value = 63.5\r\n step_mm = 57.14\r\n l_value = 123.0\r\n xhigh = [0]*2\r\n yhigh = [0]*2\r\n zhigh = [0]*2\r\n bed_temp = -1\r\n minterp = 0\r\n firmFlag = 0\r\n tower_flag = 0\r\n\r\n parser = argparse.ArgumentParser(description='Auto-Bed Cal. for Monoprice Mini Delta')\r\n parser.add_argument('-p','--port',help='Serial port',required=True)\r\n parser.add_argument('-x','--x0',type=float,default=x0,help='Starting x-value')\r\n parser.add_argument('-y','--y0',type=float,default=y0,help='Starting y-value')\r\n parser.add_argument('-z','--z0',type=float,default=z0,help='Starting z-value')\r\n parser.add_argument('-r','--r-value',type=float,default=r_value,help='Starting r-value')\r\n parser.add_argument('-l','--l-value',type=float,default=l_value,help='Starting l-value')\r\n parser.add_argument('-s','--step-mm',type=float,default=step_mm,help='Set steps-/mm')\r\n parser.add_argument('-me','--max-error',type=float,default=max_error,help='Maximum acceptable calibration error on non-first run')\r\n parser.add_argument('-mr','--max-runs',type=int,default=max_runs,help='Maximum attempts to calibrate printer')\r\n parser.add_argument('-bt','--bed-temp',type=int,default=bed_temp,help='Bed Temperature')\r\n parser.add_argument('-im','--minterp',type=int,default=minterp,help='Intepolation Method')\r\n parser.add_argument('-ff','--firmFlag',type=int,default=firmFlag,help='Firmware Flag (0 = Stock; 1 = Marlin)')\r\n parser.add_argument('-tf','--tower_flag',type=int,default=tower_flag,help='Tower Flag (0 = Stock and old Marlin; 1 = Marlin 1.3.3, 2 = experimental)')\r\n parser.add_argument('-f','--file',type=str,dest='file',default=None,\r\n help='File with settings, will be updated with latest settings at the end of the run')\r\n args = parser.parse_args()\r\n\r\n port = establish_serial_connection(args.port) \r\n\r\n if args.file:\r\n try:\r\n with open(args.file) as data_file:\r\n settings = json.load(data_file)\r\n tower_flag = int(settings.get('tower_flag', tower_flag))\r\n firmFlag = int(settings.get('firmFlag', 
firmFlag))\r\n minterp = int(settings.get('minterp', minterp))\r\n bed_temp = int(settings.get('bed_temp', bed_temp))\r\n max_runs = int(settings.get('max_runs', max_runs))\r\n max_error = float(settings.get('max_error', max_error))\r\n trial_z = float(settings.get('z', trial_z))\r\n trial_x = float(settings.get('x', trial_x))\r\n trial_y = float(settings.get('y', trial_y))\r\n r_value = float(settings.get('r', r_value))\r\n l_value = float(settings.get('l', l_value))\r\n step_mm = float(settings.get('step', step_mm))\r\n\r\n except:\r\n tower_flag = args.tower_flag\r\n firmFlag = args.firmFlag\r\n minterp = args.minterp\r\n bed_temp = args.bed_temp\r\n max_error = args.max_error\r\n max_runs = args.max_runs\r\n trial_z = args.z0\r\n trial_x = args.x0\r\n trial_y = args.y0\r\n r_value = args.r_value\r\n step_mm = args.step_mm\r\n max_runs = args.max_runs\r\n l_value = args.l_value\r\n pass\r\n else: \r\n tower_flag = args.tower_flag\r\n firmFlag = args.firmFlag\r\n minterp = args.minterp\r\n bed_temp = args.bed_temp\r\n max_error = args.max_error\r\n max_runs = args.max_runs\r\n trial_z = args.z0\r\n trial_x = args.x0\r\n trial_y = args.y0\r\n r_value = args.r_value\r\n step_mm = args.step_mm\r\n max_runs = args.max_runs\r\n l_value = args.l_value\r\n \r\n if port:\r\n \r\n # Firmware\r\n if firmFlag == 0:\r\n print(\"Using Monoprice Firmware\\n\")\r\n elif firmFlag == 1:\r\n print(\"Using Marlin Firmware\\n\")\r\n \r\n # Tower Setup\r\n if tower_flag == 0:\r\n print(\"Stock Tower Setup (X opposite of LCD)\\n\")\r\n elif tower_flag == 1:\r\n print(\"Altered Tower Setup (Y opposite of LCD)\\n\")\r\n elif tower_flag == 2:\r\n print(\"Experimental Tower Setup (Z opposite of LCD)\\n\")\r\n \r\n #Set Bed Temperature\r\n if bed_temp >= 0:\r\n print ('Setting bed temperature to {0} C\\n'.format(str(bed_temp)))\r\n port.write('M140 S{0}\\n'.format(str(bed_temp)).encode())\r\n out = port.readline().decode()\r\n \r\n # Display interpolation methods\r\n if minterp == 1: \r\n print(\"Interpolation Method: Dennis's Spreadsheet\\n\")\r\n else:\r\n print(\"Interpolation Method: python3 scipy.interpolate.griddata\\n\")\r\n \r\n # Set the proper step/mm\r\n print ('Setting up M92 X{0} Y{0} Z{0}\\n'.format(str(step_mm)))\r\n port.write(('M92 X{0} Y{0} Z{0}\\n'.format(str(step_mm))).encode())\r\n out = port.readline().decode()\r\n \r\n print ('Setting up M665 L{0} R{1}\\n'.format(str(l_value),str(r_value)))\r\n port.write(('M665 L{0}\\n'.format(str(l_value))).encode())\r\n out = port.readline().decode()\r\n\r\n if firmFlag == 1:\r\n print ('Setting up M206 X0 Y0 Z0\\n')\r\n port.write('M206 X0 Y0 Z0\\n'.encode())\r\n out = port.readline().decode()\r\n \r\n print ('Clearing mesh with M421 C\\n')\r\n port.write('M421 C\\n'.encode())\r\n out = port.readline().decode()\r\n\r\n set_M_values(port, trial_z, trial_x, trial_y, l_value, r_value)\r\n\r\n print ('\\nStarting calibration')\r\n\r\n calibrated, new_z, new_x, new_y, new_l, new_r, xhigh, yhigh, zhigh = run_calibration(port, firmFlag, trial_x, trial_y, trial_z, l_value, r_value, xhigh, yhigh, zhigh, max_runs, args.max_error, bed_temp, minterp, tower_flag)\r\n\r\n port.close()\r\n\r\n if calibrated:\r\n if firmFlag == 1:\r\n print ('Run mesh bed leveling before printing: G29\\n')\r\n if args.file:\r\n data = {'z':new_z, 'x':new_x, 'y':new_y, 'r':new_r, 'l': new_l, 'step':step_mm, 'max_runs':max_runs, 'max_error':max_error, 'bed_temp':bed_temp}\r\n with open(args.file, \"w\") as text_file:\r\n text_file.write(json.dumps(data))\r\n\r\n\r\nif __name__ == 
'__main__':\r\n main()\r\n" ]
[ [ "scipy.interpolate.griddata", "numpy.array" ] ]
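The calibration record above leans on the two library calls listed in its API row, numpy.array and scipy.interpolate.griddata, to turn scattered bed-probe heights into a contour that can be sampled at the tower positions. As a point of reference, the sketch below shows that interpolation step in isolation; the probe coordinates, heights, and query position are illustrative values, not data from the recorded script.

import numpy as np
from scipy.interpolate import griddata

# Scattered probe points (x, y) and their measured bed heights z (made-up values).
coord_xy = np.array([[-40.0, -40.0], [40.0, -40.0],
                     [-40.0,  40.0], [40.0,  40.0],
                     [  0.0,   0.0]])
coord_z = np.array([0.10, -0.05, 0.02, -0.08, 0.00])

# Estimate the height at an arbitrary bed position, mirroring the
# float(griddata(coord_xy, coord_z, (x0, y0))) pattern used in the script above.
z_query = float(griddata(coord_xy, coord_z, (10.0, -15.0), method='linear'))
print(z_query)

With the default 'linear' method, griddata returns NaN for query points outside the convex hull of the probe points, so sampled positions have to stay inside the probed grid.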
bagustris/dimensional-ser
[ "ce9bfae1d962b3581dd7022e4f145429615e2771" ]
[ "code/speech-text/cnn_lstm.py" ]
[ "# lstm_lstm: emotion recognition from speech= lstm, text=lstm\n# created for ATSIT paper 2020\n# coded by Bagus Tris Atmaja ([email protected])\n\nimport numpy as np\nimport pickle\nimport pandas as pd\n\nimport keras.backend as K\nfrom keras.models import Model, Sequential\nfrom keras.layers import Input, Dense, Masking, CuDNNLSTM, TimeDistributed, \\\n Bidirectional, Flatten, Convolution1D, \\\n Embedding, Dropout, Flatten, BatchNormalization, \\\n RNN, concatenate, Activation\nfrom keras.callbacks import EarlyStopping\nfrom keras.preprocessing import sequence\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler\nimport random as rn\nimport tensorflow as tf\n\nrn.seed(123)\nnp.random.seed(99)\ntf.set_random_seed(1234)\n\n# load feature and labels\nfeat = np.load('/home/s1820002/atsit/data/feat_34_hfs.npy')\nvad = np.load('/home/s1820002/IEMOCAP-Emotion-Detection/y_egemaps.npy')\n\n# reshap input for CNN\nfeat = feat.reshape(feat.shape[0], feat.shape[2], feat.shape[1])\n\n# remove outlier, < 1, > 5\nvad = np.where(vad==5.5, 5.0, vad)\nvad = np.where(vad==0.5, 1.0, vad)\n\n# standardization\nscaled_feature = False\n\n# text feature\npath = '/home/s1820002/IEMOCAP-Emotion-Detection/'\nx_train_text = np.load(path+'x_train_text.npy')\ng_word_embedding_matrix = np.load(path+'g_word_embedding_matrix.npy')\n\n# other parameters\nMAX_SEQUENCE_LENGTH = 554\nEMBEDDING_DIM = 300\nnb_words = 3438\n\n\n# set Dropout\ndo = 0.3\n\nif scaled_feature == True:\n scaler = StandardScaler()\n scaler = scaler.fit(feat.reshape(feat.shape[0]*feat.shape[1], feat.shape[2]))\n scaled_feat = scaler.transform(feat.reshape(feat.shape[0]*feat.shape[1], feat.shape[2]))\n scaled_feat = scaled_feat.reshape(feat.shape[0], feat.shape[1], feat.shape[2])\n feat = scaled_feat\nelse:\n feat = feat\n\nscaled_vad = True\n\n# standardization\nif scaled_vad:\n scaler = MinMaxScaler(feature_range=(-1, 1))\n scaler = scaler.fit(vad) #.reshape(vad.shape[0]*vad.shape[1], vad.shape[2]))\n scaled_vad = scaler.transform(vad) #.reshape(vad.shape[0]*vad.shape[1], vad.shape[2]))\n vad = scaled_vad \nelse:\n vad = vad\n\n# Concordance correlation coefficient (CCC)-based loss function - using non-inductive statistics\ndef ccc(gold, pred):\n gold = K.squeeze(gold, axis=-1)\n pred = K.squeeze(pred, axis=-1)\n gold_mean = K.mean(gold, axis=-1, keepdims=True)\n pred_mean = K.mean(pred, axis=-1, keepdims=True)\n covariance = (gold-gold_mean)*(pred-pred_mean)\n gold_var = K.mean(K.square(gold-gold_mean), axis=-1, keepdims=True)\n pred_var = K.mean(K.square(pred-pred_mean), axis=-1, keepdims=True)\n ccc = K.constant(2.) * covariance / (gold_var + pred_var + K.square(gold_mean - pred_mean) + K.common.epsilon())\n return ccc\n\n\ndef ccc_loss(gold, pred): \n # input (num_batches, seq_len, 1)\n ccc_loss = K.constant(1.) 
- ccc(gold, pred)\n return ccc_loss\n\n\n# API model, if use RNN, first two rnn layer must return_sequences=True\ndef model(alpha, beta, gamma):\n # speech network\n input_speech = Input(shape=(feat.shape[1], feat.shape[2]), name='speech_input')\n net_speech = BatchNormalization()(input_speech)\n net_speech = Convolution1D(256, 4, activation='relu')(net_speech)\n net_speech = Convolution1D(256, 8, activation='relu')(net_speech)\n net_speech = Convolution1D(256, 12, activation='relu')(net_speech)\n net_speech = Flatten()(net_speech)\n model_speech = Dropout(0.3)(net_speech)\n \n #text network\n input_text = Input(shape=(MAX_SEQUENCE_LENGTH, ))\n net_text = Embedding(nb_words,\n EMBEDDING_DIM,\n weights = [g_word_embedding_matrix],\n trainable = True)(input_text)\n net_text = CuDNNLSTM(256, return_sequences=True)(net_text)\n net_text = CuDNNLSTM(256, return_sequences=True)(net_text)\n net_text = CuDNNLSTM(256, return_sequences=False)(net_text)\n net_text = Dense(64)(net_text)\n model_text = Dropout(0.3)(net_text)\n\n # combined model\n model_combined = concatenate([model_speech, model_text])\n model_combined = Dense(64, activation='relu')(model_combined)\n model_combined = Dense(32, activation='relu')(model_combined)\n model_combined = Dropout(0.4)(model_combined)\n target_names = ('v', 'a', 'd')\n model_combined = [Dense(1, name=name)(model_combined) for name in target_names]\n\n model = Model([input_speech, input_text], model_combined) \n model.compile(loss=ccc_loss,\n loss_weights={'v': alpha, 'a': beta, 'd': gamma},\n optimizer='rmsprop', metrics=[ccc])\n return model\n \nmodel = model(0.7,0.2,0.1)\nmodel.summary()\n\n# 7869 first data of session 5 (for LOSO)\nearlystop = EarlyStopping(monitor='val_loss', mode='min', patience=10,\n restore_best_weights=True)\nhist = model.fit([feat[:7869], x_train_text[:7869]], \n vad[:7869].T.tolist(), batch_size=256, #best:8\n validation_split=0.2, epochs=50, verbose=1, shuffle=True,\n callbacks=[earlystop])\nmetrik = model.evaluate([feat[7869:], x_train_text[7869:]], vad[7869:].T.tolist())\nprint(metrik)\n\n# save prediction, comment to avoid overwriting\npredict = model.predict([feat[6296:], x_train_text[6296:]], batch_size=8)\nnp.save('../../data/predict_cnn_lstm.npy', \n np.array(predict).reshape(3, 3743).T)\n" ]
[ [ "tensorflow.set_random_seed", "numpy.array", "sklearn.preprocessing.StandardScaler", "numpy.random.seed", "numpy.load", "numpy.where", "sklearn.preprocessing.MinMaxScaler" ] ]
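The speech/text model recorded above is trained with a 1 - CCC objective. The standalone NumPy sketch below states the standard concordance correlation coefficient so the Keras-backend version can be checked against it; the arrays are illustrative, not the IEMOCAP valence/arousal/dominance labels used by the script.

import numpy as np

def ccc(gold, pred):
    # CCC = 2*cov(gold, pred) / (var(gold) + var(pred) + (mean(gold) - mean(pred))^2)
    gold = np.asarray(gold, dtype=float)
    pred = np.asarray(pred, dtype=float)
    gm, pm = gold.mean(), pred.mean()
    covariance = ((gold - gm) * (pred - pm)).mean()
    return 2.0 * covariance / (gold.var() + pred.var() + (gm - pm) ** 2)

y_true = np.array([3.0, 2.5, 4.0, 1.5])
y_pred = np.array([2.8, 2.7, 3.9, 1.4])
print(ccc(y_true, y_pred))        # close to 1.0 for well-correlated predictions
print(1.0 - ccc(y_true, y_pred))  # the loss the model minimizes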
BoyuanChen/neural-state-variables
[ "10483d93ac8c006f3786c434fb57d70d9ab465ec" ]
[ "analysis/intrinsic_dimension_estimation/__init__.py" ]
[ "import numpy as np\nfrom .methods import *\n\nclass ID_Estimator:\n def __init__(self, method='Levina_Bickel'):\n self.all_methods = ['Levina_Bickel', 'MiND_ML', 'MiND_KL', 'Hein', 'CD']\n self.set_method(method)\n \n def set_method(self, method='Levina_Bickel'):\n if method not in self.all_methods:\n assert False, 'Unknown method!'\n else:\n self.method = method\n \n def fit(self, X, k_list=20, n_jobs=4):\n if self.method in ['Hein', 'CD']:\n dim_Hein, dim_CD = Hein_CD(X)\n return dim_Hein if self.method=='Hein' else dim_CD\n else:\n if np.isscalar(k_list):\n k_list = np.array([k_list])\n else:\n k_list = np.array(k_list)\n kmax = np.max(k_list) + 2\n dists, inds = kNN(X, kmax, n_jobs)\n dims = []\n for k in k_list:\n if self.method == 'Levina_Bickel':\n dims.append(Levina_Bickel(X, dists, k))\n elif self.method == 'MiND_ML':\n dims.append(MiND_ML(X, dists, k))\n elif self.method == 'MiND_KL':\n dims.append(MiND_KL(X, dists, k))\n else:\n pass\n if len(dims) == 1:\n return dims[0]\n else:\n return np.array(dims)\n \n def fit_all_methods(self, X, k_list=[20], n_jobs=4):\n k_list = np.array(k_list)\n kmax = np.max(k_list) + 2\n dists, inds = kNN(X, kmax, n_jobs)\n dim_all_methods = {method:[] for method in self.all_methods}\n dim_all_methods['Hein'], dim_all_methods['CD'] = Hein_CD(X)\n for k in k_list:\n dim_all_methods['Levina_Bickel'].append(Levina_Bickel(X, dists, k))\n dim_all_methods['MiND_ML'].append(MiND_ML(X, dists, k))\n dim_all_methods['MiND_KL'].append(MiND_KL(X, dists, k))\n for method in self.all_methods:\n dim_all_methods[method] = np.array(dim_all_methods[method])\n return dim_all_methods" ]
[ [ "numpy.max", "numpy.array", "numpy.isscalar" ] ]
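The ID_Estimator above imports its estimators from a .methods module that is not part of this record. As a hedged illustration of the Levina-Bickel option it exposes, the sketch below gives a common formulation of that maximum-likelihood estimate on top of scikit-learn's nearest neighbours; the function body and the synthetic test data are my own and may differ in detail from the package's implementation.

import numpy as np
from sklearn.neighbors import NearestNeighbors

def levina_bickel(X, k=20):
    # Local MLE per point: ((1/(k-1)) * sum_j log(T_k / T_j))^(-1), then averaged.
    nn = NearestNeighbors(n_neighbors=k + 1).fit(X)
    dists, _ = nn.kneighbors(X)      # column 0 is the point itself (distance 0)
    dists = dists[:, 1:]             # keep the k true neighbour distances
    log_ratio = np.log(dists[:, -1][:, None] / dists[:, :-1])
    local_dim = (k - 1) / log_ratio.sum(axis=1)
    return local_dim.mean()

rng = np.random.default_rng(0)
# 3-D data linearly embedded in 10 dimensions: the estimate should land near 3.
X = rng.normal(size=(2000, 3)) @ rng.normal(size=(3, 10))
print(levina_bickel(X, k=20))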
interaction-lab/RE-BT-Espresso
[ "03ee50af3728a07ced5da00b38124e41994facbc" ]
[ "BehaviorTreeDev/2_UpsampleData.py" ]
[ "import pandas as pd\nimport argparse\nfrom imblearn.over_sampling import SVMSMOTE\nimport numpy as np\nimport os\nimport shutil\nimport argparse\nimport json\nfrom json_manager import JsonManager\nimport pipeline_constants as constants\n\n\ndef process_command_line_args():\n ap = argparse.ArgumentParser()\n ap.add_argument(\"-c\", \"--config\", required=True,\n help=\"Full path to json config file, relative paths work as well\")\n ap.add_argument(\"-o\", \"--outputlog\", required=True,\n help=\"Path to log file\")\n args = vars(ap.parse_args())\n return args[\"config\"], args[\"outputlog\"]\n\n\ndef run_upsample(json_file_path, fmt_file_path):\n json_manager = JsonManager(json_file_path)\n\n if json_manager.get_upsample_status() == True:\n print(f\"Upsampling started using {json_file_path} and {fmt_file_path}\")\n upsampled_path = json_manager.get_upsampled_path()\n constants.remove_folder_if_exists(\n constants.UPSAMPLED_CSV_FOLDER_NAME, upsampled_path)\n print(\"Upsampling\")\n\n hot_encoded_folder = os.fsdecode(os.path.join(\n json_manager.get_hot_encoded_path(),\n constants.HOT_ENCODED_CSV_FOLDER_NAME))\n\n hot_encoded_file = os.fsdecode(os.path.join(\n hot_encoded_folder,\n constants.HOT_ENCODED_CSV_FILENAME))\n\n hotEncoded_data = pd.read_csv(hot_encoded_file)\n features_data = pd.read_csv(hot_encoded_file,\n usecols=list(hotEncoded_data.columns)[:-1]) # everything except label\n labels_data = pd.read_csv(hot_encoded_file,\n usecols=[list(hotEncoded_data.columns)[-1]]) # label\n\n sm = SVMSMOTE(random_state=json_manager.get_random_state())\n X_res, y_res = sm.fit_resample(features_data, labels_data)\n csv_ready = np.append(X_res, y_res, axis=constants.COLUMN_AXIS)\n\n upsampled_folder = constants.add_folder_to_directory(\n constants.UPSAMPLED_CSV_FOLDER_NAME, upsampled_path)\n\n upsampled_file_path = os.fsdecode(os.path.join(\n upsampled_folder, constants.UPSAMPLED_CSV_FILENAME))\n\n if os.path.exists(upsampled_file_path):\n os.remove(upsampled_file_path)\n\n f = open(fmt_file_path, \"r\")\n fmt = f.readline()\n f.close()\n\n header = ','.join(str(i) for i in hotEncoded_data.columns)\n np.savetxt(upsampled_file_path, csv_ready,\n fmt=fmt,\n delimiter=constants.CSV_DELIMITER,\n header=header,\n comments='')\n print(f\"Upsampling finished, results in {upsampled_file_path}\")\n\n\ndef main():\n json_file_path, fmt_file_path = process_command_line_args()\n run_upsample(json_file_path, fmt_file_path)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "pandas.read_csv", "numpy.savetxt", "numpy.append" ] ]
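The upsampling step recorded above is a thin wrapper around imblearn's SVMSMOTE. The sketch below shows the same fit_resample call and the feature/label re-stacking on a synthetic imbalanced dataset; the JSON config, folder constants, and CSV formatting of the pipeline are deliberately left out.

from collections import Counter

import numpy as np
from sklearn.datasets import make_classification
from imblearn.over_sampling import SVMSMOTE

# Toy 2-class problem with a ~9:1 imbalance (illustrative stand-in for the CSV data).
X, y = make_classification(n_samples=300, n_features=5, n_informative=3,
                           weights=[0.9, 0.1], random_state=42)
print("before:", Counter(y))

sm = SVMSMOTE(random_state=42)
X_res, y_res = sm.fit_resample(X, y)   # synthesises minority samples near the SVM boundary
print("after: ", Counter(y_res))

# Re-attach labels as the last column, as the script does before writing out a CSV.
csv_ready = np.append(X_res, y_res.reshape(-1, 1), axis=1)
print(csv_ready.shape)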
HKUST-KnowComp/GeoAlign
[ "3f29cfb911476f1555b66d4d39561df29342895a" ]
[ "poincare-embeddings/hype/lorentz.py" ]
[ "#!/usr/bin/env python3\n# Copyright (c) 2018-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\nimport logging\n\nimport torch as th\nfrom torch.autograd import Function\nfrom .common import acosh\nfrom .manifold import Manifold\n\nclass LorentzManifold(Manifold):\n __slots__ = [\"eps\", \"_eps\", \"norm_clip\", \"max_norm\", \"debug\"]\n\n @staticmethod\n def dim(dim):\n return dim + 1\n\n def __init__(self, eps=1e-12, _eps=1e-5, norm_clip=1, max_norm=1e6,\n debug=False, **kwargs):\n self.eps = eps\n self._eps = _eps\n self.norm_clip = norm_clip\n self.max_norm = max_norm\n self.debug = debug\n\n @staticmethod\n def ldot(u, v, keepdim=False):\n \"\"\"Lorentzian Scalar Product\"\"\"\n uv = u * v\n uv.narrow(-1, 0, 1).mul_(-1)\n return th.sum(uv, dim=-1, keepdim=keepdim)\n\n def to_poincare_ball(self, u):\n x = u.clone()\n d = x.size(-1) - 1\n return x.narrow(-1, 1, d) / (x.narrow(-1, 0, 1) + 1)\n\n def distance(self, u, v):\n d = -LorentzDot.apply(u, v)\n d.data.clamp_(min=1)\n return acosh(d, self._eps)\n\n def pnorm(self, u):\n return th.sqrt(th.sum(th.pow(self.to_poincare_ball(u), 2), dim=-1))\n\n def normalize(self, w):\n \"\"\"Normalize vector such that it is located on the hyperboloid\"\"\"\n d = w.size(-1) - 1\n narrowed = w.narrow(-1, 1, d)\n if self.max_norm:\n narrowed.view(-1, d).renorm_(p=2, dim=0, maxnorm=self.max_norm)\n tmp = 1 + th.sum(th.pow(narrowed, 2), dim=-1, keepdim=True)\n tmp.sqrt_()\n w.narrow(-1, 0, 1).copy_(tmp)\n return w\n\n def normalize_tan(self, x_all, v_all):\n d = v_all.size(1) - 1\n x = x_all.narrow(1, 1, d)\n xv = th.sum(x * v_all.narrow(1, 1, d), dim=1, keepdim=True)\n tmp = 1 + th.sum(th.pow(x_all.narrow(1, 1, d), 2), dim=1, keepdim=True)\n tmp.sqrt_().clamp_(min=self._eps)\n v_all.narrow(1, 0, 1).copy_(xv / tmp)\n return v_all\n\n def init_weights(self, w, irange=1e-5):\n w.data.uniform_(-irange, irange)\n w.data.copy_(self.normalize(w.data))\n\n def rgrad(self, p, d_p):\n \"\"\"Riemannian gradient for hyperboloid\"\"\"\n if d_p.is_sparse:\n u = d_p._values()\n x = p.index_select(0, d_p._indices().squeeze())\n else:\n u = d_p\n x = p\n u.narrow(-1, 0, 1).mul_(-1)\n u.addcmul_(self.ldot(x, u, keepdim=True).expand_as(x), x)\n return d_p\n\n def expm(self, p, d_p, lr=None, out=None, normalize=False):\n \"\"\"Exponential map for hyperboloid\"\"\"\n logger = logging.getLogger(__name__)\n if out is None:\n out = p\n if d_p.is_sparse:\n ix, d_val = d_p._indices().squeeze(), d_p._values()\n # This pulls `ix` out of the original embedding table, which could\n # be in a corrupted state. 
normalize it to fix it back to the\n # surface of the hyperboloid...\n # TODO: we should only do the normalize if we know that we are\n # training with multiple threads, otherwise this is a bit wasteful\n p_val = self.normalize(p.index_select(0, ix))\n ldv = self.ldot(d_val, d_val, keepdim=True)\n if self.debug:\n # assert all(ldv > 0), \"Tangent norm must be greater 0\"\n assert all(ldv == ldv), \"Tangent norm includes NaNs\"\n nd_p = ldv.clamp_(min=0).sqrt_()\n t = th.clamp(nd_p, max=self.norm_clip)\n nd_p.clamp_(min=self.eps)\n newp = (th.cosh(t) * p_val).addcdiv_(th.sinh(t) * d_val, nd_p)\n if normalize:\n newp = self.normalize(newp)\n p.index_copy_(0, ix, newp)\n else:\n if lr is not None:\n d_p.narrow(-1, 0, 1).mul_(-1)\n d_p.addcmul_((self.ldot(p, d_p, keepdim=True)).expand_as(p), p)\n d_p.mul_(-lr)\n ldv = self.ldot(d_p, d_p, keepdim=True)\n if self.debug:\n if ldv > 0:\n logger.warning(\"Tangent norm is not greater 0\")\n assert all(ldv == ldv), \"Tangent norm includes NaNs\"\n nd_p = ldv.clamp_(min=0).sqrt_()\n t = th.clamp(nd_p, max=self.norm_clip)\n nd_p.clamp_(min=self.eps)\n newp = (th.cosh(t) * p).addcdiv_(th.sinh(t) * d_p, nd_p)\n if normalize:\n newp = self.normalize(newp)\n p.copy_(newp)\n\n def logm(self, x, y):\n \"\"\"Logarithmic map on the Lorenz Manifold\"\"\"\n xy = th.clamp(self.ldot(x, y).unsqueeze(-1), max=-1)\n v = acosh(-xy, self.eps).div_(\n th.clamp(th.sqrt(xy * xy - 1), min=self._eps)\n ) * th.addcmul(y, xy, x)\n return self.normalize_tan(x, v)\n\n def ptransp(self, x, y, v, ix=None, out=None):\n \"\"\"Parallel transport for hyperboloid\"\"\"\n if ix is not None:\n v_ = v\n x_ = x.index_select(0, ix)\n y_ = y.index_select(0, ix)\n elif v.is_sparse:\n ix, v_ = v._indices().squeeze(), v._values()\n x_ = x.index_select(0, ix)\n y_ = y.index_select(0, ix)\n else:\n raise NotImplementedError\n xy = self.ldot(x_, y_, keepdim=True).expand_as(x_)\n vy = self.ldot(v_, y_, keepdim=True).expand_as(x_)\n vnew = v_ + vy / (1 - xy) * (x_ + y_)\n if out is None:\n return vnew\n else:\n out.index_copy_(0, ix, vnew)\n\n\nclass LorentzDot(Function):\n @staticmethod\n def forward(ctx, u, v):\n ctx.save_for_backward(u, v)\n return LorentzManifold.ldot(u, v)\n\n @staticmethod\n def backward(ctx, g):\n u, v = ctx.saved_tensors\n g = g.unsqueeze(-1).expand_as(u).clone()\n g.narrow(-1, 0, 1).mul_(-1)\n return g * v, g * u\n" ]
[ [ "torch.sqrt", "torch.cosh", "torch.clamp", "torch.sum", "torch.sinh", "torch.addcmul", "torch.pow" ] ]
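The LorentzManifold class above is built around three ideas: the Lorentzian scalar product, the hyperboloid constraint <x, x>_L = -1, and the arccosh distance that product induces. The standalone PyTorch sketch below shows just those pieces, simplified relative to the recorded class, which additionally handles sparse gradients, parallel transport, and clamping policies.

import torch

def ldot(u, v, keepdim=False):
    # Lorentzian inner product: -u0*v0 + u1*v1 + ... + ud*vd
    uv = u * v
    uv[..., 0] = -uv[..., 0]
    return uv.sum(dim=-1, keepdim=keepdim)

def lift(x):
    # Lift a Euclidean vector onto the hyperboloid by solving <x, x>_L = -1 for x0.
    x0 = torch.sqrt(1.0 + (x * x).sum(dim=-1, keepdim=True))
    return torch.cat([x0, x], dim=-1)

def distance(u, v, eps=1e-7):
    # d(u, v) = arccosh(-<u, v>_L), clamped away from 1 for numerical safety.
    return torch.acosh(torch.clamp(-ldot(u, v), min=1.0 + eps))

u = lift(torch.tensor([0.1, 0.2]))
v = lift(torch.tensor([-0.3, 0.4]))
print(ldot(u, u))      # ~ -1: both points satisfy the hyperboloid constraint
print(distance(u, v))  # hyperbolic distance between them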
ipl31/fastparquet
[ "fe8dc1b05044f0608fd4fd87c60606b49a0c0e5f" ]
[ "fastparquet/test/test_api.py" ]
[ "# -*- coding: utf-8 -*-\nimport io\nimport os\nimport subprocess\nimport sys\nfrom distutils.version import LooseVersion\n\nimport numpy as np\nimport pandas as pd\ntry:\n from pandas.tslib import Timestamp\nexcept ImportError:\n from pandas import Timestamp\nimport pytest\n\nfrom fastparquet.test.util import tempdir\nimport fastparquet\nfrom fastparquet import write, ParquetFile\nfrom fastparquet.api import statistics, sorted_partitioned_columns, filter_in, filter_not_in\nfrom fastparquet.util import join_path\n\nTEST_DATA = \"test-data\"\nWIN = os.name == 'nt'\n\n\[email protected](reason=\"new numpy\")\ndef test_import_without_warning():\n # in a subprocess to avoid import chacing issues.\n subprocess.check_call([sys.executable, \"-Werror\", \"-c\", \"import fastparquet\"])\n\n\ndef test_statistics(tempdir):\n df = pd.DataFrame({'x': [1, 2, 3],\n 'y': [1.0, 2.0, 1.0],\n 'z': ['a', 'b', 'c']})\n\n fn = os.path.join(tempdir, 'foo.parquet')\n write(fn, df, row_group_offsets=[0, 2])\n\n p = ParquetFile(fn)\n\n s = statistics(p)\n expected = {'distinct_count': {'x': [None, None],\n 'y': [None, None],\n 'z': [None, None]},\n 'max': {'x': [2, 3], 'y': [2.0, 1.0], 'z': ['b', 'c']},\n 'min': {'x': [1, 3], 'y': [1.0, 1.0], 'z': ['a', 'c']},\n 'null_count': {'x': [0, 0], 'y': [0, 0], 'z': [0, 0]}}\n\n assert s == expected\n\n\ndef test_logical_types(tempdir):\n df = pd.util.testing.makeMixedDataFrame()\n\n fn = os.path.join(tempdir, 'foo.parquet')\n write(fn, df, row_group_offsets=[0, 2])\n\n p = ParquetFile(fn)\n\n s = statistics(p)\n\n assert isinstance(s['min']['D'][0], (np.datetime64, Timestamp))\n\n\ndef test_text_schema(tempdir):\n df = pd.util.testing.makeMixedDataFrame()\n fn = os.path.join(tempdir, 'foo.parquet')\n write(fn, df)\n p = ParquetFile(fn)\n t = p.schema.text\n expected = ('- schema: \\n'\n '| - A: DOUBLE, OPTIONAL\\n'\n '| - B: DOUBLE, OPTIONAL\\n'\n '| - C: BYTE_ARRAY, UTF8, OPTIONAL\\n'\n ' - D: INT64, TIMESTAMP_MICROS, OPTIONAL')\n assert t == expected\n assert repr(p.schema) == \"<Parquet Schema with 5 entries>\"\n\n\ndef test_empty_statistics(tempdir):\n p = ParquetFile(os.path.join(TEST_DATA, \"nation.impala.parquet\"))\n\n s = statistics(p)\n assert s == {'distinct_count': {'n_comment': [None],\n 'n_name': [None],\n 'n_nationkey': [None],\n 'n_regionkey': [None]},\n 'max': {'n_comment': [None],\n 'n_name': [None],\n 'n_nationkey': [None],\n 'n_regionkey': [None]},\n 'min': {'n_comment': [None],\n 'n_name': [None],\n 'n_nationkey': [None],\n 'n_regionkey': [None]},\n 'null_count': {'n_comment': [None],\n 'n_name': [None],\n 'n_nationkey': [None],\n 'n_regionkey': [None]}}\n\n\ndef test_sorted_row_group_columns(tempdir):\n df = pd.DataFrame({'x': [1, 2, 3, 4],\n 'v': [{'a': 0}, {'b': -1}, {'c': 5}, {'a': 0}],\n 'y': [1.0, 2.0, 1.0, 2.0],\n 'z': ['a', 'b', 'c', 'd']})\n\n fn = os.path.join(tempdir, 'foo.parquet')\n write(fn, df, row_group_offsets=[0, 2], object_encoding={'v': 'json',\n 'z': 'utf8'})\n\n pf = ParquetFile(fn)\n\n # string stats should be stored without byte-encoding\n zcol = [c for c in pf.row_groups[0].columns\n if c.meta_data.path_in_schema == ['z']][0]\n assert zcol.meta_data.statistics.min == b'a'\n\n result = sorted_partitioned_columns(pf)\n expected = {'x': {'min': [1, 3], 'max': [2, 4]},\n 'z': {'min': ['a', 'c'], 'max': ['b', 'd']}}\n\n # NB column v should not feature, as dict are unorderable\n assert result == expected\n\n\ndef test_sorted_row_group_columns_with_filters(tempdir):\n dd = pytest.importorskip('dask.dataframe')\n # create dummy 
dataframe\n df = pd.DataFrame({'unique': [0, 0, 1, 1, 2, 2, 3, 3],\n 'id': ['id1', 'id2',\n 'id1', 'id2',\n 'id1', 'id2',\n 'id1', 'id2']},\n index=[0, 0, 1, 1, 2, 2, 3, 3])\n df = dd.from_pandas(df, npartitions=2)\n fn = os.path.join(tempdir, 'foo.parquet')\n df.to_parquet(fn,\n engine='fastparquet',\n partition_on=['id'])\n # load ParquetFile\n pf = ParquetFile(fn)\n filters = [('id', '==', 'id1')]\n\n # without filters no columns are sorted\n result = sorted_partitioned_columns(pf)\n expected = {}\n assert result == expected\n\n # with filters both columns are sorted\n result = sorted_partitioned_columns(pf, filters=filters)\n expected = {'__null_dask_index__': {'min': [0, 2], 'max': [1, 3]},\n 'unique': {'min': [0, 2], 'max': [1, 3]}}\n assert result == expected\n\n\ndef test_iter(tempdir):\n df = pd.DataFrame({'x': [1, 2, 3, 4],\n 'y': [1.0, 2.0, 1.0, 2.0],\n 'z': ['a', 'b', 'c', 'd']})\n df.index.name = 'index'\n\n fn = os.path.join(tempdir, 'foo.parquet')\n write(fn, df, row_group_offsets=[0, 2], write_index=True)\n pf = ParquetFile(fn)\n out = iter(pf.iter_row_groups(index='index'))\n d1 = next(out)\n pd.testing.assert_frame_equal(d1, df[:2])\n d2 = next(out)\n pd.testing.assert_frame_equal(d2, df[2:])\n with pytest.raises(StopIteration):\n next(out)\n\n\ndef test_pickle(tempdir):\n import pickle\n df = pd.DataFrame({'x': [1, 2, 3, 4],\n 'y': [1.0, 2.0, 1.0, 2.0],\n 'z': ['a', 'b', 'c', 'd']})\n df.index.name = 'index'\n\n fn = os.path.join(tempdir, 'foo.parquet')\n write(fn, df, row_group_offsets=[0, 2], write_index=True)\n pf = ParquetFile(fn)\n pf2 = pickle.loads(pickle.dumps(pf))\n assert pf.to_pandas().equals(pf2.to_pandas())\n\n\ndef test_directory_local(tempdir):\n df = pd.DataFrame({'x': [1, 2, 3, 4],\n 'y': [1.0, 2.0, 1.0, 2.0],\n 'z': ['a', 'b', 'c', 'd']})\n df.index.name = 'index'\n write(os.path.join(tempdir, 'foo1.parquet'), df)\n write(os.path.join(tempdir, 'foo2.parquet'), df)\n pf = ParquetFile(tempdir)\n assert pf.info['rows'] == 8\n assert pf.to_pandas()['z'].tolist() == ['a', 'b', 'c', 'd'] * 2\n\n\ndef test_directory_error(tempdir):\n df = pd.DataFrame({'x': [1, 2, 3, 4],\n 'y': [1.0, 2.0, 1.0, 2.0],\n 'z': ['a', 'b', 'c', 'd']})\n df.index.name = 'index'\n write(os.path.join(tempdir, 'foo1.parquet'), df)\n write(os.path.join(tempdir, 'foo2.parquet'), df)\n with pytest.raises(ValueError, match=\"fsspec\"):\n ParquetFile(tempdir, open_with=lambda *args: open(*args))\n\n\ndef test_directory_mem():\n import fsspec\n m = fsspec.filesystem(\"memory\")\n df = pd.DataFrame({'x': [1, 2, 3, 4],\n 'y': [1.0, 2.0, 1.0, 2.0],\n 'z': ['a', 'b', 'c', 'd']})\n df.index.name = 'index'\n write('/dir/foo1.parquet', df, open_with=m.open)\n write('/dir/foo2.parquet', df, open_with=m.open)\n\n # inferred FS\n pf = ParquetFile(\"/dir\", open_with=m.open)\n assert pf.info['rows'] == 8\n assert pf.to_pandas()['z'].tolist() == ['a', 'b', 'c', 'd'] * 2\n\n # inferred FS\n pf = ParquetFile(\"/dir/*\", open_with=m.open)\n assert pf.info['rows'] == 8\n assert pf.to_pandas()['z'].tolist() == ['a', 'b', 'c', 'd'] * 2\n\n # explicit FS\n pf = ParquetFile(\"/dir\", fs=m)\n assert pf.info['rows'] == 8\n assert pf.to_pandas()['z'].tolist() == ['a', 'b', 'c', 'd'] * 2\n m.store.clear()\n\n\ndef test_directory_mem_nest():\n import fsspec\n m = fsspec.filesystem(\"memory\")\n df = pd.DataFrame({'x': [1, 2, 3, 4],\n 'y': [1.0, 2.0, 1.0, 2.0],\n 'z': ['a', 'b', 'c', 'd']})\n df.index.name = 'index'\n write('/dir/field=a/foo1.parquet', df, open_with=m.open)\n write('/dir/field=b/foo2.parquet', df, 
open_with=m.open)\n\n pf = ParquetFile(\"/dir\", fs=m)\n assert pf.info['rows'] == 8\n assert pf.to_pandas()['z'].tolist() == ['a', 'b', 'c', 'd'] * 2\n assert pf.to_pandas()['field'].tolist() == ['a'] * 4 + ['b'] * 4\n\n\ndef test_attributes(tempdir):\n df = pd.DataFrame({'x': [1, 2, 3, 4],\n 'y': [1.0, 2.0, 1.0, 2.0],\n 'z': ['a', 'b', 'c', 'd']})\n\n fn = os.path.join(tempdir, 'foo.parquet')\n write(fn, df, row_group_offsets=[0, 2])\n pf = ParquetFile(fn)\n assert pf.columns == ['x', 'y', 'z']\n assert len(pf.row_groups) == 2\n assert pf.count() == 4\n assert join_path(fn) == pf.info['name']\n assert join_path(fn) in str(pf)\n for col in df:\n assert pf.dtypes[col] == df.dtypes[col]\n\n\ndef test_open_standard(tempdir):\n df = pd.DataFrame({'x': [1, 2, 3, 4],\n 'y': [1.0, 2.0, 1.0, 2.0],\n 'z': ['a', 'b', 'c', 'd']})\n fn = os.path.join(tempdir, 'foo.parquet')\n write(fn, df, row_group_offsets=[0, 2], file_scheme='hive',\n open_with=open)\n pf = ParquetFile(fn, open_with=open)\n d2 = pf.to_pandas()\n pd.testing.assert_frame_equal(d2, df)\n\n\ndef test_filelike(tempdir):\n df = pd.DataFrame({'x': [1, 2, 3, 4],\n 'y': [1.0, 2.0, 1.0, 2.0],\n 'z': ['a', 'b', 'c', 'd']})\n fn = os.path.join(tempdir, 'foo.parquet')\n write(fn, df, row_group_offsets=[0, 2])\n with open(fn, 'rb') as f:\n pf = ParquetFile(f, open_with=open)\n d2 = pf.to_pandas()\n pd.testing.assert_frame_equal(d2, df)\n\n b = io.BytesIO(open(fn, 'rb').read())\n pf = ParquetFile(b, open_with=open)\n d2 = pf.to_pandas()\n pd.testing.assert_frame_equal(d2, df)\n\n\ndef test_cast_index(tempdir):\n df = pd.DataFrame({'i8': np.array([1, 2, 3, 4], dtype='uint8'),\n 'i16': np.array([1, 2, 3, 4], dtype='int16'),\n 'i32': np.array([1, 2, 3, 4], dtype='int32'),\n 'i64': np.array([1, 2, 3, 4], dtype='int64'),\n 'f16': np.array([1, 2, 3, 4], dtype='float16'),\n 'f32': np.array([1, 2, 3, 4], dtype='float32'),\n 'f64': np.array([1, 2, 3, 4], dtype='float64'),\n })\n fn = os.path.join(tempdir, 'foo.parquet')\n write(fn, df)\n pf = ParquetFile(fn)\n for col in ['i32']: #list(df):\n d = pf.to_pandas(index=col)\n if d.index.dtype.kind == 'i':\n assert d.index.dtype == 'int64'\n elif d.index.dtype.kind == 'u':\n assert d.index.dtype == 'uint64'\n else:\n assert d.index.dtype == 'float64'\n print(col, (d.index == df[col]).all())\n\n # assert (d.index == df[col]).all()\n\n\ndef test_zero_child_leaf(tempdir):\n df = pd.DataFrame({'x': [1, 2, 3]})\n\n fn = os.path.join(tempdir, 'foo.parquet')\n write(fn, df)\n\n pf = ParquetFile(fn)\n assert pf.columns == ['x']\n\n pf._schema[1].num_children = 0\n assert pf.columns == ['x']\n\n\ndef test_request_nonexistent_column(tempdir):\n df = pd.DataFrame({'x': [1, 2, 3]})\n\n fn = os.path.join(tempdir, 'foo.parquet')\n write(fn, df)\n\n pf = ParquetFile(fn)\n with pytest.raises(ValueError):\n pf.to_pandas(columns=['y'])\n\n\ndef test_read_multiple_no_metadata(tempdir):\n df = pd.DataFrame({'x': [1, 5, 2, 5]})\n write(tempdir, df, file_scheme='hive', row_group_offsets=[0, 2])\n os.unlink(os.path.join(tempdir, '_metadata'))\n os.unlink(os.path.join(tempdir, '_common_metadata'))\n import glob\n flist = list(sorted(glob.glob(os.path.join(tempdir, '*'))))\n pf = ParquetFile(flist)\n assert len(pf.row_groups) == 2\n out = pf.to_pandas()\n pd.testing.assert_frame_equal(out, df)\n\n\ndef test_single_upper_directory(tempdir):\n df = pd.DataFrame({'x': [1, 5, 2, 5], 'y': ['aa'] * 4})\n write(tempdir, df, file_scheme='hive', partition_on='y')\n pf = ParquetFile(tempdir)\n out = pf.to_pandas()\n assert (out.y == 
'aa').all()\n\n os.unlink(os.path.join(tempdir, '_metadata'))\n os.unlink(os.path.join(tempdir, '_common_metadata'))\n import glob\n flist = list(sorted(glob.glob(os.path.join(tempdir, '*/*'))))\n pf = ParquetFile(flist, root=tempdir)\n assert pf.fn == join_path(os.path.join(tempdir, '_metadata'))\n out = pf.to_pandas()\n assert (out.y == 'aa').all()\n\n\ndef test_numerical_partition_name(tempdir):\n df = pd.DataFrame({'x': [1, 5, 2, 5], 'y1': ['aa', 'aa', 'bb', 'aa']})\n write(tempdir, df, file_scheme='hive', partition_on=['y1'])\n pf = ParquetFile(tempdir)\n out = pf.to_pandas()\n assert out[out.y1 == 'aa'].x.tolist() == [1, 5, 5]\n assert out[out.y1 == 'bb'].x.tolist() == [2]\n\n\ndef test_floating_point_partition_name(tempdir):\n df = pd.DataFrame({'x': [1e99, 5e-10, 2e+2, -0.1], 'y1': ['aa', 'aa', 'bb', 'aa']})\n write(tempdir, df, file_scheme='hive', partition_on=['y1'])\n pf = ParquetFile(tempdir)\n out = pf.to_pandas()\n assert out[out.y1 == 'aa'].x.tolist() == [1e99, 5e-10, -0.1]\n assert out[out.y1 == 'bb'].x.tolist() == [200.0]\n\n\[email protected](WIN, reason=\"path contains ':'\")\ndef test_datetime_partition_names(tempdir):\n dates = pd.to_datetime(['2015-05-09', '2018-10-15', '2020-10-17', '2015-05-09'])\n df = pd.DataFrame({\n 'date': dates,\n 'x': [1, 5, 2, 5]\n })\n write(tempdir, df, file_scheme='hive', partition_on=['date'])\n pf = ParquetFile(tempdir)\n out = pf.to_pandas()\n assert set(out.date.tolist()) == set(dates.tolist())\n assert out[out.date == '2015-05-09'].x.tolist() == [1, 5]\n assert out[out.date == '2020-10-17'].x.tolist() == [2]\n\n\ndef test_string_partition_names(tempdir):\n date_strings = ['2015-05-09', '2018-10-15', '2020-10-17', '2015-05-09']\n df = pd.DataFrame({\n 'date': date_strings,\n 'x': [1, 5, 2, 5]\n })\n write(tempdir, df, file_scheme='hive', partition_on=['date'])\n pf = ParquetFile(tempdir)\n out = pf.to_pandas()\n assert set(out.date.tolist()) == set(date_strings)\n assert out[out.date == '2015-05-09'].x.tolist() == [1, 5]\n assert out[out.date == '2020-10-17'].x.tolist() == [2]\n\n\[email protected]('partitions', [['2017-01-05', '1421'], ['0.7', '10']])\ndef test_mixed_partition_types(tempdir, partitions):\n df = pd.DataFrame({\n 'partitions': partitions,\n 'x': [1, 2]\n })\n write(tempdir, df, file_scheme='hive', partition_on=['partitions'])\n out = ParquetFile(tempdir).to_pandas()\n assert (out.sort_values(\"x\").set_index(\"x\").partitions == df.sort_values(\"x\").set_index(\"x\").partitions).all()\n\n\ndef test_filter_without_paths(tempdir):\n fn = os.path.join(tempdir, 'test.parq')\n df = pd.DataFrame({\n 'x': [1, 2, 3, 4, 5, 6, 7],\n 'letter': ['a', 'b', 'c', 'd', 'e', 'f', 'g']\n })\n write(fn, df)\n\n pf = ParquetFile(fn)\n out = pf.to_pandas(filters=[['x', '>', 3]])\n pd.testing.assert_frame_equal(out, df)\n out = pf.to_pandas(filters=[['x', '>', 30]])\n assert len(out) == 0\n\n\ndef test_filter_special(tempdir):\n df = pd.DataFrame({\n 'x': [1, 2, 3, 4, 5, 6, 7],\n 'symbol': ['NOW', 'OI', 'OI', 'OI', 'NOW', 'NOW', 'OI']\n })\n write(tempdir, df, file_scheme='hive', partition_on=['symbol'])\n pf = ParquetFile(tempdir)\n out = pf.to_pandas(filters=[('symbol', '==', 'NOW')])\n assert out.x.tolist() == [1, 5, 6]\n assert out.symbol.tolist() == ['NOW', 'NOW', 'NOW']\n\n\ndef test_filter_dates(tempdir):\n df = pd.DataFrame({\n 'x': [1, 2, 3, 4, 5, 6, 7],\n 'date': [\n '2015-05-09', '2017-05-15', '2017-05-14',\n '2017-05-13', '2015-05-10', '2015-05-11', '2017-05-12'\n ]\n })\n write(tempdir, df, file_scheme='hive', 
partition_on=['date'])\n pf = ParquetFile(tempdir)\n out_1 = pf.to_pandas(filters=[('date', '>', '2017-01-01')])\n\n assert set(out_1.x.tolist()) == {2, 3, 4, 7}\n expected_dates = set(['2017-05-15', '2017-05-14', '2017-05-13', '2017-05-12'])\n assert set(out_1.date.tolist()) == expected_dates\n\n out_2 = pf.to_pandas(filters=[('date', '==', pd.to_datetime('may 9 2015'))])\n assert out_2.x.tolist() == [1]\n assert out_2.date.tolist() == ['2015-05-09']\n\n\ndef test_in_filter(tempdir):\n symbols = ['a', 'a', 'b', 'c', 'c', 'd']\n values = [1, 2, 3, 4, 5, 6]\n df = pd.DataFrame(data={'symbols': symbols, 'values': values})\n write(tempdir, df, file_scheme='hive', partition_on=['symbols'])\n pf = ParquetFile(tempdir)\n out = pf.to_pandas(filters=[('symbols', 'in', ['a', 'c'])])\n assert set(out.symbols) == {'a', 'c'}\n\n\ndef test_partition_columns(tempdir):\n symbols = ['a', 'a', 'b', 'c', 'c', 'd']\n values = [1, 2, 3, 4, 5, 6]\n df = pd.DataFrame(data={'symbols': symbols, 'values': values})\n write(tempdir, df, file_scheme='hive', partition_on=['symbols'])\n pf = ParquetFile(tempdir)\n\n # partition columns always come after actual columns\n assert pf.to_pandas().columns.tolist() == ['values', 'symbols']\n assert pf.to_pandas(columns=['symbols']).columns.tolist() == ['symbols']\n assert pf.to_pandas(columns=['values']).columns.tolist() == ['values']\n assert pf.to_pandas(columns=[]).columns.tolist() == []\n\n\ndef test_in_filter_numbers(tempdir):\n symbols = ['a', 'a', 'b', 'c', 'c', 'd']\n values = [1, 2, 3, 4, 5, 6]\n df = pd.DataFrame(data={'symbols': symbols, 'values': values})\n write(tempdir, df, file_scheme='hive', partition_on=['values'])\n pf = ParquetFile(tempdir)\n out = pf.to_pandas(filters=[('values', 'in', ['1', '4'])])\n assert set(out.symbols) == {'a', 'c'}\n out = pf.to_pandas(filters=[('values', 'in', [1, 4])])\n assert set(out.symbols) == {'a', 'c'}\n\n\ndef test_filter_stats(tempdir):\n df = pd.DataFrame({\n 'x': [1, 2, 3, 4, 5, 6, 7],\n })\n write(tempdir, df, file_scheme='hive', row_group_offsets=[0, 4])\n pf = ParquetFile(tempdir)\n out = pf.to_pandas(filters=[('x', '>=', 5)])\n assert out.x.tolist() == [5, 6, 7]\n\n\[email protected](\"vals,vmin,vmax,expected_in, expected_not_in\", [\n # no stats\n ([3, 6], None, None, False, False),\n\n # unique values\n ([3, 6], 3, 3, False, True),\n ([3, 6], 2, 2, True, False),\n\n # open-ended intervals\n ([3, 6], None, 7, False, False),\n ([3, 6], None, 2, True, False),\n ([3, 6], 2, None, False, False),\n ([3, 6], 7, None, True, False),\n\n # partial matches\n ([3, 6], 2, 4, False, False),\n ([3, 6], 5, 6, False, True),\n ([3, 6], 2, 3, False, True),\n ([3, 6], 6, 7, False, True),\n\n # non match\n ([3, 6], 1, 2, True, False),\n ([3, 6], 7, 8, True, False),\n\n # spanning interval\n ([3, 6], 1, 8, False, False),\n\n # empty values\n ([], 1, 8, True, False),\n\n])\ndef test_in_filters(vals, vmin, vmax, expected_in, expected_not_in):\n assert filter_in(vals, vmin, vmax) == expected_in\n assert filter_in(list(reversed(vals)), vmin, vmax) == expected_in\n\n assert filter_not_in(vals, vmin, vmax) == expected_not_in\n assert filter_not_in(list(reversed(vals)), vmin, vmax) == expected_not_in\n\n\ndef test_in_filter_rowgroups(tempdir):\n fn = os.path.join(tempdir, 'test.parq')\n df = pd.DataFrame({\n 'x': range(10),\n })\n write(fn, df, row_group_offsets=2)\n pf = ParquetFile(fn)\n row_groups = list(pf.iter_row_groups(filters=[('x', 'in', [2])]))\n assert len(row_groups) == 1\n assert row_groups[0].x.tolist() == [2, 3]\n\n row_groups = 
list(pf.iter_row_groups(filters=[('x', 'in', [9])]))\n assert len(row_groups) == 1\n assert row_groups[0].x.tolist() == [8, 9]\n\n row_groups = list(pf.iter_row_groups(filters=[('x', 'in', [2, 9])]))\n assert len(row_groups) == 2\n assert row_groups[0].x.tolist() == [2, 3]\n assert row_groups[1].x.tolist() == [8, 9]\n\n\ndef test_unexisting_filter_cols(tempdir):\n fn = os.path.join(tempdir, 'test.parq') \n df = pd.DataFrame({'a': range(5), 'b': [1, 1, 2, 2, 2]})\n write(fn, df, file_scheme='hive', partition_on='b')\n pf = ParquetFile(fn)\n with pytest.raises(ValueError, match=\"{'c'}.$\"):\n rec_df = ParquetFile(fn).to_pandas(filters=[(('a', '>=', 0),\n ('c', '==', 0),)])\n \n\ndef test_index_not_in_columns(tempdir):\n df = pd.DataFrame({'a': ['x', 'y', 'z'], 'b': [4, 5, 6]}).set_index('a')\n write(tempdir, df, file_scheme='hive')\n pf = ParquetFile(tempdir)\n out = pf.to_pandas(columns=['b'])\n assert out.index.tolist() == ['x', 'y', 'z']\n out = pf.to_pandas(columns=['b'], index=False)\n assert out.index.tolist() == [0, 1, 2]\n\n\ndef test_no_index_name(tempdir):\n df = pd.DataFrame({'__index_level_0__': ['x', 'y', 'z'],\n 'b': [4, 5, 6]}).set_index('__index_level_0__')\n write(tempdir, df, file_scheme='hive')\n pf = ParquetFile(tempdir)\n out = pf.to_pandas()\n assert out.index.name is None\n assert out.index.tolist() == ['x', 'y', 'z']\n\n df = pd.DataFrame({'__index_level_0__': ['x', 'y', 'z'],\n 'b': [4, 5, 6]})\n write(tempdir, df, file_scheme='hive')\n pf = ParquetFile(tempdir)\n out = pf.to_pandas(index='__index_level_0__', columns=['b'])\n assert out.index.name is None\n assert out.index.tolist() == ['x', 'y', 'z']\n\n pf = ParquetFile(tempdir)\n out = pf.to_pandas()\n assert out.index.name is None\n assert out.index.tolist() == [0, 1, 2]\n\n\ndef test_input_column_list_not_mutated(tempdir):\n df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})\n write(tempdir, df, file_scheme='hive')\n cols = ['a']\n pf = ParquetFile(tempdir)\n out = pf.to_pandas(columns=cols)\n assert cols == ['a']\n\n\ndef test_drill_list(tempdir):\n df = pd.DataFrame({'a': ['x', 'y', 'z'], 'b': [4, 5, 6]})\n dir1 = os.path.join(tempdir, 'x')\n fn1 = os.path.join(dir1, 'part.0.parquet')\n os.makedirs(dir1)\n write(fn1, df)\n dir2 = os.path.join(tempdir, 'y')\n fn2 = os.path.join(dir2, 'part.0.parquet')\n os.makedirs(dir2)\n write(fn2, df)\n\n pf = ParquetFile([fn1, fn2])\n out = pf.to_pandas()\n assert out.a.tolist() == ['x', 'y', 'z'] * 2\n assert out.dir0.tolist() == ['x'] * 3 + ['y'] * 3\n\n\ndef test_multi_list(tempdir):\n df = pd.DataFrame({'a': ['x', 'y', 'z'], 'b': [4, 5, 6]})\n dir1 = os.path.join(tempdir, 'x')\n write(dir1, df, file_scheme='hive')\n dir2 = os.path.join(tempdir, 'y')\n write(dir2, df, file_scheme='hive')\n dir3 = os.path.join(tempdir, 'z', 'deep')\n write(dir3, df, file_scheme='hive')\n\n pf = ParquetFile([dir1, dir2])\n out = pf.to_pandas() # this version may have extra column!\n assert out.a.tolist() == ['x', 'y', 'z'] * 2\n pf = ParquetFile([dir1, dir2, dir3])\n out = pf.to_pandas()\n assert out.a.tolist() == ['x', 'y', 'z'] * 3\n\n\ndef test_hive_and_drill_list(tempdir):\n df = pd.DataFrame({'a': ['x', 'y', 'z'], 'b': [4, 5, 6]})\n dir1 = os.path.join(tempdir, 'x=0')\n fn1 = os.path.join(dir1, 'part.0.parquet')\n os.makedirs(dir1)\n write(fn1, df)\n dir2 = os.path.join(tempdir, 'y')\n fn2 = os.path.join(dir2, 'part.0.parquet')\n os.makedirs(dir2)\n write(fn2, df)\n\n pf = ParquetFile([fn1, fn2])\n out = pf.to_pandas()\n assert out.a.tolist() == ['x', 'y', 'z'] * 2\n assert 
out.dir0.tolist() == ['x=0'] * 3 + ['y'] * 3\n\n\ndef test_bad_file_paths(tempdir):\n df = pd.DataFrame({'a': ['x', 'y', 'z'], 'b': [4, 5, 6]})\n dir1 = os.path.join(tempdir, 'x=0')\n fn1 = os.path.join(dir1, 'part.=.parquet')\n os.makedirs(dir1)\n write(fn1, df)\n dir2 = os.path.join(tempdir, 'y/z')\n fn2 = os.path.join(dir2, 'part.0.parquet')\n os.makedirs(dir2)\n write(fn2, df)\n\n pf = ParquetFile([fn1, fn2])\n assert pf.file_scheme == 'other'\n out = pf.to_pandas()\n assert out.a.tolist() == ['x', 'y', 'z'] * 2\n assert 'dir0' not in out\n\n path1 = os.path.join(tempdir, 'data')\n fn1 = os.path.join(path1, 'out.parq')\n os.makedirs(path1)\n write(fn1, df)\n path2 = os.path.join(tempdir, 'data2')\n fn2 = os.path.join(path2, 'out.parq')\n os.makedirs(path2)\n write(fn2, df)\n pf = ParquetFile([fn1, fn2])\n out = pf.to_pandas()\n assert out.a.tolist() == ['x', 'y', 'z'] * 2\n\n\ndef test_compression_zstd(tempdir):\n df = pd.DataFrame(\n {\n 'x': np.arange(1000),\n 'y': np.arange(1, 1001),\n 'z': np.arange(2, 1002),\n }\n )\n\n fn = os.path.join(tempdir, 'foocomp.parquet')\n\n c = {\n \"x\": {\n \"type\": \"gzip\",\n \"args\": {\n \"compresslevel\": 5,\n }\n },\n \"y\": {\n \"type\": \"zstd\",\n \"args\": {\n \"level\": 5,\n }\n },\n \"_default\": {\n \"type\": \"gzip\",\n \"args\": None\n }\n }\n write(fn, df, compression=c)\n\n p = ParquetFile(fn)\n\n df2 = p.to_pandas()\n\n pd.testing.assert_frame_equal(df, df2)\n\n\ndef test_compression_lz4(tempdir):\n df = pd.DataFrame(\n {\n 'x': np.arange(1000),\n 'y': np.arange(1, 1001),\n 'z': np.arange(2, 1002),\n }\n )\n\n fn = os.path.join(tempdir, 'foocomp.parquet')\n\n c = {\n \"x\": {\n \"type\": \"gzip\",\n \"args\": {\n \"compresslevel\": 5,\n }\n },\n \"y\": {\n \"type\": \"lz4\",\n \"args\": {\n \"compression\": 5,\n \"store_size\": False,\n }\n },\n \"_default\": {\n \"type\": \"gzip\",\n \"args\": None\n }\n }\n write(fn, df, compression=c)\n\n p = ParquetFile(fn)\n\n df2 = p.to_pandas()\n\n pd.testing.assert_frame_equal(df, df2)\n\n\ndef test_compression_snappy(tempdir):\n df = pd.DataFrame(\n {\n 'x': np.arange(1000),\n 'y': np.arange(1, 1001),\n 'z': np.arange(2, 1002),\n }\n )\n\n fn = os.path.join(tempdir, 'foocomp.parquet')\n\n c = {\n \"x\": {\n \"type\": \"gzip\",\n \"args\": {\n \"compresslevel\": 5,\n }\n },\n \"y\": {\n \"type\": \"snappy\",\n \"args\": None\n },\n \"_default\": {\n \"type\": \"gzip\",\n \"args\": None\n }\n }\n write(fn, df, compression=c)\n\n p = ParquetFile(fn)\n\n df2 = p.to_pandas()\n\n pd.testing.assert_frame_equal(df, df2)\n\n\ndef test_int96_stats(tempdir):\n df = pd.util.testing.makeMixedDataFrame()\n\n fn = os.path.join(tempdir, 'foo.parquet')\n write(fn, df, row_group_offsets=[0, 2], times='int96')\n\n p = ParquetFile(fn)\n\n s = statistics(p)\n assert isinstance(s['min']['D'][0], (np.datetime64, Timestamp))\n assert 'D' in sorted_partitioned_columns(p)\n\n\ndef test_only_partition_columns(tempdir):\n df = pd.DataFrame({'a': np.random.rand(20),\n 'b': np.random.choice(['hi', 'ho'], size=20),\n 'c': np.random.choice(['a', 'b'], size=20)})\n write(tempdir, df, file_scheme='hive', partition_on=['b'])\n pf = ParquetFile(tempdir)\n df2 = pf.to_pandas(columns=['b'])\n df.b.value_counts().to_dict() == df2.b.value_counts().to_dict()\n\n write(tempdir, df, file_scheme='hive', partition_on=['a', 'b'])\n pf = ParquetFile(tempdir)\n df2 = pf.to_pandas(columns=['a', 'b'])\n df.b.value_counts().to_dict() == df2.b.value_counts().to_dict()\n\n df2 = pf.to_pandas(columns=['b'])\n df.b.value_counts().to_dict() == 
df2.b.value_counts().to_dict()\n\n df2 = pf.to_pandas(columns=['b', 'c'])\n df.b.value_counts().to_dict() == df2.b.value_counts().to_dict()\n\n with pytest.raises(ValueError):\n # because this leaves no data to write\n write(tempdir, df[['b']], file_scheme='hive', partition_on=['b'])\n\n\ndef test_path_containing_metadata_df():\n p = ParquetFile(os.path.join(TEST_DATA, \"dir_metadata\", \"empty.parquet\"))\n df = p.to_pandas()\n assert list(p.columns) == ['a', 'b', 'c', '__index_level_0__']\n assert len(df) == 0\n\n\ndef test_empty_df():\n p = ParquetFile(os.path.join(TEST_DATA, \"empty.parquet\"))\n df = p.to_pandas()\n assert list(p.columns) == ['a', 'b', 'c', '__index_level_0__']\n assert len(df) == 0\n\n\ndef test_unicode_cols(tempdir):\n fn = os.path.join(tempdir, 'test.parq')\n df = pd.DataFrame({u\"région\": [1, 2, 3]})\n write(fn, df)\n pf = ParquetFile(fn)\n pf.to_pandas()\n\n\ndef test_multi_cat(tempdir):\n fn = os.path.join(tempdir, 'test.parq')\n N = 200\n df = pd.DataFrame(\n {'a': np.random.randint(10, size=N),\n 'b': np.random.choice(['a', 'b', 'c'], size=N),\n 'c': np.arange(200)})\n df['a'] = df.a.astype('category')\n df['b'] = df.b.astype('category')\n df = df.set_index(['a', 'b'])\n write(fn, df)\n\n pf = ParquetFile(fn)\n df1 = pf.to_pandas()\n assert df1.equals(df)\n assert df1.loc[1, 'a'].equals(df.loc[1, 'a'])\n\n\ndef test_multi_cat_single(tempdir):\n fn = os.path.join(tempdir, 'test.parq')\n N = 200\n df = pd.DataFrame(\n {'a': np.random.randint(10, size=N),\n 'b': np.random.choice(['a', 'b', 'c'], size=N),\n 'c': np.arange(200)})\n df = df.set_index(['a', 'b'])\n write(fn, df)\n pf = ParquetFile(fn)\n df1 = pf.to_pandas()\n assert df1.equals(df)\n assert df1.loc[1, 'a'].equals(df.loc[1, 'a'])\n\n\ndef test_multi_cat_split(tempdir):\n # like test above, but across multiple row-groups; we test that the\n # categories are consistent\n fn = os.path.join(tempdir, 'test.parq')\n N = 200\n df = pd.DataFrame(\n {'a': np.random.randint(10, size=N),\n 'b': np.random.choice(['a', 'b', 'c'], size=N),\n 'c': np.arange(200)})\n df = df.set_index(['a', 'b'])\n write(fn, df, row_group_offsets=25)\n\n pf = ParquetFile(fn)\n df1 = pf.to_pandas()\n assert df1.equals(df)\n assert df1.loc[1, 'a'].equals(df.loc[1, 'a'])\n\n\ndef test_multi(tempdir):\n fn = os.path.join(tempdir, 'test.parq')\n N = 200\n df = pd.DataFrame(\n {'a': np.random.randint(10, size=N),\n 'b': np.random.choice(['a', 'b', 'c'], size=N),\n 'c': np.arange(200)})\n df = df.set_index(['a', 'b'])\n write(fn, df)\n\n pf = ParquetFile(fn)\n df1 = pf.to_pandas()\n assert df1.equals(df)\n assert df1.loc[1, 'a'].equals(df.loc[1, 'a'])\n\n\ndef test_simple_nested():\n fn = os.path.join(TEST_DATA, 'nested1.parquet')\n pf = ParquetFile(fn)\n assert len(pf.dtypes) == 5\n out = pf.to_pandas()\n assert len(out.columns) == 5\n assert '_adobe_corpnew' not in out.columns\n assert all('_adobe_corpnew' + '.' 
in c for c in out.columns)\n\n\ndef test_pandas_metadata_inference():\n fn = os.path.join(TEST_DATA, 'metas.parq')\n df = ParquetFile(fn).to_pandas()\n assert df.columns.name == 'colindex'\n assert df.index.name == 'rowindex'\n assert df.index.tolist() == [2, 3]\n\n df = ParquetFile(fn).to_pandas(index='a')\n assert df.index.name == 'a'\n assert df.columns.name == 'colindex'\n\n df = ParquetFile(fn).to_pandas(index=False)\n assert df.index.tolist() == [0, 1]\n assert df.index.name is None\n\n\ndef test_write_index_false(tempdir):\n fn = os.path.join(tempdir, 'test.parquet')\n df = pd.DataFrame(0, columns=['a'], index=range(1, 3))\n write(fn, df, write_index=False)\n rec_df = ParquetFile(fn).to_pandas()\n assert rec_df.index[0] == 0\n\n\ndef test_timestamp_filer(tempdir):\n fn = os.path.join(tempdir, 'test.parquet')\n ts = [pd.Timestamp('2021/01/01 08:00:00'),\n pd.Timestamp('2021/01/05 10:00:00')]\n val = [10, 34]\n df = pd.DataFrame({'val': val, 'ts': ts})\n # two row-groups\n write(fn, df, row_group_offsets=1, file_scheme='hive')\n\n ts_filter = pd.Timestamp('2021/01/03 00:00:00')\n pf = ParquetFile(fn)\n filt = [[('ts', '<', ts_filter)], [('ts', '>=', ts_filter)]]\n assert pf.to_pandas(filters=filt).val.tolist() == [10, 34]\n\n filt = [[('ts', '>=', ts_filter)], [('ts', '<', ts_filter)]]\n assert pf.to_pandas(filters=filt).val.tolist() == [10, 34]\n\n ts_filter_down = pd.Timestamp('2021/01/03 00:00:00')\n ts_filter_up = pd.Timestamp('2021/01/06 00:00:00')\n # AND filter\n filt = [[('ts', '>=', ts_filter_down), ('ts', '<', ts_filter_up)]]\n assert pf.to_pandas(filters=filt).val.tolist() == [34]\n\n\[email protected](condition=fastparquet.writer.DATAPAGE_VERSION == 2, reason=\"not implemented\")\ndef test_row_filter(tempdir):\n fn = os.path.join(tempdir, 'test.parquet')\n df = pd.DataFrame({\n 'a': ['o'] * 10 + ['i'] * 5,\n 'b': range(15)\n })\n write(fn, df, row_group_offsets=8)\n pf = ParquetFile(fn)\n assert pf.count(filters=[[\"a\", \"==\", \"o\"]]) == 15\n assert pf.count(filters=[[\"a\", \"==\", \"o\"]], row_filter=True) == 10\n assert pf.count(filters=[[\"a\", \"==\", \"i\"]], row_filter=True) == 5\n assert pf.count(filters=[[\"b\", \"in\", [1, 3, 4]]]) == 8\n assert pf.count(filters=[[\"b\", \"in\", [1, 3, 4]]], row_filter=True) == 3\n assert pf.to_pandas(filters=[[\"b\", \"in\", [1, 3, 4]]], row_filter=True\n ).b.tolist() == [1, 3, 4]\n assert pf.to_pandas(filters=[[\"a\", \"<\", \"o\"]], row_filter=True).b.tolist() == [\n 10, 11, 12, 13, 14\n ]\n\n\ndef test_select(tempdir):\n fn = os.path.join(tempdir, 'test.parquet')\n val = [2, 10, 34, 76]\n df = pd.DataFrame({'val': val})\n write(fn, df, row_group_offsets=1)\n\n pf = ParquetFile(fn)\n assert len(pf[0].row_groups) == 1\n assert pf[0].to_pandas().val.tolist() == [2]\n assert pf[1].to_pandas().val.tolist() == [10]\n assert pf[-1].to_pandas().val.tolist() == [76]\n assert pf[:].to_pandas().val.tolist() == val\n assert pf[::2].to_pandas().val.tolist() == val[::2]\n\n\ndef test_head(tempdir):\n fn = os.path.join(tempdir, 'test.parquet')\n val = [2, 10, 34, 76]\n df = pd.DataFrame({'val': val})\n write(fn, df)\n\n pf = ParquetFile(fn)\n assert pf.head(1).val.tolist() == [2]\n" ]
[ [ "pandas.to_datetime", "pandas.testing.assert_frame_equal", "numpy.array", "numpy.random.rand", "numpy.random.choice", "pandas.DataFrame", "pandas.util.testing.makeMixedDataFrame", "pandas.Timestamp", "numpy.arange", "numpy.random.randint" ] ]
missingdaysqxy/ordinal_clouds
[ "566d8ac22c54e8e7f2a0ad79a7da309205684543" ]
[ "makedataset.py" ]
[ "# -*- coding: utf-8 -*-\n# @Time : 2018/10/9 22:31\n# @Author : qxliu\n# @Email : [email protected]\n# @File : makedataset.py\n# @Software: PyCharm\n\nimport tensorflow as tf\nfrom datetime import datetime\nimport pandas as pd\nimport numpy as np\nfrom PIL import Image\nimport logging\nimport os\nimport sys\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '0' # 可以使用的GPU\nINPUT_DIR = './datasets/mode_2003/'\nOUTPUT_DIR = './datasets/separate_relabel2/'\nOUTPUT_CHANNEL = 3\nIMG_RESIZE = [512, 512]\nCUT_NUM = [4, 2]\n\nCLASS_LIST = ['A', 'B', 'C', 'D', 'E', 'nodata']\nCLASS_LIST_MAY = ['may_nodata', 'may_abcde', 'may_A', 'may_B', 'may_C', 'may_D', 'may_E']\n\n\ndef processBar(num, total, msg='', length=30):\n rate = num / total\n rate_num = int(rate * 100)\n clth = int(rate * length)\n if len(msg) > 0:\n msg += ':'\n if rate_num == 100:\n r = '\\r%s[%s%d%%]\\n' % (msg, '*' * length, rate_num,)\n else:\n r = '\\r%s[%s%s%d%%]' % (msg, '*' * clth, '-' * (length - clth), rate_num,)\n sys.stdout.write(r)\n sys.stdout.flush\n\n\ndef mk_childfolders(parent_dir, child_list=[]):\n os.makedirs(parent_dir, exist_ok=True)\n for dir in child_list:\n path = os.path.join(parent_dir, dir)\n if not os.path.exists(path):\n os.makedirs(path, exist_ok=True)\n\n\ndef _cut_images(image_to_cut, img_resize=[64, 64], cut_num=[2, 2], cut_order='horizontal'):\n '''\n :param img: image string from tf.image.decode\n :param img_resize: size to resize the orgin image, [height, width]\n :param cut_num: numbers to cut in horizontal and vertical, [vertical, horizontal]\n :param cut_order: the output squeence for cutted images, 'horizontal' first or 'vertical' first\n :param channel: output images channel, 1 for grayscale or 3 for rgb\n :return: s list of small images in type of uint8 tensor\n '''\n assert type(img_resize) in [list, np.ndarray] and len(img_resize) == 2\n assert type(cut_num) in [list, np.ndarray] and len(cut_num) == 2\n assert img_resize[0] % cut_num[0] == 0 and img_resize[1] % cut_num[1] == 0\n assert cut_order in ['horizontal', 'vertical']\n x_img_resized = tf.image.resize_images(image_to_cut, size=img_resize,\n method=tf.image.ResizeMethod.BILINEAR) # shape[512,512,?]\n h = img_resize[0] // cut_num[0] # height of small img\n w = img_resize[1] // cut_num[1] # width of small img\n if cut_order == 'horizontal':\n off_hs = list((np.arange(cut_num[0]) * h).repeat(cut_num[1]))\n off_ws = list(np.arange(cut_num[1]) * w) * cut_num[0]\n else:\n off_ws = list((np.arange(cut_num[1]) * h).repeat(cut_num[0]))\n off_hs = list(np.arange(cut_num[0]) * w) * cut_num[1]\n x_img_cuts = [tf.image.crop_to_bounding_box(x_img_resized, hs, ws, h, w) \\\n for hs, ws in zip(off_hs, off_ws)] # [shape[128,256,?] 
* batch_size]\n\n x_img_cuts = [tf.cast(x, tf.uint8) for x in x_img_cuts]\n return x_img_cuts\n\n\n\ndef init_separate_dataset(tag='train', start_index=None, max_count=None, datatype='jpg'):\n def init_csv_reader(input_dir, is_train, start_index=None, max_count=None, channels=3):\n '''read records from .csv file and separate original images into 8 small images'''\n assert channels in [1, 3]\n\n def _parse_function(file_paths, labels):\n '''Decode images and devide into 8 small images\n :param file_paths: shape[]\n :param labels: shape[8]\n '''\n batch_size = CUT_NUM[0] * CUT_NUM[1]\n x_img_str = tf.read_file(file_paths) # shape[]\n x_img_decoded = tf.image.decode_jpeg(x_img_str, channels=channels) # shape[?,?,channels]\n batch_xs = _cut_images(x_img_decoded, IMG_RESIZE, CUT_NUM, 'horizontal')\n batch_ys = tf.reshape(tf.split(labels, batch_size, axis=0), [-1, 1], name='batch_ys') # shape[batch_size,1]\n return file_paths, batch_xs, batch_ys\n\n # Processing the image filenames\n fs = os.listdir(input_dir)\n csv_name = os.path.join(input_dir, [it for it in fs if '.csv' in it][0])\n\n frame = pd.read_csv(csv_name)\n\n # Add one more column named \"Train\" to split the training set and validation set\n if is_train:\n frame = frame.loc[frame['Train'] == 'T']\n if isinstance(start_index, int) and start_index > 0:\n frame = frame[start_index:]\n if isinstance(max_count, int) and max_count > 0:\n frame = frame[:max_count]\n print(' [*] {} images initialized as training data'.format(frame['num_id'].count()))\n else:\n frame = frame.loc[frame['Train'] == 'F']\n if isinstance(start_index, int) and start_index > 0:\n frame = frame[start_index:]\n if isinstance(max_count, int) and max_count > 0:\n frame = frame[:max_count]\n print(' [*] {} images initialized as validation data'.format(frame['num_id'].count()))\n count = frame['num_id'].count()\n\n num_idx = frame['num_id'].values.astype(str).tolist()\n t_names = [item + '.jpg' for item in num_idx]\n file_names = [os.path.join(input_dir, item) for item in t_names]\n labels = frame['Cloud_Cover'].values.tolist()\n t_labels = [list('F'.join(item.split('*'))) for item in labels]\n for it in range(len(t_labels)):\n t_labels[it] = list(map(lambda x: ord(x) - ord('A'), t_labels[it]))\n # Initialize as a tensorflow tensor object\n data = tf.data.Dataset.from_tensor_slices((tf.constant(file_names, name='file_names'),\n tf.constant(t_labels)))\n data = data.map(_parse_function)\n return data, count\n\n assert tag in ['train', 'validation']\n assert datatype in ['jpg', 'jpeg', 'png']\n\n _output_dir = os.path.join(OUTPUT_DIR, tag)\n mk_childfolders(_output_dir, child_list=CLASS_LIST + CLASS_LIST_MAY)\n reader, count = init_csv_reader(INPUT_DIR, tag == 'train', start_index, max_count, channels=OUTPUT_CHANNEL)\n batch_path, batch_xs, batch_ys = reader.make_one_shot_iterator().get_next()\n # param batch_path: shape []\n # param batch_xs: shape [batch_size, 128, 256, channels] type tf.uint8\n # param batch_ys: shape [batch_size, 1] type tf.int32\n xs = [tf.squeeze(x, axis=0) for x in\n tf.split(batch_xs, batch_xs.shape[0], axis=0)] # a list of single images, [shape[1] * batch_size]\n ys = [tf.squeeze(y, axis=0) for y in\n tf.split(batch_ys, batch_ys.shape[0], axis=0)] # a list of single label, [shape[1] * batch_size]\n logging.basicConfig(filename=os.path.join(OUTPUT_DIR, 'log.txt'), level=logging.DEBUG)\n extname = '.' 
+ datatype\n with tf.Session() as sess:\n perc = count / 100\n perc = 1 if perc < 1 else int(perc)\n step = 0\n while True:\n try:\n org_path, imgs, labels = sess.run([batch_path, xs, ys])\n org_name = os.path.basename(org_path.decode()).split('.')[0]\n for i in range(len(imgs)):\n new_name = CLASS_LIST[labels[i][0]] + '/' + org_name + '_' + str(i) + extname\n if imgs[i].sum() == 0: # 全是0\n if labels[i][0] != 5: # 原label不是nodata\n logging.error('{} is nodata, not a-e'.format(new_name))\n new_name = CLASS_LIST[5] + '/' + org_name + '_' + str(i) + extname\n else: # 不全是0\n if 0 in [x.sum() for x in imgs[i]] and labels[i][0] != 5: # 有一行是0,可能是nodata\n new_name = 'may_nodata' + '/' + org_name + '_' + str(i) + extname\n elif labels[i][0] == 5: # 没有一行0且原label是nodata\n new_name = 'may_abcde' + '/' + org_name + '_' + str(i) + extname\n save_path = os.path.join(_output_dir, new_name)\n im = Image.fromarray(imgs[i])\n im.save(save_path)\n if int(org_name) % perc == 0:\n # print('progress: {}/{}'.format(step, batch_count))\n processBar(step, count, msg=\"Initizing \" + _output_dir)\n step += 1\n except tf.errors.OutOfRangeError:\n processBar(step, count, msg=\"Initizing \" + _output_dir)\n print('Finish!')\n break\n except Exception as e:\n print('an error accrue when open file %s' % org_path.decode())\n print(e)\n pass\n\n\n\n\ndef tfrecord_reader(filepaths, batch_size=24, num_epochs=1):\n # ToDO: try this\n assert batch_size >= 0 and type(batch_size) is int\n reader = tf.TFRecordReader()\n if type(filepaths) is not list:\n filepaths = [filepaths]\n fqueue = tf.train.string_input_producer(filepaths, num_epochs=num_epochs) # 此处不指定epochs就会一直持续下去\n _, serialized_example = reader.read(fqueue)\n features = tf.parse_single_example(\n serialized_example,\n features={\n 'name': tf.FixedLenFeature([], tf.string),\n 'label': tf.FixedLenFeature([], tf.string),\n 'image': tf.FixedLenFeature([], tf.string),\n 'height': tf.FixedLenFeature([], tf.int64),\n 'width': tf.FixedLenFeature([], tf.int64),\n 'channel': tf.FixedLenFeature([], tf.int64),\n })\n name = tf.decode_raw(features['name'], tf.uint8)\n label = tf.decode_raw(features['label'], tf.uint8)\n image = tf.decode_raw(features['image'], tf.uint8)\n height = tf.cast(features['height'], tf.int32)\n width = tf.cast(features['width'], tf.int32)\n channel = tf.cast(features['channel'], tf.int32)\n image_shape = tf.stack([height, width, channel])\n image = tf.reshape(image, image_shape)\n # set_shape是为了固定维度,方便后面使用tf.train.batch\n #label.set_shape([10])\n #name.set_shape([10])\n image.set_shape([32, 32, 3])\n # num_thread可以选择用几个线程同时读取example queue,\n # min_after_dequeue表示读取一次之后队列至少需要剩下的样例数目,capacity表示队列的容量\n names, images, labels = tf.train.batch([name, image, label], batch_size=batch_size,\n capacity=512 + 4 * batch_size)\n return names, images, labels\n\n\ndef check_tfrecord():\n path = r'D:\\qxliu\\ordinal_clouds\\datasets\\clouds.shuffle.train.tfrecord'\n name_op, label_op, image_op = tfrecord_reader(path)\n import cv2 as cv\n with tf.Session() as sess:\n init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())\n sess.run(init_op)\n\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n try:\n while True:\n name, label, img = sess.run([name_op, label_op, image_op])\n img = np.squeeze(img, axis=0)\n label = np.squeeze(label, axis=0)\n print(name.tostring().decode('utf8'))\n print(label.tostring().decode('utf8'))\n # plt.imshow(img)\n cv.imshow('img', img)\n cv.waitKey(50)\n except 
tf.errors.OutOfRangeError:\n print('End')\n finally:\n coord.request_stop()\n coord.join(threads) # 等待各线程关闭\n\n\ndef init_binary_dataset(save_name, tag, datatype, shuffle):\n def init_img_reader(input_dir, class_list, img_resize=None, channels=3, shuffle=False):\n assert channels in [1, 3]\n resize = img_resize is not None and type(img_resize) in [list, np.ndarray] and len(img_resize) == 2\n\n def _parse_function(file_path, label):\n '''Decode image\n :param file_path: shape[]\n :param label: shape[]\n :return a string of image file path, a tensor of image, a label string of image\n '''\n x_img_str = tf.read_file(file_path) # shape[]\n x_img = tf.image.decode_jpeg(x_img_str, channels=channels) # shape[?,?,channels]\n if resize:\n x_img = tf.image.resize_images(x_img, size=img_resize,\n method=tf.image.ResizeMethod.BILINEAR) # shape[img_resize,channels]\n if shuffle: # 随机亮度对比度色相翻转\n # ToDO: all images do with these\n x_img = tf.image.random_brightness(x_img, max_delta=0.25)\n x_img = tf.image.random_contrast(x_img, lower=0.75, upper=1.5)\n # x_img = tf.image.random_hue(x_img, max_delta=0.5)\n x_img = tf.image.random_flip_up_down(x_img)\n x_img = tf.image.random_flip_left_right(x_img)\n return file_path, x_img, label\n\n files = []\n labels = []\n for cls in class_list:\n dir = os.path.join(input_dir, cls)\n if not os.path.exists(dir):\n print('path %s not exist' % dir)\n continue\n fs = os.listdir(dir)\n fs = [os.path.join(dir, item) for item in fs]\n files.extend(fs)\n labels.extend([cls] * len(fs))\n count = len(files)\n if shuffle:\n import random\n idx = list(range(count))\n random.shuffle(idx)\n sfl_files = []\n sfl_labels = []\n for i in idx:\n sfl_files.append(files[i])\n sfl_labels.append(labels[i])\n files = sfl_files\n labels = sfl_labels\n # Initialize as a tensorflow tensor object\n data = tf.data.Dataset.from_tensor_slices((tf.constant(files, dtype=tf.string, name='file_path'),\n tf.constant(labels, name='label')))\n data = data.map(_parse_function)\n # if shuffle:\n # data = data.shuffle(batch_count)\n return data, count\n\n assert tag in ['train', 'validation']\n assert datatype in ['tfrecord', 'json', 'h5']\n\n def _bytes_feature(value): # 生成字符串型的属性\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\n def _int64_feature(value): # 生成整数型的属性\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\n # ToDO: arrange more datatype\n input_dir = os.path.join(INPUT_DIR, tag)\n output_dir = OUTPUT_DIR\n mk_childfolders(output_dir)\n reader, count = init_img_reader(input_dir, class_list=CLASS_LIST,\n img_resize=[32, 32], channels=OUTPUT_CHANNEL, shuffle=shuffle)\n fpath, xs, ys = reader.make_one_shot_iterator().get_next()\n # param batch_xs: shape [32, 32, channels] type tf.uint8\n # param batch_ys: shape [1] type tf.int32\n logging.basicConfig(filename=os.path.join(OUTPUT_DIR, 'log.txt'), level=logging.DEBUG)\n if shuffle:\n save_name += '.shuffle'\n else:\n save_name += '.noshuffle'\n output_path = os.path.join(output_dir, '{}.{}.{}'.format(save_name, tag, datatype))\n\n with tf.Session() as sess:\n if datatype == 'tfrecord':\n with tf.python_io.TFRecordWriter(output_path) as writer:\n perc = count / 100\n perc = 1 if perc < 1 else int(perc)\n step = 0\n while True:\n try:\n org_path, img, label = sess.run([fpath, xs, ys])\n org_name = os.path.basename(org_path.decode()).split('.')[0]\n example = tf.train.Example(features=tf.train.Features(feature={\n 'name': _bytes_feature(org_name.encode('utf8')),\n 'label': _bytes_feature(label),\n 
'height': _int64_feature(32),\n 'width': _int64_feature(32),\n 'channel': _int64_feature(3),\n 'image': _bytes_feature(img.tostring())\n }))\n writer.write(example.SerializeToString())\n if int(org_name) % perc == 0:\n processBar(step, count, msg=\"Initizing \" + output_path)\n step += 1\n except tf.errors.OutOfRangeError:\n processBar(step, count, msg=\"Initizing \" + output_path)\n print('Finish!')\n break\n elif datatype == 'json':\n pass\n elif datatype == 'h5':\n pass\n\n\ndef main():\n def _make_separate_dataset():\n print('Begin to initialize training dataset...')\n init_separate_dataset('train', 0, -1)\n print('Begin to initialize validation dataset...')\n init_separate_dataset('validation', 0, -1)\n\n def _make_tfrecord_dataset():\n print('Begin to initialize training dataset...')\n init_binary_dataset('clouds', tag='train', datatype='tfrecord', shuffle=True)\n print('Begin to initialize validation dataset...')\n init_binary_dataset(save_name='clouds', tag='validation', datatype='tfrecord', shuffle=True)\n\n begintime = datetime.now()\n _make_separate_dataset()\n # _make_tfrecord_dataset()\n # check_tfrecord()\n endtime = datetime.now()\n print('All dataset initialized! Span Time:%s' % (endtime - begintime))\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "tensorflow.train.start_queue_runners", "tensorflow.train.Int64List", "tensorflow.image.random_flip_left_right", "tensorflow.image.random_flip_up_down", "tensorflow.reshape", "tensorflow.stack", "tensorflow.local_variables_initializer", "pandas.read_csv", "tensorflow.global_variables_initializer", "tensorflow.image.decode_jpeg", "tensorflow.cast", "tensorflow.read_file", "tensorflow.FixedLenFeature", "tensorflow.image.random_brightness", "tensorflow.constant", "tensorflow.squeeze", "numpy.arange", "tensorflow.split", "tensorflow.train.batch", "tensorflow.train.Coordinator", "tensorflow.Session", "tensorflow.python_io.TFRecordWriter", "tensorflow.train.string_input_producer", "tensorflow.image.resize_images", "numpy.squeeze", "tensorflow.train.BytesList", "tensorflow.decode_raw", "tensorflow.image.random_contrast", "tensorflow.TFRecordReader", "tensorflow.image.crop_to_bounding_box" ] ]
RE-OWOD/RE-OWOD
[ "5f502491a29cb47fed56e2bbbf2b4122906b3e78" ]
[ "datasets/coco_utils/create_t3_imageset.py" ]
[ "from pycocotools.coco import COCO\nimport numpy as np\n\nT3_CLASS_NAMES = [\n \"frisbee\", \"skis\", \"snowboard\", \"sports ball\", \"kite\",\n \"baseball bat\", \"baseball glove\", \"skateboard\", \"surfboard\", \"tennis racket\",\n \"banana\", \"apple\", \"sandwich\", \"orange\", \"broccoli\",\n \"carrot\", \"hot dog\", \"pizza\", \"donut\", \"cake\"\n]\n\n# Train\ncoco_annotation_file = '/home/datasets/mscoco/annotations/instances_train2017.json'\ndest_file = '/home/OWOD/datasets/coco17_voc_style/ImageSets/t3_train_sel.txt'\n\ncoco_instance = COCO(coco_annotation_file)\n\nimage_ids = []\ncls = []\nfor index, image_id in enumerate(coco_instance.imgToAnns):\n image_details = coco_instance.imgs[image_id]\n classes = [coco_instance.cats[annotation['category_id']]['name'] for annotation in coco_instance.imgToAnns[image_id]]\n if not set(classes).isdisjoint(T3_CLASS_NAMES):\n image_ids.append(image_details['file_name'].split('.')[0])\n cls.extend(classes)\n\n(unique, counts) = np.unique(cls, return_counts=True)\nprint({x:y for x,y in zip(unique, counts)})\n\nwith open(dest_file, 'w') as file:\n for image_id in image_ids:\n file.write(str(image_id)+'\\n')\n\nprint('Created train file')\n\n# Test\ncoco_annotation_file = '/home/datasets/mscoco/annotations/instances_val2017.json'\ndest_file = '/home/OWOD/datasets/coco17_voc_style/ImageSets/t3_test.txt'\n\ncoco_instance = COCO(coco_annotation_file)\n\nimage_ids = []\ncls = []\nfor index, image_id in enumerate(coco_instance.imgToAnns):\n image_details = coco_instance.imgs[image_id]\n classes = [coco_instance.cats[annotation['category_id']]['name'] for annotation in coco_instance.imgToAnns[image_id]]\n if not set(classes).isdisjoint(T3_CLASS_NAMES):\n image_ids.append(image_details['file_name'].split('.')[0])\n cls.extend(classes)\n\n(unique, counts) = np.unique(cls, return_counts=True)\nprint({x:y for x,y in zip(unique, counts)})\n\nwith open(dest_file, 'w') as file:\n for image_id in image_ids:\n file.write(str(image_id)+'\\n')\nprint('Created test file')\n\ndest_file = '/home/OWOD/datasets/coco17_voc_style/ImageSets/t3_test_unk.txt'\nwith open(dest_file, 'w') as file:\n for image_id in image_ids:\n file.write(str(image_id)+'\\n')\n\nprint('Created test_unk file')\n" ]
[ [ "numpy.unique" ] ]
wckdouglas/cfNA
[ "1e3d25bdb067b43d24c394bf586a33f372e3d4e2" ]
[ "peak_callings/EV_peaks.py" ]
[ "#!/usr/bin/env python\n\nimport pandas as pd\nimport pysam\nimport sys\n\nif len(sys.argv) != 3:\n sys.exit('python %s <peak_coor> <bed.gz file> ' %sys.argv[0])\n\nclass peak_count:\n '''\n parse peak coordinates (peak file),\n count tabix reads for each peak\n '''\n def __init__(self, peaks, tabix):\n self.tabix = pysam.Tabixfile(tabix)\n self.peak_df = pd.read_csv(peaks, sep='\\t') \n\n def count_reads(self, chrom, start, end, strand):\n try:\n reads = self.tabix.fetch(chrom, start, end)\n read_count = sum(1 for r in reads if r.strip().split('\\t')[5] == strand)\n except ValueError:\n read_count = 0\n return read_count\n\n def peak_counting(self):\n return self.peak_df \\\n .assign(EV_count = lambda d: list(map(self.count_reads, \n d.chrom, \n d.start, \n d.end, \n d.strand))) \n\n\npc = peak_count(sys.argv[1], sys.argv[2])\npc.peak_counting()\\\n .to_csv(sys.stdout, sep='\\t', index=False)\n\n \n" ]
[ [ "pandas.read_csv" ] ]
MCZhi/nuplan-devkit
[ "3c4f5b8dcd517b27cfd258915ca5fe5c54e3cb0c" ]
[ "nuplan/planning/metrics/evaluation_metrics/common/ego_lon_jerk.py" ]
[ "from typing import List\n\nimport numpy as np\nfrom nuplan.planning.metrics.abstract_metric import AbstractMetricBuilder\nfrom nuplan.planning.metrics.metric_result import MetricStatistics, MetricStatisticsType, Statistic, TimeSeries\nfrom nuplan.planning.metrics.utils.state_extractors import extract_ego_jerk, extract_ego_time_point\nfrom nuplan.planning.simulation.history.simulation_history import SimulationHistory\n\n\nclass EgoLonJerkStatistics(AbstractMetricBuilder):\n\n def __init__(self, name: str, category: str) -> None:\n \"\"\"\n Ego longitudinal jerk metric.\n :param name: Metric name.\n :param category: Metric category.\n \"\"\"\n\n self._name = name\n self._category = category\n\n @property\n def name(self) -> str:\n \"\"\"\n Returns the metric name.\n :return: the metric name.\n \"\"\"\n\n return self._name\n\n @property\n def category(self) -> str:\n \"\"\"\n Returns the metric category.\n :return: the metric category.\n \"\"\"\n\n return self._category\n\n def compute(self, history: SimulationHistory) -> List[MetricStatistics]:\n \"\"\"\n Returns the estimated metric.\n :param history: History from a simulation engine.\n :return: the estimated metric.\n \"\"\"\n\n # Ego velocities are defined in ego's local frame\n acceleration_x = np.array([sample.ego_state.dynamic_car_state.center_acceleration_2d.x\n for sample in history.data])\n lon_jerk = extract_ego_jerk(history, accelerations=acceleration_x)\n timestamps = extract_ego_time_point(history=history)\n statistics = {MetricStatisticsType.MAX:\n Statistic(name=\"ego_max_lon_jerk\", unit=\"meters_per_second_cubed\",\n value=np.amax(lon_jerk)),\n MetricStatisticsType.MIN:\n Statistic(name=\"ego_min_lon_jerk\", unit=\"meters_per_second_cubed\",\n value=np.amin(lon_jerk)),\n MetricStatisticsType.P90:\n Statistic(name=\"ego_p90_lon_jerk\", unit=\"meters_per_second_cubed\",\n value=np.percentile(np.abs(lon_jerk), 90)), # type:ignore\n }\n\n time_series = TimeSeries(unit='meters_per_second_cubed',\n time_stamps=list(timestamps),\n values=list(lon_jerk))\n result = MetricStatistics(metric_computator=self.name,\n name=\"ego_lon_jerk_statistics\",\n statistics=statistics,\n time_series=time_series, metric_category=self.category)\n return [result]\n" ]
[ [ "numpy.array", "numpy.amax", "numpy.abs", "numpy.amin" ] ]
MuZiGuYue/VideoSuperResolution
[ "dc8bf94aa65c1a4e92e6024ca77b402f5b252fcf" ]
[ "VSRTorch/Models/Vespcn.py" ]
[ "# Copyright (c): Wenyi Tang 2017-2019.\n# Author: Wenyi Tang\n# Email: [email protected]\n# Update Date: 2019/4/3 下午5:10\n\nimport torch\nimport torch.nn.functional as F\n\nfrom .Model import SuperResolution\nfrom .vespcn import ops\nfrom ..Framework.Summary import get_writer\nfrom ..Util import Metrics\n\n\nclass VESPCN(SuperResolution):\n def __init__(self, scale, channel, depth=3, **kwargs):\n super(VESPCN, self).__init__(scale, channel, **kwargs)\n self.vespcn = ops.VESPCN(scale, channel, depth)\n self.opt = torch.optim.Adam(self.trainable_variables(), 1e-4)\n self.depth = depth\n\n def train(self, inputs, labels, learning_rate=None):\n frames = torch.split(inputs[0], 1, dim=1)\n frames = [f.squeeze(1) for f in frames]\n sr, warps, flows = self.vespcn(*frames)\n targets = torch.split(labels[0], 1, dim=1)\n targets = [t.squeeze(1) for t in targets]\n target = targets[self.depth // 2]\n ref = frames[self.depth // 2]\n\n loss_content = F.mse_loss(sr, target)\n loss_flow = torch.sum(torch.stack([F.mse_loss(ref, w) for w in warps]))\n loss_tv = torch.sum(torch.stack([Metrics.total_variance(f) for f in flows]))\n\n loss = loss_content + loss_flow + 0.01 * loss_tv\n if learning_rate:\n for param_group in self.opt.param_groups:\n param_group[\"lr\"] = learning_rate\n self.opt.zero_grad()\n loss.backward()\n self.opt.step()\n return {\n 'image': loss_content.detach().cpu().numpy(),\n 'flow': loss_flow.detach().cpu().numpy(),\n 'tv': loss_tv.detach().cpu().numpy(),\n }\n\n def eval(self, inputs, labels=None, **kwargs):\n metrics = {}\n frames = torch.split(inputs[0], 1, dim=1)\n frames = [torch.squeeze(f, dim=1) for f in frames]\n sr, warps, flows = self.vespcn(*frames)\n sr = sr.cpu().detach()\n if labels is not None:\n targets = torch.split(labels[0], 1, dim=1)\n targets = [t.squeeze(1) for t in targets]\n hr = targets[self.depth // 2]\n metrics['psnr'] = Metrics.psnr(sr, hr)\n writer = get_writer(self.name)\n if writer is not None:\n step = kwargs['epoch']\n writer.image('clean', sr, step=step)\n writer.image('warp/0', warps[0], step=step)\n writer.image('warp/1', warps[-1], step=step)\n return [sr.numpy()], metrics\n" ]
[ [ "torch.nn.functional.mse_loss", "torch.split", "torch.squeeze" ] ]
JoOkuma/torch-em
[ "68b723683f9013723a0e4fc8cfef1d6a2a9c9dff" ]
[ "torch_em/transform/raw.py" ]
[ "import numpy as np\n\n\n#\n# normalization functions\n#\n\n\ndef standardize(raw, mean=None, std=None, axis=None, eps=1e-7):\n raw = raw.astype('float32')\n\n mean = raw.mean(axis=axis, keepdims=True) if mean is None else mean\n raw -= mean\n\n std = raw.std(axis=axis, keepdims=True) if std is None else std\n raw /= (std + eps)\n\n return raw\n\n\ndef normalize(raw, minval=None, maxval=None, axis=None, eps=1e-7):\n raw = raw.astype('float32')\n\n minval = raw.min(axis=axis, keepdims=True) if minval is None else minval\n raw -= minval\n\n maxval = raw.max(axis=axis, keepdims=True) if maxval is None else maxval\n raw /= (maxval + eps)\n\n return raw\n\n\ndef normalize_percentile(raw, lower=1.0, upper=99.0, axis=None, eps=1e-7):\n v_lower = np.percentile(raw, lower, axis=axis, keepdims=True)\n v_upper = np.percentile(raw, upper, axis=axis, keepdims=True) - v_lower\n return normalize(raw, v_lower, v_upper, eps=eps)\n\n\n# TODO\n#\n# intensity augmentations / noise augmentations\n#\n\n\n#\n# defect augmentations\n#\n\n\n# TODO more defect types\nclass EMDefectAugmentation:\n def __init__(self, p_drop_slice):\n self.p_drop_slice = p_drop_slice\n\n def __call__(self, raw):\n for z in range(raw.shape[0]):\n if np.random.rand() < self.p_drop_slice:\n raw[z] = 0\n return raw\n\n\n#\n# default transformation:\n# apply intensity augmentations and normalize\n#\n\nclass RawTransform:\n def __init__(self, normalizer, augmentation1=None, augmentation2=None):\n self.normalizer = normalizer\n self.augmentation1 = augmentation1\n self.augmentation2 = augmentation2\n\n def __call__(self, raw):\n if self.augmentation1 is not None:\n raw = self.augmentation1(raw)\n raw = self.normalizer(raw)\n if self.augmentation2 is not None:\n raw = self.augmentation2(raw)\n return raw\n\n\ndef get_raw_transform(normalizer=standardize, augmentation1=None, augmentation2=None):\n return RawTransform(normalizer,\n augmentation1=augmentation1,\n augmentation2=augmentation2)\n" ]
[ [ "numpy.percentile", "numpy.random.rand" ] ]
OOXXXXOO/WSNet
[ "b64aa7d80fe0a7aa8a440f2bb6df1f1e497a7620" ]
[ "Src/.ipynb_checkpoints/config-checkpoint.py" ]
[ "# **************************************************************************** #\n# #\n# ::: :::::::: #\n# config.py :+: :+: :+: #\n# +:+ +:+ +:+ #\n# By: winshare <[email protected]> +#+ +:+ +#+ #\n# +#+#+#+#+#+ +#+ #\n# Created: 2020/02/28 11:45:40 by winshare #+# #+# #\n# Updated: 2020/05/28 15:01:24 by winshare ### ########.fr #\n# #\n# **************************************************************************** #\n\n# Copyright 2020 winshare\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\n\n\nimport sys\nimport os\nimport json\nimport numpy as np\nimport torch.optim as optim\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch\n\nimport torchvision.datasets as dataset\n\n# ---------------------------- Official Reference ---------------------------- #\n\n\n\n\nfrom Data.DataSets.NPY.segmentation_dataset import Costum_NPY_DataSet\nfrom Data.DataSets.CitysCapes.cityscapes import CityscapesSegmentation\nfrom Data.DataSets.COCO.coco import CocoDataset\nfrom Data.DataSets.PascalVoc.pascal import VOCSegmentation\n\n# ------------------------------ Local Reference ----------------------------- #\n\nclass CFG():\n def __init__(self):\n\n # ---------------------------------------------------------------------------- #\n # init process #\n # ---------------------------------------------------------------------------- #\n for i in range(5):\n print(\"#####------------------------------------------------------------------#####\")\n print(\"#####-------------------------<===== WSNET =====>----------------------#####\")\n for i in range(5):\n print(\"#####------------------------------------------------------------------#####\")\n\n \n print(\"\\n\\n# -----Decode Config File :\",self.configfile,\"-----#\")\n\n\n\n\n\n # ---------------------------------------------------------------------------- #\n # init process #\n # ---------------------------------------------------------------------------- #\n\n # ---------------------------------------------------------------------------- #\n # Pytorch Function Dictionary #\n # ---------------------------------------------------------------------------- #\n\n self.datasets_function_dict={\n \"Classification\":{\n \"MINST\":dataset.MNIST,\n \"FashionMINST\":dataset.FashionMNIST,\n \"KMINST\":dataset.KMNIST,\n \"EMINST\":dataset.EMNIST,\n \"CIFAR10\":dataset.CIFAR10,\n \"CIFAR100\":dataset.CIFAR100,\n \"ImageNet\":dataset.ImageNet\n },\n \"Detection\":{\n \"CocoDetection\":CocoDataset,\n \"VOC_Detection\":dataset.VOCDetection\n },\n \"Segmentation\":{\n \"VOC_Segmentation\":dataset.VOCSegmentation,\n \"Cityscapes\":dataset.Cityscapes,\n \"Costum_NPY_DataSet\":Costum_NPY_DataSet,\n \"CocoSegmentation\":CocoDataset\n },\n \"Caption\":{\n \"CocoCaptions\":dataset.CocoCaptions\n },\n \"InstenceSegmentation\":{\n \"CocoDetection\":CocoDataset\n }\n }\n self.dataset_support_list=self.datasets_function_dict.keys()\n\n\n # ---------------------------------------------------------------------------- #\n self.OptimDict={\n \"SGD\":optim.SGD, 
\n \"ASGD\":optim.ASGD,\n \"Adam\":optim.Adam,\n \"Adadelta\":optim.Adadelta,\n \"Adagrad\":optim.Adagrad,\n \"AdamW\":optim.AdamW,\n \"LBFGS\":optim.LBFGS,\n \"RMSprop\":optim.RMSprop,\n \"SparseAdam\":optim.SparseAdam,\n \"Adamax\":optim.Adamax\n }\n # ---------------------------------------------------------------------------- #\n self.Loss_Function_Dict={\n \"AdaptiveLogSoftmaxWithLoss\":nn.AdaptiveLogSoftmaxWithLoss\n ,\"BCELoss\":nn.BCELoss \n ,\"BCEWithLogitsLoss\":nn.BCEWithLogitsLoss \n ,\"CosineEmbeddingLoss\":nn.CosineEmbeddingLoss \n ,\"CrossEntropyLoss\":nn.CrossEntropyLoss \n ,\"CTCLoss\":nn.CTCLoss \n ,\"cosine_embedding_loss\":F.cosine_embedding_loss \n ,\"ctc_loss\":F.ctc_loss\n ,\"hinge_embedding_loss\":F.hinge_embedding_loss \n ,\"l1_loss\":F.l1_loss \n ,\"margin_ranking_loss\":F.margin_ranking_loss \n ,\"mse_loss\":F.mse_loss \n ,\"multi_margin_loss\":F.mse_loss \n ,\"multilabel_margin_loss\":F.multilabel_margin_loss \n ,\"multilabel_soft_margin_loss\":F.multilabel_margin_loss \n ,\"nll_loss\":F.nll_loss \n ,\"poisson_nll_loss\":F.poisson_nll_loss \n ,\"smooth_l1_loss\":F.smooth_l1_loss \n ,\"soft_margin_loss\":F.soft_margin_loss \n ,\"triplet_margin_loss\":F.triplet_margin_loss \n ,\"HingeEmbeddingLoss\":nn.HingeEmbeddingLoss \n ,\"KLDivLoss\":nn.KLDivLoss \n ,\"L1Loss\":nn.L1Loss \n ,\"MarginRankingLoss\":nn.MarginRankingLoss \n ,\"MSELoss\":nn.MSELoss \n ,\"MultiLabelMarginLoss\":nn.MultiLabelMarginLoss \n ,\"MultiLabelSoftMarginLoss\":nn.MultiLabelSoftMarginLoss \n ,\"MultiMarginLoss\":nn.MultiMarginLoss \n ,\"NLLLoss\":nn.MultiMarginLoss \n ,\"PoissonNLLLoss\":nn.PoissonNLLLoss \n ,\"SmoothL1Loss\":nn.SmoothL1Loss \n ,\"SoftMarginLoss\":nn.SoftMarginLoss \n ,\"TripletMarginLoss\":nn.TripletMarginLoss\n }\n \n\n\n\n # ---------------------------------------------------------------------------- #\n\n self.Lr_Dict={\n \"StepLR\":optim.lr_scheduler.StepLR,\n \"MultiStepLR\":optim.lr_scheduler.MultiStepLR,\n \"ExponentialLR\":optim.lr_scheduler.ExponentialLR,\n \"CosineAnnealingLR\":optim.lr_scheduler.CosineAnnealingLR,\n \"ReduceLROnPlateau\":optim.lr_scheduler.ReduceLROnPlateau,\n \"CyclicLR\":optim.lr_scheduler.CyclicLR,\n \"OneCycleLR\":optim.lr_scheduler.OneCycleLR,\n \"CosineAnnealingWarmRestarts\":optim.lr_scheduler.CosineAnnealingWarmRestarts\n }\n\n # ---------------------------------------------------------------------------- #\n # Config in 3 Level #\n # ---------------------------------------------------------------------------- #\n\n\n # -------------------------------- File Level -------------------------------- #\n self.__configfile=self.configfile\n self.__json=json.load(open(self.__configfile,'r'))\n self.usegpu=False\n\n self.MissionType=self.__json['MissionType']\n self.InstanceID=self.__json['instance_id']\n self.Content=self.__json['content']\n\n\n # ------------------------------- Second Level ------------------------------- #\n self.Net=self.Content['Net']\n self.DataSetConfig=self.Content['Dataset']\n self.Config=self.Content['Config']\n\n print('\\n\\n# ---------------------------------- config ---------------------------------- #')\n \n print(\"# ------------------------------ NETWORK CONFIG ------------------------------ #\")\n self.print_dict(self.Net)\n print(\"# ------------------------------ NETWORK CONFIG ------------------------------ #\")\n\n print(\"# ------------------------------ DATASET CONFIG ------------------------------ #\")\n self.print_dict(self.DataSetConfig)\n print(\"# ------------------------------ DATASET CONFIG 
------------------------------ #\")\n\n print(\"# ------------------------------ GENERAL CONFIG ------------------------------ #\")\n self.print_dict(self.Config)\n print(\"# ------------------------------ GENERAL CONFIG ------------------------------ #\")\n\n print('# ---------------------------------- config ---------------------------------- #')\n\n\n # -------------------------------- Third Level ------------------------------- #\n # ---------------------------------------------------------------------------- #\n # NET #\n # ---------------------------------------------------------------------------- #\n\n # self.NetType=self.Net['NetType']\n self.DefaultNetwork=self.Net[\"DefaultNetwork\"]\n \n \n self.BatchSize=self.Net['BatchSize']\n if self.Net['BackBone']=='None':\n self.BackBone=None\n else:\n self.BackBone=self.Net['BackBone']\n self.NetType=self.Net[\"NetType\"]\n \n\n # --------------------------------- Optimizer -------------------------------- #\n\n self.optimizer=self.OptimDict[self.Net['Optimizer']]\n self.learning_rate=self.Net['learning_rate']\n self.momentum=self.Net['momentum']\n self.weight_decay=self.Net['weight_decay']\n\n # ------------------------------- lr_scheduler ------------------------------- #\n\n self.lr_scheduler=self.Net['lr_scheduler']\n self.lr_steps=self.Net['lr_steps']\n self.lr_gamma=self.Net['lr_gamma']\n self.lr_scheduler=self.Lr_Dict[self.lr_scheduler]\n self.class_num=self.Net['class_num']\n \n # ------------------------------- Loss Function ------------------------------ #\n\n self.Loss_Function=self.Loss_Function_Dict[self.Net['Loss_Function']]()\n \n\n\n # ---------------------------------------------------------------------------- #\n # Dataset #\n # ---------------------------------------------------------------------------- #\n\n \n self.DataSetType=self.DataSetConfig['Type']\n self.DataSet_Root=self.DataSetConfig['root']\n \n\n self.Dataset_Train_file=os.path.join(self.DataSet_Root,self.DataSetConfig['train_index_file'])\n self.Dataset_Val_file=os.path.join(self.DataSet_Root,self.DataSetConfig['val_index_file'])\n self.DefaultDataset=self.DataSetConfig['DefaultDataset']\n self.NPY=self.DataSetConfig[\"NPY\"]\n if os.path.exists(self.NPY):\n self.NPY_Data=np.load(self.NPY,allow_pickle=True)\n \n \n self.SFT_Enable=self.DataSetConfig[\"SFT_Enable\"]\n\n # --------------------------------- Transform (Aborted)------------------------#\n # ---------------------------------------------------------------------------- #\n\n\n \"\"\"\n Because the defalut detection network has transform flow \n so the image list should include 3d tensors\n \n [\n [C, H, W],\n [C, H, W].....\n ]\n\n Target should be \n list of dict :\n {\n boxes: list of box tensor[n,4] (float32)\n masks: list of segmentation mask points [n,n] (float32)\n keypoints: list of key pointss[n,n] (float32)\n labels: list of index of label[n] (int64)\n }\n \n For Default Detection:\n\n The transformations it perform are:\n - input normalization (mean subtraction and std division)\n - input / target resizing to match min_size / max_size\n\n It returns a ImageList for the inputs, and a List[Dict[Tensor]] for the targets\n \n \"\"\"\n # print('\\n\\n--------------------------------- Transform --------------------------------')\n # self.TransformDict=self.DataSetConfig['Transform']\n # functionlist=[list(i.keys())[0] for i in self.TransformDict]\n # paralist=[list(i.values())[0] for i in self.TransformDict]\n \n \n \n \n # self.transforms=GeneralTransform(self.TransformDict)\n \n\n\n\n\n # 
---------------------------------------------------------------------------- #\n # Config #\n # ---------------------------------------------------------------------------- #\n \n self.DistributedDataParallel=self.Config['DistributedDataParallel']\n self.resume=self.Config['Resume']\n self.checkpoint=self.Config['checkpoint_path']\n self.MultiScale_Training=self.Config['multiscale_training']\n self.logdir=self.Config['logdir']\n self.devices=self.Config['devices']\n self.pre_estimation=self.Config['pre_estimation']\n\n if not os.path.exists(self.checkpoint):\n os.makedirs(self.checkpoint)\n\n\n \n if self.devices=='GPU':\n self.usegpu=True\n self.gpu_id=self.Config['gpu_id']\n # os.environ['CUDA_VISIBLE_DEVICES']=str(self.gpu_id)\n self.device = torch.device(\"cuda:\"+str(self.gpu_id) if torch.cuda.is_available() else \"cpu\")\n print('#-----Device:\\n',self.device)\n \n if self.devices=='CPU':\n self.device=torch.device(\"cpu\")\n\n\n self.download_pretrain_model=self.Config['down_pretrain_model']\n self.visualization=self.Config['visualization']\n self.worker_num=self.Config['worker_num']\n self.epochs=self.Config['epochs']\n self.aspect_ratio_factor=self.Config['group_factor']\n\n print(\"# ---------------------------------------------------------------------------- #\")\n print(\"# Configure Class Init Successful #\")\n print(\"# ---------------------------------------------------------------------------- #\")\n self.Enviroment_Info()\n\n # ---------------------------------------------------------------------------- #\n # Config Class Function #\n # ---------------------------------------------------------------------------- #\n\n def GenerateDefaultConfig(self,mode='detection'):\n print('Generate Default Config with mode :',mode)\n \n def configinfo(self):\n print('***** Already read Config file ,'+self.__configfile,'*****')\n print('***** Instance ID : ',self.InstanceID,'*****')\n print('***** Mission Type : ',self.MissionType,'*****')\n\n def Enviroment_Info(self):\n print(\"\\n\\n# --------------------------------- NVCC INFO -------------------------------- #\\n\\n\")\n os.system('nvcc -V')\n print(\"\\n\\n# --------------------------------- NVCC INFO -------------------------------- #\\n\\n\")\n \n print(\"\\n\\n# --------------------------------- GPU INFO --------------------------------- #\")\n os.system('nvidia-smi')\n print(\"# --------------------------------- GPU INFO --------------------------------- #\\n\\n\")\n \n def print_dict(self,d,n=0):\n length=74\n for k,v in d.items():\n # print ('\\t'*n)\n if type(v)==type({}):\n print(\"%s : {\" % k)\n self.print_dict(v,n+1)\n else:\n strl=len(str(k))+len(str(v))\n space=length-strl\n print(\"# %s : %s\" % (k,v)+\" \"*space+\"#\")\n if n!=0:\n print('\\t'*(n-1)+ '}')\n" ]
[ [ "torch.device", "torch.cuda.is_available", "numpy.load" ] ]
attardi/iwpt-shared-task-2020
[ "3a70c42d53716678776afcccf02d896655777353" ]
[ "edparser/layers/embeddings/contextual_string_embedding.py" ]
[ "# -*- coding:utf-8 -*-\n# Author: hankcs\n# Date: 2019-12-19 03:24\nfrom typing import List\n\nimport tensorflow as tf\nimport numpy as np\nfrom edparser.components.rnn_language_model import RNNLanguageModel\nfrom edparser.common.constant import PAD\nfrom edparser.utils.io_util import get_resource\nfrom edparser.utils.tf_util import copy_mask, hanlp_register, str_tensor_2d_to_list\nfrom edparser.utils.util import infer_space_after\n\n\n@hanlp_register\nclass ContextualStringEmbedding(tf.keras.layers.Layer):\n\n def __init__(self, forward_model_path=None, backward_model_path=None, max_word_len=10,\n trainable=False, name=None, dtype=None,\n dynamic=True, **kwargs):\n assert dynamic, 'ContextualStringEmbedding works only in eager mode'\n super().__init__(trainable, name, dtype, dynamic, **kwargs)\n assert any([forward_model_path, backward_model_path]), 'At least one model is required'\n self.forward_model_path = forward_model_path\n self.backward_model_path = backward_model_path\n self.forward_model = self._load_lm(forward_model_path) if forward_model_path else None\n self.backward_model = self._load_lm(backward_model_path) if backward_model_path else None\n if trainable:\n self._fw = self.forward_model.model\n self._bw = self.backward_model.model\n for m in self._fw, self._bw:\n m.trainable = True\n self.supports_masking = True\n self.max_word_len = max_word_len\n\n def call(self, inputs, **kwargs):\n str_inputs = str_tensor_2d_to_list(inputs)\n outputs = self.embed(str_inputs)\n copy_mask(inputs, outputs)\n return outputs\n\n def _load_lm(self, filepath):\n filepath = get_resource(filepath)\n lm = RNNLanguageModel()\n lm.load(filepath)\n model: tf.keras.Sequential = lm.model\n for idx, layer in enumerate(model.layers):\n if isinstance(layer, tf.keras.layers.LSTM):\n lm.model = tf.keras.Sequential(model.layers[:idx + 1]) # discard dense layer\n return lm\n\n def embed(self, texts: List[List[str]]):\n \"\"\"\n Embedding sentences (list of words) with contextualized string embedding\n\n Parameters\n ----------\n texts :\n List of words, not chars\n\n Returns\n -------\n tf.Tensor\n A 3d tensor of (batch, num_words, hidden)\n \"\"\"\n fw = None\n if self.forward_model:\n fw = self._run_rnn(texts, model=self.forward_model)\n bw = None\n if self.backward_model:\n bw = self._run_rnn(texts, model=self.backward_model)\n if not all(x is not None for x in [fw, bw]):\n return fw if fw is not None else bw\n else:\n return tf.concat([fw, bw], axis=-1)\n\n def _run_rnn(self, texts, model):\n embeddings = []\n inputs = []\n offsets = []\n tokenizer = model.transform.tokenize_func()\n backward = not model.config['forward']\n for sent in texts:\n raw, off = self._get_raw_string(sent, tokenizer)\n inputs.append(raw)\n offsets.append(off)\n outputs = model.model.predict(model.transform.inputs_to_dataset(inputs))\n if backward:\n outputs = tf.reverse(outputs, axis=[1])\n maxlen = len(max(texts, key=len))\n for hidden, off, sent in zip(outputs, offsets, texts):\n embed = []\n for (start, end), word in zip(off, sent):\n embed.append(hidden[end - 1, :])\n if len(embed) < maxlen:\n embed += [np.zeros_like(embed[-1])] * (maxlen - len(embed))\n embeddings.append(np.stack(embed))\n return tf.stack(embeddings)\n\n def _get_raw_string(self, sent: List[str], tokenizer):\n raw_string = []\n offsets = []\n whitespace_after = infer_space_after(sent)\n start = 0\n for word, space in zip(sent, whitespace_after):\n chars = tokenizer(word)\n chars = chars[:self.max_word_len]\n if space:\n chars += [' ']\n end = start + 
len(chars)\n offsets.append((start, end))\n start = end\n raw_string += chars\n return raw_string, offsets\n\n def get_config(self):\n config = {\n 'forward_model_path': self.forward_model_path,\n 'backward_model_path': self.backward_model_path,\n 'max_word_len': self.max_word_len,\n }\n base_config = super(ContextualStringEmbedding, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n @property\n def output_dim(self):\n dim = 0\n for model in self.forward_model, self.backward_model:\n if model:\n dim += model.config['rnn_units']\n return dim\n\n def compute_output_shape(self, input_shape):\n return input_shape + self.output_dim\n\n def compute_mask(self, inputs, mask=None):\n\n return tf.not_equal(inputs, PAD)\n" ]
[ [ "numpy.zeros_like", "tensorflow.concat", "tensorflow.not_equal", "tensorflow.keras.Sequential", "tensorflow.reverse", "numpy.stack", "tensorflow.stack" ] ]
BonneelP/pyleecan
[ "29e6b4358420754993af1a43048aa12d1538774e" ]
[ "Tests/Validation/Simulation/test_EM_SPMSM_FL_002.py" ]
[ "# -*- coding: utf-8 -*-\n\n# External import\nimport pytest\nfrom numpy import array, ones, pi\nfrom os.path import join\n\n# Pyleecan import\nfrom pyleecan.Classes.ImportGenVectLin import ImportGenVectLin\nfrom pyleecan.Classes.ImportMatrixVal import ImportMatrixVal\nfrom pyleecan.Classes.Simu1 import Simu1\nfrom pyleecan.Classes.InputCurrent import InputCurrent\nfrom pyleecan.Classes.MagFEMM import MagFEMM\nfrom pyleecan.Classes.Output import Output\nfrom pyleecan.Functions.load import load\nfrom pyleecan.definitions import DATA_DIR\nfrom Tests import save_validation_path as save_path\n\n\[email protected]\[email protected]\[email protected]\[email protected]\ndef test_Magnetic_FEMM_sym():\n \"\"\"Validation of a polar SIPMSM with surface magnet\n Linear lamination material\n\n From publication\n Lubin, S. Mezani, and A. Rezzoug,\n “2-D Exact Analytical Model for Surface-Mounted Permanent-Magnet Motors with Semi-Closed Slots,”\n IEEE Trans. Magn., vol. 47, no. 2, pp. 479–492, 2011.\n Test compute the Flux in FEMM, with and without symmetry\n and with MANATEE semi-analytical subdomain model\n \"\"\"\n SPMSM_003 = load(join(DATA_DIR, \"Machine\", \"SPMSM_003.json\"))\n simu = Simu1(name=\"EM_SPMSM_FL_002\", machine=SPMSM_003)\n\n # Definition of the enforced output of the electrical module\n N0 = 3000\n Is = ImportMatrixVal(\n value=array(\n [\n [6.97244193e-06, 2.25353053e02, -2.25353060e02],\n [-2.60215295e02, 1.30107654e02, 1.30107642e02],\n [-6.97244208e-06, -2.25353053e02, 2.25353060e02],\n [2.60215295e02, -1.30107654e02, -1.30107642e02],\n ]\n )\n )\n time = ImportGenVectLin(start=0, stop=0.015, num=4, endpoint=True)\n Na_tot = 1024\n\n simu.input = InputCurrent(\n Is=Is,\n Ir=None, # No winding on the rotor\n N0=N0,\n angle_rotor=None, # Will be computed\n time=time,\n Na_tot=Na_tot,\n angle_rotor_initial=0.5216 + pi,\n )\n\n # Definition of the magnetic simulation (no symmetry)\n simu.mag = MagFEMM(\n type_BH_stator=2,\n type_BH_rotor=2,\n is_periodicity_a=False,\n is_get_mesh=True,\n )\n simu.force = None\n simu.struct = None\n # Copy the simu and activate the symmetry\n assert SPMSM_003.comp_periodicity() == (1, True, 1, True)\n simu_sym = Simu1(init_dict=simu.as_dict())\n simu_sym.mag.is_periodicity_a = True\n\n out = Output(simu=simu_sym)\n out.post.legend_name = \"1/2 symmetry\"\n out.post.line_color = \"r--\"\n simu_sym.run()\n\n out.mag.meshsolution.plot_mesh(\n save_path=join(save_path, \"EM_SPMSM_FL_002_mesh.png\")\n )\n\n out.mag.meshsolution.plot_mesh(\n group_names=\"stator\",\n save_path=join(save_path, \"EM_SPMSM_FL_002_mesh_stator.png\"),\n )\n\n out.mag.meshsolution.plot_mesh(\n group_names=[\"stator\", \"/\", \"airgap\", \"stator_windings\"],\n save_path=join(save_path, \"EM_SPMSM_FL_002_mesh_stator_interface.png\"),\n )\n\n out.mag.meshsolution.plot_contour(\n label=\"\\mu\", save_path=join(save_path, \"EM_SPMSM_FL_002_mu.png\")\n )\n out.mag.meshsolution.plot_contour(\n label=\"B\", save_path=join(save_path, \"EM_SPMSM_FL_002_B.png\")\n )\n out.mag.meshsolution.plot_contour(\n label=\"H\", save_path=join(save_path, \"EM_SPMSM_FL_002_H.png\")\n )\n" ]
[ [ "numpy.array" ] ]
zachbellay/pytorch-lightning
[ "479a35d94e0b83593414833c050de4065d3b6c8a" ]
[ "pytorch_lightning/loggers/neptune.py" ]
[ "\"\"\"\nLog using `neptune-logger <https://www.neptune.ml>`_\n\n.. _neptune:\n\nNeptuneLogger\n--------------\n\"\"\"\nimport argparse\nfrom logging import getLogger\nfrom typing import Optional, List, Dict, Any, Union, Iterable\n\ntry:\n import neptune\n from neptune.experiments import Experiment\nexcept ImportError:\n raise ImportError('You want to use `neptune` logger which is not installed yet,'\n ' please install it e.g. `pip install neptune-client`.')\n\nfrom torch import is_tensor\n\n# from .base import LightningLoggerBase, rank_zero_only\nfrom pytorch_lightning.loggers.base import LightningLoggerBase, rank_zero_only\n\nlogger = getLogger(__name__)\n\n\nclass NeptuneLogger(LightningLoggerBase):\n r\"\"\"\n Neptune logger can be used in the online mode or offline (silent) mode.\n To log experiment data in online mode, NeptuneLogger requries an API key:\n \"\"\"\n\n def __init__(self, api_key: Optional[str] = None, project_name: Optional[str] = None,\n offline_mode: bool = False, experiment_name: Optional[str] = None,\n upload_source_files: Optional[List[str]] = None, params: Optional[Dict[str, Any]] = None,\n properties: Optional[Dict[str, Any]] = None, tags: Optional[List[str]] = None, **kwargs):\n r\"\"\"\n\n Initialize a neptune.ml logger.\n\n .. note:: Requires either an API Key (online mode) or a local directory path (offline mode)\n\n .. code-block:: python\n\n # ONLINE MODE\n from pytorch_lightning.loggers import NeptuneLogger\n # arguments made to NeptuneLogger are passed on to the neptune.experiments.Experiment class\n\n neptune_logger = NeptuneLogger(\n api_key=os.environ[\"NEPTUNE_API_TOKEN\"],\n project_name=\"USER_NAME/PROJECT_NAME\",\n experiment_name=\"default\", # Optional,\n params={\"max_epochs\": 10}, # Optional,\n tags=[\"pytorch-lightning\",\"mlp\"] # Optional,\n )\n trainer = Trainer(max_epochs=10, logger=neptune_logger)\n\n .. code-block:: python\n\n # OFFLINE MODE\n from pytorch_lightning.loggers import NeptuneLogger\n # arguments made to NeptuneLogger are passed on to the neptune.experiments.Experiment class\n\n neptune_logger = NeptuneLogger(\n project_name=\"USER_NAME/PROJECT_NAME\",\n experiment_name=\"default\", # Optional,\n params={\"max_epochs\": 10}, # Optional,\n tags=[\"pytorch-lightning\",\"mlp\"] # Optional,\n )\n trainer = Trainer(max_epochs=10, logger=neptune_logger)\n\n Use the logger anywhere in you LightningModule as follows:\n\n .. code-block:: python\n\n def train_step(...):\n # example\n self.logger.experiment.log_metric(\"acc_train\", acc_train) # log metrics\n self.logger.experiment.log_image(\"worse_predictions\", prediction_image) # log images\n self.logger.experiment.log_artifact(\"model_checkpoint.pt\", prediction_image) # log model checkpoint\n self.logger.experiment.whatever_neptune_supports(...)\n\n def any_lightning_module_function_or_hook(...):\n self.logger.experiment.log_metric(\"acc_train\", acc_train) # log metrics\n self.logger.experiment.log_image(\"worse_predictions\", prediction_image) # log images\n self.logger.experiment.log_artifact(\"model_checkpoint.pt\", prediction_image) # log model checkpoint\n self.logger.experiment.whatever_neptune_supports(...)\n\n Args:\n api_key (str | None): Required in online mode. Neputne API token, found on https://neptune.ml.\n Read how to get your API key\n https://docs.neptune.ml/python-api/tutorials/get-started.html#copy-api-token.\n project_name (str): Required in online mode. 
Qualified name of a project in a form of\n \"namespace/project_name\" for example \"tom/minst-classification\".\n If None, the value of NEPTUNE_PROJECT environment variable will be taken.\n You need to create the project in https://neptune.ml first.\n offline_mode (bool): Optional default False. If offline_mode=True no logs will be send to neptune.\n Usually used for debug purposes.\n experiment_name (str|None): Optional. Editable name of the experiment.\n Name is displayed in the experiment’s Details (Metadata section) and in experiments view as a column.\n upload_source_files (list|None): Optional. List of source files to be uploaded.\n Must be list of str or single str. Uploaded sources are displayed in the experiment’s Source code tab.\n If None is passed, Python file from which experiment was created will be uploaded.\n Pass empty list ([]) to upload no files. Unix style pathname pattern expansion is supported.\n For example, you can pass '\\*.py'\n to upload all python source files from the current directory.\n For recursion lookup use '\\**/\\*.py' (for Python 3.5 and later).\n For more information see glob library.\n params (dict|None): Optional. Parameters of the experiment. After experiment creation params are read-only.\n Parameters are displayed in the experiment’s Parameters section and each key-value pair can be\n viewed in experiments view as a column.\n properties (dict|None): Optional default is {}. Properties of the experiment.\n They are editable after experiment is created. Properties are displayed in the experiment’s Details and\n each key-value pair can be viewed in experiments view as a column.\n tags (list|None): Optional default []. Must be list of str. Tags of the experiment.\n They are editable after experiment is created (see: append_tag() and remove_tag()).\n Tags are displayed in the experiment’s Details and can be viewed in experiments view as a column.\n \"\"\"\n super().__init__()\n self.api_key = api_key\n self.project_name = project_name\n self.offline_mode = offline_mode\n self.experiment_name = experiment_name\n self.upload_source_files = upload_source_files\n self.params = params\n self.properties = properties\n self.tags = tags\n self._experiment = None\n self._kwargs = kwargs\n\n if offline_mode:\n self.mode = \"offline\"\n neptune.init(project_qualified_name='dry-run/project',\n backend=neptune.OfflineBackend())\n else:\n self.mode = \"online\"\n neptune.init(api_token=self.api_key,\n project_qualified_name=self.project_name)\n\n logger.info(f\"NeptuneLogger was initialized in {self.mode} mode\")\n\n @property\n def experiment(self) -> Experiment:\n r\"\"\"\n\n Actual neptune object. 
To use neptune features do the following.\n\n Example::\n\n self.logger.experiment.some_neptune_function()\n\n \"\"\"\n\n if self._experiment is not None:\n return self._experiment\n else:\n self._experiment = neptune.create_experiment(name=self.experiment_name,\n params=self.params,\n properties=self.properties,\n tags=self.tags,\n upload_source_files=self.upload_source_files,\n **self._kwargs)\n return self._experiment\n\n @rank_zero_only\n def log_hyperparams(self, params: argparse.Namespace):\n for key, val in vars(params).items():\n self.experiment.set_property(f\"param__{key}\", val)\n\n @rank_zero_only\n def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None):\n \"\"\"Log metrics (numeric values) in Neptune experiments\n\n Args:\n metrics: Dictionary with metric names as keys and measured quantities as values\n step: Step number at which the metrics should be recorded, must be strictly increasing\n \"\"\"\n\n for key, val in metrics.items():\n if is_tensor(val):\n val = val.cpu().detach()\n\n if step is None:\n self.experiment.log_metric(key, val)\n else:\n self.experiment.log_metric(key, x=step, y=val)\n\n @rank_zero_only\n def finalize(self, status: str):\n self.experiment.stop()\n\n @property\n def name(self) -> str:\n if self.mode == \"offline\":\n return \"offline-name\"\n else:\n return self.experiment.name\n\n @property\n def version(self) -> str:\n if self.mode == \"offline\":\n return \"offline-id-1234\"\n else:\n return self.experiment.id\n\n @rank_zero_only\n def log_metric(self, metric_name: str, metric_value: float, step: Optional[int] = None):\n \"\"\"Log metrics (numeric values) in Neptune experiments\n\n Args:\n metric_name: The name of log, i.e. mse, loss, accuracy.\n metric_value: The value of the log (data-point).\n step: Step number at which the metrics should be recorded, must be strictly increasing\n \"\"\"\n if step is None:\n self.experiment.log_metric(metric_name, metric_value)\n else:\n self.experiment.log_metric(metric_name, x=step, y=metric_value)\n\n @rank_zero_only\n def log_text(self, log_name: str, text: str, step: Optional[int] = None):\n \"\"\"Log text data in Neptune experiment\n\n Args:\n log_name: The name of log, i.e. mse, my_text_data, timing_info.\n text: The value of the log (data-point).\n step: Step number at which the metrics should be recorded, must be strictly increasing\n \"\"\"\n if step is None:\n self.experiment.log_metric(log_name, text)\n else:\n self.experiment.log_metric(log_name, x=step, y=text)\n\n @rank_zero_only\n def log_image(self, log_name: str, image: Union[str, Any], step: Optional[int] = None):\n \"\"\"Log image data in Neptune experiment\n\n Args:\n log_name: The name of log, i.e. bboxes, visualisations, sample_images.\n image (str|PIL.Image|matplotlib.figure.Figure): The value of the log (data-point).\n Can be one of the following types: PIL image, matplotlib.figure.Figure, path to image file (str)\n step: Step number at which the metrics should be recorded, must be strictly increasing\n \"\"\"\n if step is None:\n self.experiment.log_image(log_name, image)\n else:\n self.experiment.log_image(log_name, x=step, y=image)\n\n @rank_zero_only\n def log_artifact(self, artifact: str, destination: Optional[str] = None):\n \"\"\"Save an artifact (file) in Neptune experiment storage.\n\n Args:\n artifact: A path to the file in local filesystem.\n destination: Optional default None. 
A destination path.\n If None is passed, an artifact file name will be used.\n \"\"\"\n self.experiment.log_artifact(artifact, destination)\n\n @rank_zero_only\n def set_property(self, key: str, value: Any):\n \"\"\"Set key-value pair as Neptune experiment property.\n\n Args:\n key: Property key.\n value: New value of a property.\n \"\"\"\n self.experiment.set_property(key, value)\n\n @rank_zero_only\n def append_tags(self, tags: Union[str, Iterable[str]]):\n \"\"\"appends tags to neptune experiment\n\n Args:\n tags: Tags to add to the current experiment. If str is passed, singe tag is added.\n If multiple - comma separated - str are passed, all of them are added as tags.\n If list of str is passed, all elements of the list are added as tags.\n \"\"\"\n if not isinstance(tags, Iterable):\n tags = [tags] # make it as an iterable is if it is not yet\n self.experiment.append_tags(*tags)\n" ]
[ [ "torch.is_tensor" ] ]
deep-privacy/espnet
[ "48a25ef7b11e09a2221327d0319ca51a11d1ecc4" ]
[ "espnet2/utils/fileio.py" ]
[ "from __future__ import annotations\n\nimport collections.abc\nfrom io import StringIO\nimport logging\nfrom pathlib import Path\nfrom typing import Dict\nfrom typing import Union\nimport warnings\n\nimport numpy as np\nimport soundfile\nfrom typeguard import check_argument_types\nfrom typeguard import check_return_type\n\n\nclass DatadirWriter:\n \"\"\"Writer class to create kaldi like data directory.\n\n Examples:\n >>> with DatadirWriter(\"output\") as writer:\n ... # output/sub.txt is created here\n ... subwriter = writer[\"sub.txt\"]\n ... # Write \"uttidA some/where/a.wav\"\n ... subwriter[\"uttidA\"] = \"some/where/a.wav\"\n ... subwriter[\"uttidB\"] = \"some/where/b.wav\"\n\n \"\"\"\n\n def __init__(self, p: Union[Path, str]):\n assert check_argument_types()\n self.path = Path(p)\n self.chilidren = {}\n self.fd = None\n self.has_children = False\n self.keys = set()\n\n def __enter__(self):\n return self\n\n def __getitem__(self, key: str) -> DatadirWriter:\n assert check_argument_types()\n if self.fd is not None:\n raise RuntimeError(\"This writer points out a file\")\n\n if key not in self.chilidren:\n w = DatadirWriter((self.path / key))\n self.chilidren[key] = w\n self.has_children = True\n\n retval = self.chilidren[key]\n assert check_return_type(retval)\n return retval\n\n def __setitem__(self, key: str, value: str):\n assert check_argument_types()\n if self.has_children:\n raise RuntimeError(\"This writer points out a directory\")\n if key in self.keys:\n warnings.warn(f\"Duplicated: {key}\")\n\n if self.fd is None:\n self.path.parent.mkdir(parents=True, exist_ok=True)\n self.fd = self.path.open(\"w\", encoding=\"utf-8\")\n\n self.keys.add(key)\n self.fd.write(f\"{key} {value}\\n\")\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.close()\n\n def close(self):\n if self.has_children:\n prev_child = None\n for child in self.chilidren.values():\n child.close()\n if prev_child is not None and prev_child.keys != child.keys:\n warnings.warn(\n f\"Ids are mismatching between \"\n f\"{prev_child.path} and {child.path}\"\n )\n prev_child = child\n\n elif self.fd is not None:\n self.fd.close()\n\n\ndef read_2column_text(path: Union[Path, str]) -> Dict[str, str]:\n \"\"\"Read a text file having 2 column as dict object.\n\n Examples:\n wav.scp:\n key1 /some/path/a.wav\n key2 /some/path/b.wav\n\n >>> read_2column_text('wav.scp')\n {'key1': '/some/path/a.wav', 'key2': '/some/path/b.wav'}\n\n \"\"\"\n assert check_argument_types()\n\n data = {}\n with Path(path).open(\"r\", encoding=\"utf-8\") as f:\n for linenum, line in enumerate(f, 1):\n sps = line.rstrip().split(maxsplit=1)\n if len(sps) != 2:\n raise RuntimeError(\n f\"scp file must have two or more columns: \"\n f\"{line} ({path}:{linenum})\"\n )\n k, v = sps\n if k in data:\n raise RuntimeError(f\"{k} is duplicated ({path}:{linenum})\")\n data[k] = v.rstrip()\n assert check_return_type(data)\n return data\n\n\ndef load_num_sequence_text(\n path: Union[Path, str], loader_type: str = \"csv_int\"\n) -> Dict[str, np.ndarray]:\n assert check_argument_types()\n if loader_type == \"text_int\":\n delimiter = \" \"\n dtype = np.long\n elif loader_type == \"text_float\":\n delimiter = \" \"\n dtype = np.float32\n elif loader_type == \"csv_int\":\n delimiter = \",\"\n dtype = np.long\n elif loader_type == \"csv_float\":\n delimiter = \",\"\n dtype = np.float32\n else:\n raise ValueError(f\"Not supported loader_type={loader_type}\")\n\n # path looks like:\n # utta 1,0\n # uttb 3,4,5\n # -> return {'utta': np.ndarray([1, 0]),\n # 
'uttb': np.ndarray([3, 4, 5])}\n d = read_2column_text(path)\n\n # Using for-loop instead of dict-comprehension for debuggability\n retval = {}\n for k, v in d.items():\n try:\n retval[k] = np.loadtxt(\n StringIO(v), ndmin=1, dtype=dtype, delimiter=delimiter\n )\n except ValueError:\n logging.error(\n f'Error happened with path=\"{path}\", ' f'id=\"{k}\", value=\"{v}\"'\n )\n raise\n assert check_return_type(retval)\n return retval\n\n\nclass SoundScpReader(collections.abc.Mapping):\n \"\"\"Reader class for 'wav.scp'.\n\n Examples:\n key1 /some/path/a.wav\n key2 /some/path/b.wav\n key3 /some/path/c.wav\n key4 /some/path/d.wav\n ...\n\n >>> reader = SoundScpReader('wav.scp')\n >>> rate, array = reader['key1']\n\n \"\"\"\n\n def __init__(\n self, fname, dtype=np.int16, always_2d: bool = False, normalize: bool = False,\n ):\n assert check_argument_types()\n self.fname = fname\n self.dtype = dtype\n self.always_2d = always_2d\n self.normalize = normalize\n self.data = read_2column_text(fname)\n\n def __getitem__(self, key):\n wav = self.data[key]\n if self.normalize:\n # soundfile.read normalizes data to [-1,1] if dtype is not given\n array, rate = soundfile.read(wav, always_2d=self.always_2d)\n else:\n array, rate = soundfile.read(\n wav, dtype=self.dtype, always_2d=self.always_2d\n )\n\n return rate, array\n\n def get_path(self, key):\n return self.data[key]\n\n def __contains__(self, item):\n return item\n\n def __len__(self):\n return len(self.data)\n\n def __iter__(self):\n return iter(self.data)\n\n def keys(self):\n return self.data.keys()\n\n\nclass SoundScpWriter:\n \"\"\"Writer class for 'wav.scp'\n\n Examples:\n key1 /some/path/a.wav\n key2 /some/path/b.wav\n key3 /some/path/c.wav\n key4 /some/path/d.wav\n ...\n\n >>> writer = SoundScpWriter('./data/', './data/feat.scp')\n >>> writer['aa'] = 16000, numpy_array\n >>> writer['bb'] = 16000, numpy_array\n\n \"\"\"\n\n def __init__(\n self,\n outdir: Union[Path, str],\n scpfile: Union[Path, str],\n format=\"wav\",\n dtype=None,\n ):\n assert check_argument_types()\n self.dir = Path(outdir)\n self.dir.mkdir(parents=True, exist_ok=True)\n scpfile = Path(scpfile)\n scpfile.parent.mkdir(parents=True, exist_ok=True)\n self.fscp = scpfile.open(\"w\", encoding=\"utf-8\")\n self.format = format\n self.dtype = dtype\n\n self.data = {}\n\n def __setitem__(self, key: str, value):\n rate, signal = value\n assert isinstance(rate, int), type(rate)\n assert isinstance(signal, np.ndarray), type(signal)\n if signal.ndim not in (1, 2):\n raise RuntimeError(f\"Input signal must be 1 or 2 dimension: {signal.ndim}\")\n if signal.ndim == 1:\n signal = signal[:, None]\n\n wav = self.dir / f\"{key}.{self.format}\"\n wav.parent.mkdir(parents=True, exist_ok=True)\n soundfile.write(str(wav), signal, rate)\n\n self.fscp.write(f\"{key} {wav}\\n\")\n\n # Store the file path\n self.data[key] = str(wav)\n\n def get_path(self, key):\n return self.data[key]\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.close()\n\n def close(self):\n self.fscp.close()\n\n\nclass NpyScpWriter:\n \"\"\"Writer class for a scp file of numpy file.\n\n Examples:\n key1 /some/path/a.npy\n key2 /some/path/b.npy\n key3 /some/path/c.npy\n key4 /some/path/d.npy\n ...\n\n >>> writer = NpyScpWriter('./data/', './data/feat.scp')\n >>> writer['aa'] = numpy_array\n >>> writer['bb'] = numpy_array\n\n \"\"\"\n\n def __init__(self, outdir: Union[Path, str], scpfile: Union[Path, str]):\n assert check_argument_types()\n self.dir = Path(outdir)\n 
self.dir.mkdir(parents=True, exist_ok=True)\n scpfile = Path(scpfile)\n scpfile.parent.mkdir(parents=True, exist_ok=True)\n self.fscp = scpfile.open(\"w\", encoding=\"utf-8\")\n\n self.data = {}\n\n def get_path(self, key):\n return self.data[key]\n\n def __setitem__(self, key, value):\n assert isinstance(value, np.ndarray), type(value)\n p = self.dir / f\"{key}.npy\"\n p.parent.mkdir(parents=True, exist_ok=True)\n np.save(str(p), value)\n self.fscp.write(f\"{key} {p}\\n\")\n\n # Store the file path\n self.data[key] = str(p)\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.close()\n\n def close(self):\n self.fscp.close()\n\n\nclass NpyScpReader(collections.abc.Mapping):\n \"\"\"Reader class for a scp file of numpy file.\n\n Examples:\n key1 /some/path/a.npy\n key2 /some/path/b.npy\n key3 /some/path/c.npy\n key4 /some/path/d.npy\n ...\n\n >>> reader = NpyScpReader('npy.scp')\n >>> array = reader['key1']\n\n \"\"\"\n\n def __init__(self, fname: Union[Path, str]):\n assert check_argument_types()\n self.fname = Path(fname)\n self.data = read_2column_text(fname)\n\n def get_path(self, key):\n return self.data[key]\n\n def __getitem__(self, key) -> np.ndarray:\n p = self.data[key]\n return np.load(p)\n\n def __contains__(self, item):\n return item\n\n def __len__(self):\n return len(self.data)\n\n def __iter__(self):\n return iter(self.data)\n\n def keys(self):\n return self.data.keys()\n" ]
[ [ "numpy.load" ] ]
Lenaxiao/ToxNet-Project
[ "576a92ed825f9b511928bacbc578f9846d219695" ]
[ "modules/ToxNet_02_Internal_Test_Set.py" ]
[ "\n# coding: utf-8\n\n# In[15]:\n\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport os\n\nfrom rdkit import Chem\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# In[16]:\n\n\nhomedir = os.path.dirname(os.path.realpath('__file__'))\nhomedir = homedir+\"/data/\"\ndf = pd.read_csv(homedir+\"tox_niehs_all.csv\")\n\n\n# In[17]:\n\n\ndf.head()\n\n\n# # Construct Internal Test Set\n\n# In[18]:\n\n\nsize = 0.10\nseed = 6\nnp.random.seed(seed)\n\n\n# In[19]:\n\n\nmsk = np.random.rand(len(df)) < 0.1\ndf_tv = df[~msk]\ndf_int = df[msk]\n\n\n# In[20]:\n\n\nprint(df.shape, df_tv.shape, df_int.shape)\n\n\n# In[21]:\n\n\ndf_tv.to_csv(homedir+'tox_niehs_all_trainval.csv', index=False)\ndf_int.to_csv(homedir+'tox_niehs_all_int.csv', index=False)\n\n\n# # Evaluate Dataset Characteristics\n\n# In[22]:\n\n\nimport matplotlib.pyplot as plt\n\n\n# In[23]:\n\n\ntask = 'verytoxic'\n\nfig, axes = plt.subplots(nrows=1, ncols=3)\n\ndf[task].hist(normed=True, ax=axes[0])\ndf_tv[task].hist(normed=True, ax=axes[1])\ndf_int[task].hist(normed=True, ax=axes[2])\n\n\n# In[24]:\n\n\ntask = 'nontoxic'\n\nfig, axes = plt.subplots(nrows=1, ncols=3)\n\ndf[task].hist(normed=True, ax=axes[0])\ndf_tv[task].hist(normed=True, ax=axes[1])\ndf_int[task].hist(normed=True, ax=axes[2])\n\n\n# In[25]:\n\n\ntask = 'epa'\n\nfig, axes = plt.subplots(nrows=1, ncols=3)\n\ndf[task].hist(normed=True, ax=axes[0])\ndf_tv[task].hist(normed=True, ax=axes[1])\ndf_int[task].hist(normed=True, ax=axes[2])\n\n\n# In[26]:\n\n\ntask = 'ghs'\n\nfig, axes = plt.subplots(nrows=1, ncols=3)\n\ndf[task].hist(normed=True, ax=axes[0])\ndf_tv[task].hist(normed=True, ax=axes[1])\ndf_int[task].hist(normed=True, ax=axes[2])\n\n\n# In[27]:\n\n\ntask = 'ld50'\n\nfig, axes = plt.subplots(nrows=1, ncols=3)\n\ndf[task].hist(normed=True, ax=axes[0])\ndf_tv[task].hist(normed=True, ax=axes[1])\ndf_int[task].hist(normed=True, ax=axes[2])\n\n\n# In[28]:\n\n\ntask = 'logld50'\n\nfig, axes = plt.subplots(nrows=1, ncols=3)\n\ndf[task].hist(normed=True, ax=axes[0])\ndf_tv[task].hist(normed=True, ax=axes[1])\ndf_int[task].hist(normed=True, ax=axes[2])\n\n" ]
[ [ "numpy.random.seed", "pandas.read_csv", "matplotlib.pyplot.subplots" ] ]
rystylee/pytorch-cppn-gan
[ "e02ffe26cc57807bc739e4d5806e25f11991e7ec" ]
[ "src/dcgan_cppn/trainer.py" ]
[ "import os\r\nfrom tqdm import tqdm\r\n\r\nimport torch\r\nimport torchvision\r\nfrom torch.utils.tensorboard import SummaryWriter\r\n\r\nfrom src.dcgan_cppn.models import Generator, Discriminator\r\nfrom losses import GANLoss\r\nfrom utils import get_coordinates, endless_dataloader\r\n\r\nfrom data_loader import DataLoader\r\n\r\n\r\nclass Trainer(object):\r\n def __init__(self, config):\r\n self.config = config\r\n\r\n self.device = config.device\r\n self.max_itr = config.max_itr\r\n self.batch_size = config.batch_size\r\n self.img_size = config.img_size\r\n self.dim_z = config.dim_z\r\n self.dim_c = config.dim_c\r\n self.scale = config.scale\r\n self.n_gen = config.n_gen\r\n\r\n self.start_itr = 1\r\n\r\n dataloader = DataLoader(\r\n config.data_root, config.dataset_name, config.img_size, config.batch_size, config.with_label\r\n )\r\n train_loader, test_loader = dataloader.get_loader(only_train=True)\r\n self.dataloader = train_loader\r\n self.dataloader = endless_dataloader(self.dataloader)\r\n\r\n self.generator = Generator(config).to(config.device)\r\n self.discriminator = Discriminator(config).to(config.device)\r\n\r\n self.optim_g = torch.optim.Adam(self.generator.parameters(), lr=config.lr_g, betas=(config.beta1, config.beta2))\r\n self.optim_d = torch.optim.Adam(self.discriminator.parameters(), lr=config.lr_d, betas=(config.beta1, config.beta2))\r\n self.criterion = GANLoss()\r\n\r\n if not self.config.checkpoint_path == '':\r\n self._load_models(self.config.checkpoint_path)\r\n\r\n self.x, self.y, self.r = get_coordinates(self.img_size, self.img_size, self.scale, self.batch_size)\r\n self.x, self.y, self.r = self.x.to(self.device), self.y.to(self.device), self.r.to(self.device)\r\n\r\n self.writer = SummaryWriter(log_dir=config.log_dir)\r\n\r\n def train(self):\r\n print('Start training!\\n')\r\n with tqdm(total=self.config.max_itr + 1 - self.start_itr) as pbar:\r\n for n_itr in range(self.start_itr, self.config.max_itr + 1):\r\n pbar.set_description(f'iteration [{n_itr}]')\r\n\r\n # ------------------------------------------------\r\n # Train G\r\n # ------------------------------------------------\r\n total_loss_g = 0\r\n for _ in range(self.n_gen):\r\n self.optim_g.zero_grad()\r\n z = torch.bmm(\r\n torch.ones(self.batch_size, self.img_size * self.img_size, 1),\r\n torch.randn(self.batch_size, 1, self.dim_z)\r\n ).to(self.device)\r\n\r\n fake_img = self.generator(z, self.x, self.y, self.r)\r\n fake_img = fake_img.view(-1, self.img_size, self.img_size, self.dim_c).permute((0, 3, 1, 2))\r\n d_fake = self.discriminator(fake_img)\r\n loss_g = self.criterion(d_fake, 'g')\r\n total_loss_g += loss_g.item()\r\n loss_g.backward()\r\n self.optim_g.step()\r\n total_loss_g /= float(self.n_gen)\r\n\r\n # ------------------------------------------------\r\n # Train D\r\n # ------------------------------------------------\r\n total_loss_d = 0\r\n total_loss_d_real = 0\r\n total_loss_d_fake = 0\r\n self.optim_d.zero_grad()\r\n img, label = next(self.dataloader)\r\n real_img, real_label = img.to(self.device), label.to(self.device)\r\n\r\n z = torch.bmm(\r\n torch.ones(self.batch_size, self.img_size * self.img_size, 1),\r\n torch.randn(self.batch_size, 1, self.dim_z)\r\n ).to(self.device)\r\n with torch.no_grad():\r\n fake_img = self.generator(z, self.x, self.y, self.r)\r\n fake_img = fake_img.view(-1, self.img_size, self.img_size, self.dim_c).permute((0, 3, 1, 2))\r\n d_real = self.discriminator(real_img)\r\n d_fake = self.discriminator(fake_img.detach())\r\n loss_d_real = 
self.criterion(d_real, 'd_real')\r\n loss_d_fake = self.criterion(d_fake, 'd_fake')\r\n loss_d = loss_d_real + loss_d_fake\r\n total_loss_d += loss_d.item()\r\n total_loss_d_fake += loss_d_fake.item()\r\n total_loss_d_real += loss_d_real.item()\r\n loss_d.backward()\r\n self.optim_d.step()\r\n\r\n if n_itr % self.config.checkpoint_interval == 0:\r\n self._save_models(n_itr)\r\n\r\n if n_itr % self.config.log_interval == 0:\r\n tqdm.write('iteration: {}/{}, loss_g: {}, loss_d: {}, loss_d_real: {}, loss_d_fake: {}'.format(\r\n n_itr, self.config.max_itr, total_loss_g, total_loss_d, total_loss_d_real, total_loss_d_fake))\r\n self.writer.add_scalar('loss/loss_g', total_loss_g, n_itr)\r\n self.writer.add_scalar('loss/loss_d', total_loss_d, n_itr)\r\n self.writer.add_scalar('loss/loss_d_real', total_loss_d_real, n_itr)\r\n self.writer.add_scalar('loss/loss_d_fake', total_loss_d_fake, n_itr)\r\n\r\n if n_itr % self.config.sample_interval == 0:\r\n fake_name = f'fake_{n_itr}.jpg'\r\n fake_path = os.path.join(self.config.sample_dir, fake_name)\r\n torchvision.utils.save_image(fake_img.detach(), fake_path, normalize=True, range=(-1.0, 1.0))\r\n real_name = f'real_{n_itr}.jpg'\r\n real_path = os.path.join(self.config.sample_dir, real_name)\r\n torchvision.utils.save_image(real_img, real_path, normalize=True, range=(-1.0, 1.0))\r\n\r\n pbar.update()\r\n\r\n def _load_models(self, model_state_path):\r\n checkpoint = torch.load(model_state_path)\r\n self.start_itr = checkpoint['n_itr'] + 1\r\n self.generator.load_state_dict(checkpoint['generator'])\r\n self.discriminator.load_state_dict(checkpoint['discriminator'])\r\n self.optim_g.load_state_dict(checkpoint['optim_g'])\r\n self.optim_d.load_state_dict(checkpoint['optim_d'])\r\n print(f'start_itr: {self.start_itr}')\r\n print('Loaded pretrained models...\\n')\r\n\r\n def _save_models(self, n_itr):\r\n checkpoint_name = f'{self.config.dataset_name}-{self.config.img_size}_model_ckpt_{n_itr}.pth'\r\n checkpoint_path = os.path.join(self.config.checkpoint_dir, checkpoint_name)\r\n torch.save({\r\n 'n_itr': n_itr,\r\n 'generator': self.generator.state_dict(),\r\n 'discriminator': self.discriminator.state_dict(),\r\n 'optim_g': self.optim_g.state_dict(),\r\n 'optim_d': self.optim_d.state_dict(),\r\n }, checkpoint_path)\r\n tqdm.write(f'Saved models: n_itr_{n_itr}')\r\n" ]
[ [ "torch.utils.tensorboard.SummaryWriter", "torch.no_grad", "torch.ones", "torch.load", "torch.randn" ] ]
arnoldjulian/Interpretable-and-unsupervised-phase-classification
[ "5b10426f759c47779654ccd001aab06563ed30ee" ]
[ "prediction_based_method/DNNs/conf.py" ]
[ "import numpy as np\r\n\r\n# set a random seed\r\nseed = 222\r\n\r\n# dimension of parameter space under consideration, choose between 1 and 2\r\ndim_parameter_space = 1\r\n\r\n# set range of U under investigation. can be modified: 1 < U_min, U_max < 8 and U_min, U_max % 0.2 = 0\r\nU_min = 1.0\r\nU_max = 8.0\r\nU_step = 0.2\r\n\r\n# if dim_parameter_space = 1, additionally give the particular Nf which should be considered for a linescan\r\nnf_tar = 63\r\n\r\n# linear lattice size L; L = 20 for the given data\r\ndim = 20\r\n\r\n# based on the specifications above, define variables which quantify the parameter space\r\nU_scale = 1/np.std(np.arange(U_min, U_max+U_step, U_step))\r\nnum_U = int(round(((U_max-U_min)/U_step)+1))\r\nnum_nf = int(round(dim**2/2))\r\n\r\nif dim_parameter_space == 1:\r\n nf_scale = 0\r\n hder = U_step*U_scale\r\n num_nf = 1\r\n\r\nelif dim_parameter_space == 2:\r\n nf_scale = 1/np.std(range(1, num_nf+1))\r\n hder = [U_step*U_scale, 1*nf_scale]\r\n\r\n\r\n# path to the folder containing the data to be analyzed\r\ndata_dir = '../../data'\r\n\r\n# type of input to be used; choose between 'raw', 'ft', and 'corr_func'\r\ninput_type = 'ft'\r\n\r\n# choose between the 'noise-free' and 'noisy' case; we provide data for the noise-free case\r\ncase = 'noise-free'\r\n\r\n# number of epochs to be trained\r\nepochs = 3000\r\n\r\n# epochs at which a predicted phase diagram should be plotted and saved\r\nepoch_step = 50\r\nsaving_epochs = np.arange(0, epochs+epoch_step, epoch_step)\r\nfor i in range(1, len(saving_epochs)):\r\n saving_epochs[i] -= 1\r\n\r\n# specify NN hyperparameters (see Supplemental Material for appropriate choices)\r\n\r\n# batch size\r\nbatch_size = num_U\r\n\r\n# activation function\r\nactfunc = 'relu'\r\n\r\n# parameters for training in batch mode\r\nparams = {'batch_size': batch_size, 'shuffle': True, 'num_workers': 1}\r\nparams_stand = {'batch_size': 1, 'shuffle': True, 'num_workers': 1}\r\n\r\n# learning rate (Adam optimizer)\r\nlr = 1e-4\r\n\r\n# l2 regularization rate\r\nl2_lambda = 0.0\r\n\r\n# parameters for the learning rate scheduler (ReduceOnPlateau)\r\nlr_scheduler_factor = 0.5\r\nlr_scheduler_patience = 50\r\n\r\n# NN architecture\r\n\r\n# when analyzing a one-dimensional parameter space fcs[-1] = 1 corresponding to the predicted U and in case of a two-dimensionar parameter space fcs[-1]=2 corresponding to the predicted (U,rho)\r\n# channels = [1, 2048]\r\n# kernels = [20]\r\n# strides = [1]\r\n# fcs = [2048, 1024, 512, 512, 256, 2]\r\n\r\nchannels = [1, 512]\r\nkernels = [20]\r\nstrides = [1]\r\nfcs = [512, 256, 64, 1]\r\n" ]
[ [ "numpy.arange" ] ]
timgates42/netcdf4-python
[ "d8b1cb11454f9beec674a29904c91f48db608c2c" ]
[ "examples/bench_compress2.py" ]
[ "# benchmark reads and writes, with and without compression.\n# tests all four supported file formats.\nfrom numpy.random.mtrand import uniform\nimport netCDF4\nfrom timeit import Timer\nimport os, sys\n\n# create an n1dim by n2dim by n3dim random array.\nn1dim = 30\nn2dim = 15\nn3dim = 73\nn4dim = 144\nntrials = 10\nsys.stdout.write('reading and writing a %s by %s by %s by %s random array ..\\n'%(n1dim,n2dim,n3dim,n4dim))\nsys.stdout.write('(average of %s trials)\\n\\n' % ntrials)\narray = uniform(size=(n1dim,n2dim,n3dim,n4dim))\n\n\ndef write_netcdf(filename,complevel,lsd):\n file = netCDF4.Dataset(filename,'w',format='NETCDF4')\n file.createDimension('n1', n1dim)\n file.createDimension('n2', n2dim)\n file.createDimension('n3', n3dim)\n file.createDimension('n4', n4dim)\n foo = file.createVariable('data',\\\n 'f8',('n1','n2','n3','n4'),\\\n zlib=True,shuffle=True,complevel=complevel,\\\n least_significant_digit=lsd)\n foo[:] = array\n file.close()\n\ndef read_netcdf(filename):\n file = netCDF4.Dataset(filename)\n data = file.variables['data'][:]\n file.close()\n\nlsd = None\nsys.stdout.write('using least_significant_digit %s\\n\\n' % lsd)\nfor complevel in range(0,10,2):\n sys.stdout.write('testing compression with complevel %s...\\n' % complevel)\n # writing.\n t = Timer(\"write_netcdf('test.nc',%s,%s)\" % (complevel,lsd),\"from __main__ import write_netcdf\")\n sys.stdout.write('writing took %s seconds\\n' %\\\n repr(sum(t.repeat(ntrials,1))/ntrials))\n # test reading.\n t = Timer(\"read_netcdf('test.nc')\",\"from __main__ import read_netcdf\")\n sys.stdout.write('reading took %s seconds\\n' %\n repr(sum(t.repeat(ntrials,1))/ntrials))\n # print out size of resulting files.\n sys.stdout.write('size of test.nc = %s\\n'%repr(os.stat('test.nc').st_size))\n\ncomplevel = 4\nsys.stdout.write('\\nusing complevel %s\\n\\n' % complevel)\nfor lsd in range(1,6):\n sys.stdout.write('testing compression with least_significant_digit %s...\\n' % lsd)\n # writing.\n t = Timer(\"write_netcdf('test.nc',%s,%s)\" % (complevel,lsd),\"from __main__ import write_netcdf\")\n sys.stdout.write('writing took %s seconds\\n' %\\\n repr(sum(t.repeat(ntrials,1))/ntrials))\n # test reading.\n t = Timer(\"read_netcdf('test.nc')\",\"from __main__ import read_netcdf\")\n sys.stdout.write('reading took %s seconds\\n' %\n repr(sum(t.repeat(ntrials,1))/ntrials))\n # print out size of resulting files.\n sys.stdout.write('size of test.nc = %s\\n'%repr(os.stat('test.nc').st_size))\n" ]
[ [ "numpy.random.mtrand.uniform" ] ]
Pullabhatla/M3R
[ "50ccdd6d97d47cd20e575443b368c30c066ad85a" ]
[ "scripts/momentum1.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom Req import LeakyReLUSoftmaxCCE, load_data\n\n(x_train, y_train), (x_test, y_test) = load_data()\n\nscores = np.empty((20, 101))\nfor i in range(20):\n np.random.seed(1112001)\n mlp = LeakyReLUSoftmaxCCE((28, 28), 10, [16, 16, 16])\n scores[i] = mlp.momentum_sgd_train(x_train, y_train, 1e-3, 100, 32, x_test, y_test, i/20)['test_accuracy']\n\n\nepochs = [i for i in range(101)]\nmomentums = [i/20 for i in range(20)]\n\nidx = np.unravel_index(np.argmax(scores), scores.shape)\n\nplt.contourf(epochs, momentums, scores)\nplt.xlabel('Epochs')\nplt.ylabel(r'$\\beta{}$')\nplt.title('Momentum Test Accuracy')\nplt.colorbar()\nplt.savefig('figures/momentum1.png')\n\nwith open('output/momentum1.txt', 'w') as f:\n f.write(f'The maximal test accuracy of {scores[idx]*100}% is achieved ' + f'with a momentum of {idx[0]/20} after {idx[1]} epochs.')\n" ]
[ [ "matplotlib.pyplot.colorbar", "numpy.empty", "matplotlib.pyplot.contourf", "numpy.random.seed", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.savefig", "matplotlib.pyplot.title", "numpy.argmax", "matplotlib.pyplot.ylabel" ] ]
akbir/magi
[ "cff26ddb87165bb6e19796dc77521e3191afcffc" ]
[ "magi/agents/td3/networks.py" ]
[ "\"\"\"Default network architectures for TD3.\"\"\"\nfrom typing import Dict, Sequence\n\nfrom acme import specs\nfrom acme.jax import networks as networks_lib\nfrom acme.jax import utils\nimport haiku as hk\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\n\n\ndef apply_policy_sample(networks, eval_mode: bool):\n def policy_network(params, key, observation):\n action_mean = networks[\"policy\"].apply(params, observation)\n if eval_mode:\n return action_mean\n else:\n return networks[\"sample\"](action_mean, key)\n\n return policy_network\n\n\ndef make_networks(\n spec: specs.EnvironmentSpec,\n policy_layer_sizes: Sequence[int] = (256, 256),\n critic_layer_sizes: Sequence[int] = (256, 256),\n sigma: float = 0.1,\n) -> Dict[str, hk.Transformed]:\n \"\"\"Make default networks used by TD3.\"\"\"\n action_size = np.prod(spec.actions.shape, dtype=int)\n\n def _critic(h):\n output = hk.nets.MLP(\n list(critic_layer_sizes) + [1],\n w_init=hk.initializers.VarianceScaling(1.0, \"fan_in\", \"uniform\"),\n )(h)\n return jnp.squeeze(output, axis=-1)\n\n def _double_critic(obs, a):\n h = jnp.concatenate([obs, a], axis=-1)\n q1 = _critic(h)\n q2 = _critic(h)\n return q1, q2\n\n def _policy(obs):\n return hk.Sequential(\n [\n hk.nets.MLP(\n policy_layer_sizes,\n w_init=hk.initializers.VarianceScaling(1.0, \"fan_in\", \"uniform\"),\n activate_final=True,\n ),\n hk.Linear(\n action_size,\n hk.initializers.VarianceScaling(1.0, \"fan_in\", \"uniform\"),\n ),\n jnp.tanh,\n ]\n )(obs)\n\n def _sample_fn(action_mean, key):\n exploration_noise = jax.random.normal(key, action_mean.shape) * sigma\n sampled_action = action_mean + exploration_noise\n sampled_action = jnp.clip(\n sampled_action, spec.actions.minimum, spec.actions.maximum\n )\n return sampled_action\n\n critic = hk.without_apply_rng(hk.transform(_double_critic))\n policy = hk.without_apply_rng(hk.transform(_policy))\n # Create dummy observations and actions to create network parameters.\n dummy_action = utils.zeros_like(spec.actions)\n dummy_obs = utils.zeros_like(spec.observations)\n dummy_action = utils.add_batch_dim(dummy_action)\n dummy_obs = utils.add_batch_dim(dummy_obs)\n\n return {\n \"policy\": networks_lib.FeedForwardNetwork(\n lambda key: policy.init(key, dummy_obs), policy.apply\n ),\n \"critic\": networks_lib.FeedForwardNetwork(\n lambda key: critic.init(key, dummy_obs, dummy_action), critic.apply\n ),\n \"sample\": _sample_fn,\n }\n" ]
[ [ "numpy.prod" ] ]
csruiliu/tensorflow-cifar10
[ "6180d425f319542f0af568acd71cc6503d080764" ]
[ "models/zfnet.py" ]
[ "import tensorflow as tf\n\n\nclass ZFNet:\n def __init__(self, num_classes=10):\n self.output_classes = num_classes\n\n @staticmethod\n def fc_layer(layer_input, output_unit):\n layer = tf.keras.layers.Flatten()(layer_input)\n layer = tf.keras.layers.Dense(units=output_unit)(layer)\n\n return layer\n\n def build(self, model_input):\n with tf.variable_scope('conv_1'):\n x = tf.keras.layers.Conv2D(filters=96, kernel_size=7, strides=2, padding='same')(model_input)\n x = tf.keras.activations.relu(x)\n x = tf.keras.layers.MaxPool2D(pool_size=3, strides=2)(x)\n\n with tf.variable_scope('conv_2'):\n x = tf.keras.layers.Conv2D(filters=256, kernel_size=5, strides=2, padding='same')(x)\n x = tf.keras.activations.relu(x)\n x = tf.keras.layers.MaxPool2D(pool_size=3, strides=2)(x)\n\n with tf.variable_scope('conv_3'):\n x = tf.keras.layers.Conv2D(filters=384, kernel_size=3, strides=1, padding='same')(x)\n x = tf.keras.activations.relu(x)\n\n # with tf.variable_scope('conv_4'):\n # x = tf.keras.layers.Conv2D(filters=384, kernel_size=3, strides=1, padding='same')(x)\n # x = tf.keras.activations.relu(x)\n\n with tf.variable_scope('conv_5'):\n x = tf.keras.layers.Conv2D(filters=256, kernel_size=3, strides=1, padding='same')(x)\n x = tf.keras.activations.relu(x)\n # x = tf.keras.layers.MaxPool2D(pool_size=3)(x)\n\n with tf.variable_scope('fc1'):\n x = self.fc_layer(x, output_unit=4096)\n x = tf.keras.activations.relu(x)\n x = tf.keras.layers.Dropout(rate=0.5)(x)\n\n with tf.variable_scope('fc2'):\n x = self.fc_layer(x, output_unit=4096)\n x = tf.keras.activations.relu(x)\n x = tf.keras.layers.Dropout(rate=0.5)(x)\n\n model = self.fc_layer(x, output_unit=self.output_classes)\n\n return model\n" ]
[ [ "tensorflow.keras.activations.relu", "tensorflow.keras.layers.Flatten", "tensorflow.keras.layers.MaxPool2D", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.layers.Dropout", "tensorflow.variable_scope" ] ]
ameilij/MDS
[ "3cb426b5de0e1f6bb8ad2229ee8ab9ecb750bc05" ]
[ "AMV/AMV-UNI5-CP1-MNIST.py" ]
[ "# LA CIENCIA DE DATOS\n# TECNICAS DE VISUALIZACION, MINERIA Y ANALISIS\n# UNIDAD 5 MODELOS\n# Caso Practico 1\n\n# Import relevant libraries\nimport pyarrow.parquet as pq\nimport numpy as np\nimport pandas as pd\nimport pyarrow as pa\nfrom numpy.core._multiarray_umath import ndarray\n\n# FUNCTION DEFINITION AREA\n\ndef transform_clean_format(image_set_instance):\n \"\"\"Transform an image to a CLEAN_FORMAT, which is an image of 28x28 zeros or ones.\n This simplifies a lot calculating actual space used by a pic regardless or value.\n\n :parameter image_set_instance\n\n :return image_clean_format\n \"\"\"\n temp = image_set_instance.to_list()\n temp2 = [1 if valores > 0 else 0 for valores in temp]\n image_clean_format: ndarray = np.reshape(temp2, (28, 28))\n return image_clean_format\n\ndef calculate_horizontal_density(some_image_matrix):\n \"\"\"Calculate the average pixel density for the 28 rows in an image matrix\n :parameter\n some_image_matrix\n\n :return\n row_density_counter\n \"\"\"\n row_density_counter = []\n for rows in some_image_matrix:\n temp = sum(rows)/len(rows)\n row_density_counter.append(temp)\n return np.mean(row_density_counter)\n\ndef get_class_average_horizontal_density(image_set, class_set, target):\n \"\"\"Get a target class, evaluate all elements, and return average\n horizontal density of row for that class\n\n :parameter image_set\n :parameter class_set\n :parameter target\n\n :return class_average_horizontal_density_list MEAN\n :return class_average_horizontal_density_list SD\n :return counter_evaluation\n \"\"\"\n class_average_horizontal_density_list = []\n counter_evaluation = 0\n\n for i in range(1, len(image_set)):\n if (class_set[i] == target):\n reduced_image_set = transform_clean_format(image_set.iloc[i])\n temp = calculate_horizontal_density(reduced_image_set)\n class_average_horizontal_density_list.append(temp)\n counter_evaluation = counter_evaluation + 1\n\n return(np.mean(class_average_horizontal_density_list), np.std(class_average_horizontal_density_list), counter_evaluation)\n\ndef classifier(image):\n \"\"\"Simple classification model that analyzes if horizontal density of an image\n belongs to that trained for zeroes or ones. 
Function assumes that information in\n image is either one or zero, not any other number.\n\n :parameter image\n\n :return prediction\n \"\"\"\n\n # Model parameters for ZERO classification\n zero_mean = 0.24460545843717754\n zero_sd = 0.0429571657861942\n zero_ub = zero_mean + (3 * zero_sd)\n zero_lb = zero_mean - (3 * zero_sd)\n\n # Model parameters for ONE classification\n one_mean = 0.1095654683099595\n one_sd = 0.025619148318372687\n one_ub = one_mean + (3 * one_sd)\n one_lb = one_mean - (3 * one_sd)\n\n temp = calculate_horizontal_density(image)\n prediction = 9\n if(temp >= zero_lb and temp < zero_ub): prediction = 0\n if(temp >= one_lb and temp < one_ub): prediction = 1\n return prediction\n\ndef make_contingency_table(y, y_hat):\n tp, fn, fp, tn = 0,0,0,0\n for i in range(1, len(y)):\n if(y[i] == 1 and y_hat[i] == 1): tp = tp + 1\n if(y[i] == 0 and y_hat[i] == 1): fp = fp + 1\n if(y[i] == 1 and y_hat[i] == 0): fn = fn + 1\n if(y[i] == 0 and y_hat[i] == 0): tn = tn + 1\n return(tp, fn, fp, tn)\n\n# Enter main section\ndef main():\n # Open MNIST dataset with pixel files and class files.\n # The data is stored as parquet files\n image_set = pq.read_table('mnist_pixels.parquet')\n image_set = image_set.to_pandas()\n\n image_class = pq.read_table('mnist_clases.parquet')\n image_class = image_class.to_pandas()\n image_class = pd.to_numeric(image_class['class'])\n\n # Evaluate average horizontal density for images of 0 and 1\n print(\"Begin evaluation of images with class 1.\")\n results_1 = get_class_average_horizontal_density(image_set, image_class, 1)\n print(\"Class Average Horizontal Density: \", results_1[0])\n print(\"Class SD Horizontal Density: \", results_1[1])\n print(\"Class Occurrences: \", results_1[2], \"\\n\")\n\n print(\"Begin evaluation of images with class 0.\")\n results_2 = get_class_average_horizontal_density(image_set, image_class, 0)\n print(\"Class Average Horizontal Density: \", results_2[0])\n print(\"Class SD Horizontal Density: \", results_2[1])\n print(\"Class Occurrences: \", results_2[2], \"\\n\")\n\n # Create a test of all cases with 1 and 0\n pred_value = []\n real_value = []\n for i in range(1, len(image_class)):\n if (image_class.iloc[i] == 1 or image_class.iloc[i] == 0):\n real_value.append(image_class[i])\n temp = transform_clean_format(image_set.iloc[i])\n temp = classifier(temp)\n pred_value.append(temp)\n\n bar = make_contingency_table(real_value, pred_value)\n dat = {'Positive' : [bar[0], bar[2]],\n 'Negative' : [bar[1], bar[3]]}\n df = pd.DataFrame(dat, columns = ['Positive','Negative'], index=['Positive','Negative'])\n print(\"Contingency Table \\n\")\n print (df)\n\nif __name__ == \"__main__\":\n main()" ]
[ [ "numpy.reshape", "pandas.DataFrame", "numpy.mean", "numpy.std", "pandas.to_numeric" ] ]
dandyhug/kookey
[ "58ffb67e6d984e3067689277631be072beb8deb9" ]
[ "kookey/synonym_util/model.py" ]
[ "#-*-coding:utf-8-*-\r\n\r\nfrom gensim.models import fasttext\r\nfrom abc import abstractmethod\r\nimport numpy as np\r\nimport tensorflow as tf\r\nfrom tensorflow import keras\r\nfrom kookey.synonym_util import transform_data\r\nfrom kookey.tokenizer import Tokenizer\r\n\r\n\r\nclass Model:\r\n\r\n def __init__(self):\r\n self.model = None\r\n self.candidate = []\r\n\r\n\r\n @abstractmethod\r\n def train(self):\r\n pass\r\n\r\n\r\n @abstractmethod\r\n def extract(self, basic_words, compared_words):\r\n pass\r\n\r\n\r\n @abstractmethod\r\n def save_model(self, path=\"model.bin\"):\r\n pass\r\n\r\n\r\n def save_synonym_candidate(self, path=\"synonym_candidate.tsv\"):\r\n self.check_no_candidiate()\r\n\r\n with open(path, \"w\", encoding=\"utf-8\") as f:\r\n f.write(\"word1\\tword2\\tsimilarity\\n\")\r\n for word1, word2, similarity in self.candidate:\r\n f.write(word1 + \"\\t\" + word2 + \"\\t\" + str(round(similarity, 4)) + \"\\n\")\r\n \r\n \r\n @abstractmethod\r\n def check_no_data(self):\r\n pass\r\n\r\n\r\n def check_no_model(self):\r\n if not self.model:\r\n raise RuntimeError(\"There are no model to save. You should call train(...) first.\")\r\n\r\n\r\n def check_no_candidiate(self):\r\n if not self.candidate:\r\n raise RuntimeError(\"There are no candidate to save. You should call extract(...) first.\")\r\n\r\n\r\nclass Embedding(Model):\r\n\r\n def __init__(self, seed=42, tokenizer=\"kkma\", jamo=True, skip_gram=1, learning_rate=0.05, \r\n dim_size=100, window=5, min_count=5, min_n=3, max_n=6, iter=5, threshold=0.85):\r\n super(Embedding, self).__init__()\r\n\r\n self.tokenizer = Tokenizer(tokenizer).tokenizer\r\n self.jamo = jamo\r\n self.sentences = []\r\n\r\n self.seed = seed\r\n self.sg = skip_gram\r\n self.alpha = learning_rate\r\n self.size = dim_size\r\n self.window = window\r\n self.min_count = min_count\r\n self.min_n = min_n\r\n self.max_n = max_n\r\n self.iter = iter\r\n\r\n self.threshold = threshold\r\n self.eval_detail = {}\r\n self.candidate = []\r\n\r\n \r\n def build_training_data(self, sentences):\r\n self.sentences = transform_data.trans_sents_for_embedding(\r\n sentences, self.tokenizer, self.jamo)\r\n\r\n\r\n def save_training_data(self, path=\"train_sentences.txt\"):\r\n self.check_no_data()\r\n\r\n with open(path, \"w\", encoding=\"utf-8\") as f:\r\n for sent in self.sentences:\r\n f.write(sent + \"\\n\")\r\n\r\n def train(self):\r\n self.check_no_data()\r\n\r\n pre_trained_model = fasttext.FastText(seed=self.seed, sg=self.sg, \r\n alpha=self.alpha, size=self.size, window=self.window, \r\n min_count=self.min_count, min_n=self.min_n, max_n=self.max_n, iter=self.iter)\r\n pre_trained_model.build_vocab(sentences=self.sentences)\r\n pre_trained_model.train(sentences=self.sentences, \r\n total_examples=pre_trained_model.corpus_count, epochs=5)\r\n\r\n self.model = pre_trained_model\r\n\r\n\r\n def check_no_data(self):\r\n if not self.sentences:\r\n raise RuntimeError(\"There are no training data. You should call build_traing_data(...) 
first.\")\r\n\r\n\r\n def save_model(self, path=\"\"):\r\n self.check_no_model() \r\n fasttext.save_facebook_model(self.model, path)\r\n\r\n\r\n def eval(self, test_data):\r\n self.check_no_model()\r\n\r\n word1, word2 = [], []\r\n for w1, w2, _ in test_data:\r\n word1.append(w1)\r\n word2.append(w2)\r\n\r\n vec1, oov1 = self.get_vector(word1)\r\n vec2, oov2 = self.get_vector(word2)\r\n oov = oov1 + oov2\r\n\r\n cos_sim = self.cosine_similarity(vec1, vec2.T) \r\n\r\n right_count = 0\r\n for i in range(len(test_data)):\r\n if i not in oov:\r\n if cos_sim[i][i] >= self.threshold and test_data[i][2] == 1:\r\n right_count += 1\r\n elif cos_sim[i][i] < self.threshold and test_data[i][2] == 0:\r\n right_count += 1\r\n self.eval_detail[test_data[i]] = cos_sim[i][i]\r\n else:\r\n self.eval_detail[test_data[i]] = \"oov\"\r\n\r\n return right_count / (len(test_data) - len(oov)) * 100\r\n\r\n\r\n def extract(self, basic_words, compared_words):\r\n candidate = []\r\n\r\n vec2, oov2 = self.get_vector(compared_words)\r\n\r\n for word1 in basic_words:\r\n\r\n vec1, oov1 = self.get_vector([word1])\r\n\r\n cos_sim = self.cosine_similarity(vec1, vec2.T)[0]\r\n\r\n for i in range(len(cos_sim)):\r\n if not oov1 and i not in oov2:\r\n if cos_sim[i] >= self.threshold:\r\n candidate.append((word1, compared_words[i], cos_sim[i]))\r\n\r\n self.candidate = candidate\r\n\r\n\r\n def get_vector(self, words):\r\n vec = []\r\n oov = []\r\n\r\n for i in range(len(words)):\r\n try:\r\n if self.jamo:\r\n vec.append(self.model.wv[transform_data.sent_to_jamo(words[i])])\r\n else:\r\n vec.append(self.model.wv[words[i]])\r\n except KeyError:\r\n oov.append(i)\r\n vec.append(np.zeros(self.size))\r\n raise Warning(\"OOV : \" + words[i])\r\n\r\n vec = np.array(vec)\r\n\r\n return vec, oov\r\n\r\n\r\n def cosine_similarity(self, a, b):\r\n return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))\r\n\r\n\r\nclass Tuning(Model):\r\n\r\n def __init__(self, embedding_model, jamo=True,\r\n mode=\"pooling_and_cos\", seed=42, epochs=10, threshold=0.85):\r\n super(Tuning, self).__init__()\r\n\r\n np.random.seed(seed)\r\n tf.random.set_seed(seed)\r\n\r\n self.jamo = jamo\r\n\r\n self.embedding_model = embedding_model\r\n self.get_embedding(embedding_model)\r\n\r\n self.train_data = []\r\n self.train_label = []\r\n self.test_data = []\r\n self.test_label = []\r\n self.max_length = -1\r\n\r\n self.mode = mode\r\n self.seed = seed\r\n self.epochs = epochs\r\n\r\n self.threshold = threshold\r\n self.eval_detail = {}\r\n self.candidate = []\r\n\r\n\r\n def get_embedding(self, embedding_model):\r\n if embedding_model:\r\n embedding_vocab = list(embedding_model.wv.vocab.keys())\r\n embedding_word_index, embedding_index_word = {}, {}\r\n\r\n for i, word in enumerate(embedding_vocab):\r\n embedding_word_index[word], embedding_index_word[i] = i, word \r\n last = len(embedding_word_index)\r\n embedding_word_index[\"<UNK>\"], embedding_index_word[last] = last, \"<UNK>\"\r\n embedding_word_index[\"<PAD>\"], embedding_index_word[last+1] = last + 1, \"<PAD>\"\r\n\r\n embedding = np.random.random((len(embedding_word_index) + 1,\r\n embedding_model.vector_size))\r\n\r\n for i, word in enumerate(embedding_word_index.keys()):\r\n embedding[i] = embedding_model.wv[word]\r\n\r\n self.embedding_vocab = embedding_vocab\r\n self.embedding_word_index = embedding_word_index\r\n self.embedding_index_word = embedding_index_word\r\n self.embedding_weigth = embedding\r\n\r\n \r\n def build_training_data(self, train_data, train_label, test_data, test_label):\r\n 
self.train_data, self.test_data, self.max_length = transform_data.trans_words_for_tuning(\r\n train_data, test_data, self.embedding_word_index, self.jamo)\r\n self.train_label = train_label\r\n self.test_label = test_label\r\n\r\n\r\n def train(self):\r\n self.check_no_data()\r\n\r\n if self.mode == \"pooling_and_cos\":\r\n self.train_with_pooling_and_cos()\r\n elif self.mode == \"bi_lstm\":\r\n self.train_with_bi_lstm()\r\n else:\r\n raise RuntimeError(\"training mode must be one of pooling_and_cos, bi_lstm\")\r\n\r\n\r\n def check_no_data(self):\r\n if not self.train_data or not self.train_label or not self.test_data or not self.test_label:\r\n raise RuntimeError(\"There are no training data. You should call build_traing_data(...) first.\")\r\n\r\n if (not (len(self.train_data[0]) == len(self.train_data[1]) == len(self.train_label)) \r\n or not (len(self.test_data[0]) == len(self.test_data[1]) == len(self.test_label))):\r\n raise RuntimeError(\"Word pair data and label must have same length.\")\r\n\r\n\r\n def train_with_pooling_and_cos(self):\r\n input1 = keras.layers.Input(shape=(None,))\r\n input2 = keras.layers.Input(shape=(None,))\r\n\r\n embedding_layers = keras.layers.Embedding(\r\n self.embedding_weigth.shape[0], self.embedding_weigth.shape[1],\r\n weights=[self.embedding_weigth], trainable=True)\r\n embedding_output1 = embedding_layers(input1)\r\n embedding_output2 = embedding_layers(input2)\r\n\r\n pooling_layer = keras.layers.GlobalAveragePooling1D()\r\n pooling_output1 = pooling_layer(embedding_output1)\r\n pooling_output2 = pooling_layer(embedding_output2)\r\n\r\n cosine_output = keras.layers.Dot(axes=1, normalize=True)(\r\n [pooling_output1, pooling_output2])\r\n\r\n output = keras.layers.Dense(1, activation='sigmoid')(cosine_output)\r\n\r\n tuning_model = keras.Model(inputs=[input1, input2], outputs=output)\r\n tuning_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\r\n tuning_model.fit(x=[np.array(self.train_data[0]), np.array(self.train_data[1])], y=np.array(self.train_label),\r\n validation_data=([np.array(self.test_data[0]), np.array(self.test_data[1])], np.array(self.test_label)),\r\n epochs=self.epochs)\r\n\r\n self.model = tuning_model\r\n\r\n\r\n def train_with_bi_lstm(self):\r\n input1 = keras.layers.Input(shape=(None,))\r\n input2 = keras.layers.Input(shape=(None,))\r\n\r\n embedding_layers = keras.layers.Embedding(\r\n self.embedding_weigth.shape[0], self.embedding_weigth.shape[1],\r\n weights=[self.embedding_weigth], trainable=True)\r\n embedding_output1 = embedding_layers(input1)\r\n embedding_output2 = embedding_layers(input2)\r\n\r\n concat_layer = keras.layers.concatenate([embedding_output1, embedding_output2])\r\n\r\n bi_lstm_layer = keras.layers.Bidirectional(keras.layers.LSTM(\r\n 100, dropout=0.3))(concat_layer)\r\n\r\n output = keras.layers.Dense(1, activation='sigmoid')(bi_lstm_layer)\r\n\r\n tuning_model = keras.Model(inputs=[input1, input2], outputs=output)\r\n tuning_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\r\n tuning_model.fit(x=[np.array(self.train_data[0]), np.array(self.train_data[1])], y=np.array(self.train_label),\r\n validation_data=([np.array(self.test_data[0]), np.array(self.test_data[1])], np.array(self.test_label)),\r\n epochs=self.epochs)\r\n\r\n self.model = tuning_model\r\n\r\n\r\n def save_model(self, path=\"model\"):\r\n self.check_no_model()\r\n self.model.save(path)\r\n\r\n\r\n def eval(self, test_data):\r\n self.check_no_model()\r\n\r\n word1, word2 = [], 
[]\r\n for w1, w2, _ in test_data:\r\n word1.append(w1)\r\n word2.append(w2)\r\n\r\n vec1 = self.get_vector(word1)\r\n vec2 = self.get_vector(word2)\r\n\r\n result = self.model.predict([np.array(vec1), np.array(vec2)])\r\n \r\n right_count = 0\r\n for i in range(len(test_data)):\r\n if result[i][0] >= self.threshold:\r\n right_count += 1\r\n elif result[i][0] < self.threshold:\r\n right_count += 1\r\n self.eval_detail[test_data[i]] = result[i][0]\r\n\r\n return right_count / len(test_data) * 100\r\n\r\n\r\n def extract(self, basic_words, compared_words):\r\n candidate = []\r\n\r\n vec2 = self.get_vector(compared_words)\r\n\r\n for word1 in basic_words:\r\n vec1 = self.get_vector(word1)\r\n\r\n result = self.model.predict([np.array(vec1), np.array(vec2)])\r\n\r\n for i in range(len(result)):\r\n if result[i][0] >= self.threshold:\r\n candidate.append((word1, compared_words[i], result[i][0]))\r\n\r\n self.candidate = candidate\r\n\r\n\r\n def get_vector(self, words):\r\n vec = []\r\n\r\n for word in words:\r\n encoded_word = transform_data.get_encoded_word(\r\n word, self.embedding_word_index, self.jamo)\r\n vec.append(transform_data.get_padded_word(\r\n encoded_word, self.embedding_word_index, self.max_length))\r\n\r\n return vec\r\n\r\n" ]
[ [ "numpy.array", "numpy.dot", "numpy.linalg.norm", "tensorflow.keras.layers.GlobalAveragePooling1D", "tensorflow.keras.layers.Input", "tensorflow.keras.layers.LSTM", "numpy.random.seed", "tensorflow.random.set_seed", "numpy.zeros", "tensorflow.keras.layers.Embedding", "tensorflow.keras.layers.Dense", "tensorflow.keras.Model", "tensorflow.keras.layers.Dot", "tensorflow.keras.layers.concatenate" ] ]
MattePalte/thinking-like-a-developer
[ "548f061992cd1f45dd9878d76559836940608769" ]
[ "utils/visualizer.py" ]
[ "\"\"\"\nIt compares human and machine attention weights.\n\"\"\"\n\nimport json\nimport logging\nimport os\nimport re\nfrom tqdm import tqdm\nimport abc\nfrom abc import ABCMeta\nimport pandas as pd\nimport numpy as np\nimport random\nfrom copy import deepcopy\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport matplotlib.image as mpimg\nimport os\nimport json\nimport pandas as pd\nimport numpy as np\n\nfrom PIL import Image, ImageDraw, ImageFont\nimport matplotlib.patches as patches\nfrom matplotlib.pyplot import imshow\n\nfrom copy import copy\nimport numpy as np\n\nimport yaml\nimport pymongo\n\nfrom pprint import pprint\nfrom datetime import datetime\nimport argparse\n\n\nlogging.basicConfig() # required\nlogger = logging.getLogger('attention-comparer')\nlogger.setLevel(logging.INFO)\n\n\nclass VisualToken(object):\n\n def __init__(self, index_id, text, x, y, width, height, clicked):\n self.text = text\n if (index_id == \"\"):\n index_id = -1\n self.index_id = int(index_id)\n self.x = int(x)\n self.y = int(y)\n self.width = int(width)\n self.height = int(height)\n self.attention = 0\n self.clicked = clicked\n\n def draw_PIL(self, drw, global_attention=0,\n guessed_right=False,\n human=True,\n almost_correct=False):\n \"\"\"Draw the patch on the plot.\"\"\"\n alpha = 0.1\n if global_attention != 0:\n alpha = int((float(self.attention) / global_attention) * 255)\n if self.attention == 0:\n alpha = 0\n if human:\n # human\n if (almost_correct):\n color = (255, 127, 80, alpha) # orange)\n else:\n if (guessed_right):\n color = (26, 255, 26, alpha) # green\n else:\n color = (255, 102, 102, alpha) # red\n else:\n # Machine\n color = (0, 191, 255, alpha) # blue\n border = None\n if self.clicked:\n border = 'red'\n rect = \\\n drw.rectangle([\n self.x,\n self.y,\n self.x + self.width,\n self.y + self.height],\n outline=border,\n width=2,\n fill=color)\n\n def add_attention(self, attention):\n self.attention = attention\n\n def __repr__(self):\n return 'x:' + str(self.x).zfill(3) \\\n + ' - y:' + str(self.y).zfill(3) \\\n + ' - width:' + str(self.width).zfill(4) \\\n + ' - height:' + str(self.height).zfill(4) \\\n + ' - |' + self.text + '|'\n\n\nclass Visualizer(object):\n\n def __init__(self, df_human):\n self.df_human = deepcopy(df_human)\n\n self.df_human['is_warmup'] = \\\n (self.df_human['id'].astype('int') < 3).astype('int')\n\n self.df_human = self.df_human[self.df_human['is_warmup'] == 0]\n\n self.df_human.sort_values(by='time', ascending=True, inplace=True)\n self.df_human.drop_duplicates(\n subset=['randomcode', 'uuid'], keep='first', inplace=True\n )\n\n def plot_token_heatmap(self,\n survey_code_col,\n correct_col, almost_correct_col,\n user_selection__col,\n formatted_col, attention_col,\n tokens_col, clicked_col,\n id_col,\n sortby,\n only_users=None,\n limit=None):\n \"\"\"Plot Human and Machine heatmaps on token side by side.\"\"\"\n df = deepcopy(self.df_human)\n df.sort_values(by=sortby, inplace=True)\n if only_users is not None:\n df = df[df[survey_code_col].isin(only_users)]\n\n counter = 0\n for row in df.iterrows():\n counter += 1\n if limit is not None and counter > limit:\n break\n idx = row[0]\n record = row[1]\n correct_answered = \\\n record[correct_col] == record[user_selection__col]\n\n almost_correct = \\\n record[user_selection__col].lower() in [\n x.lower().replace('_', '')\n for x in record[almost_correct_col]]\n\n idx = record[id_col]\n user_code = record[survey_code_col]\n print('*' * 50)\n print(f\"Ground Truth: {record[correct_col]} - 
Provenance: {record['nickname']} - {user_code}\")\n print(f'Similar options: {record[almost_correct_col]}')\n\n fig, ax = self.process_single(\n tokens=record[tokens_col],\n human=True,\n attention=record[attention_col],\n formattedcode=record[formatted_col],\n correct_answered=correct_answered,\n almost_correct=almost_correct,\n final_clicked_tokens=record[clicked_col])\n\n ax.set_title(\n f'Ground Truth: {record[correct_col]} '\n + f'- User Selection: {record[user_selection__col]}')\n\n plt.show()\n\n def process_single(self, tokens, attention, human,\n formattedcode,\n correct_answered, almost_correct,\n final_clicked_tokens=None):\n \"\"\"Display attention of the given function.\"\"\"\n # PREPARE IMAGE\n path_font_file = '../public/FreeMono.ttf'\n surce_code_content = formattedcode\n #img_name = folder + data['id'] + data['rawdictionarykey'][1:] + '.png'\n\n ratio = (8.4/14)\n char_height = 20\n char_width = char_height * ratio\n\n # compute max width required\n lines = surce_code_content.splitlines()\n lines_len = [len(line) for line in lines]\n max_width = int(max(lines_len) * char_width)\n max_height = int(char_height * len(lines))\n\n img = Image.new('RGB', (max_width, max_height), color=(255, 255, 255))\n fnt = ImageFont.truetype(path_font_file, char_height)\n drw = ImageDraw.Draw(img, 'RGBA')\n drw.text((0, 0), surce_code_content, font=fnt, fill=(0, 0, 0))\n # CAN BE DELAYED AT AFTER TOKEN DRAWING img.save(img_name)\n\n # check clicked tokens to draw squares around them\n if final_clicked_tokens is not None:\n clicked_tokens = np.array(final_clicked_tokens)\n clicked_tokens_indices = np.where(clicked_tokens == 1)[0].tolist()\n else:\n clicked_tokens_indices = []\n\n # INSTANTIATE TOKENS\n # get the positon form the metadata of tokens\n viz_tokens = []\n # DEBUG print(tokens)\n # DEBUG print(formattedcode)\n for i, t in enumerate(tokens):\n # print(t)\n new_token = \\\n VisualToken(\n index_id=t['index_id'],\n text=t['text'],\n x=char_width * int(t['start_char']),\n y=char_height * int(t['line']),\n width=char_width * len(t['text']),\n height=char_height,\n clicked=(i in clicked_tokens_indices))\n viz_tokens.append(new_token)\n\n # COMPUTE ATTENTION\n global_attention = 1\n # compute attention\n for att, viz_token in zip(attention, viz_tokens):\n viz_token.add_attention(att)\n\n # COMPUTE REFERENCE ATTENTION TO RESCALE\n # sum all the attention received by the tokens\n global_attention = 0\n attentions = []\n for viz_token in viz_tokens:\n attentions.append(viz_token.attention)\n global_attention = max(attentions) * 1.33\n\n # check user was right to decide the color of the tokens (red vs green)\n # correct_answered decides the color\n for viz_token in viz_tokens:\n # print(token)\n viz_token.draw_PIL(drw, global_attention, correct_answered, human, almost_correct)\n\n #img.save(img_name)\n #return img_name\n imshow(np.asarray(img))\n fig = plt.gcf()\n fig.set_size_inches(18.5, 10.5)\n if human:\n plt.title('Human')\n else:\n plt.title('Machine')\n ax = plt.gca()\n return fig, ax\n\n\ndef plot_statistics(df, column_name, ax=None, color='blue'):\n df = deepcopy(df)\n mean = df[column_name].mean()\n median = df[column_name].median()\n\n if ax is None:\n f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw= {\"height_ratios\": (0.2, 1)})\n sns.boxplot(x=df[column_name], ax=ax_box, color=color)\n ax_box.axvline(mean, color='r', linestyle='--')\n ax_box.axvline(median, color='g', linestyle='-')\n ax_box.set(xlabel='')\n ax_box.yaxis.label.set_size(14)\n 
ax_box.xaxis.label.set_size(14)\n else:\n ax_hist = ax\n\n sns.histplot(x=df[column_name], ax=ax_hist, color=color)\n ax_hist.axvline(mean, color='r', linestyle='--')\n ax_hist.axvline(median, color='g', linestyle='-')\n ax_hist.legend({f'Mean {mean:.2f}':mean, f'Median {median:.2f}':median})\n ax_hist.yaxis.label.set_size(14)\n ax_hist.xaxis.label.set_size(14)\n\n if ax is None:\n plt.show()\n\n\ndef inspect(df, column_name, comparer,\n machine_col='att_vector_avg',\n human_col='att_vector_w_click',\n n_records_per_side=5,\n center=False,\n center_position=0.5,\n columns_to_observe=None):\n df = df.sort_values(by=column_name, ascending=True)\n df = df.drop_duplicates(subset='uuid')\n\n if center:\n center_position = int(len(df) * center_position)\n uuid_center = \\\n df.iloc[center_position - n_records_per_side:center_position + n_records_per_side]['uuid']\n randomcode_center = \\\n df.iloc[center_position - n_records_per_side:center_position + n_records_per_side]['randomcode']\n for uuid, randomcode in zip(uuid_center, randomcode_center):\n comparer.plot_token_heatmap_side_by_side(\n machine_col=machine_col,\n human_col=human_col,\n only_uuids=[uuid],\n only_users=[randomcode],\n columns_to_observe=columns_to_observe\n )\n else:\n # head\n print(f'Low value of {column_name}')\n uuid_head = df.head(n_records_per_side)['uuid']\n randomcode_head = df.head(n_records_per_side)['randomcode']\n for uuid, randomcode in zip(uuid_head, randomcode_head):\n comparer.plot_token_heatmap_side_by_side(\n machine_col=machine_col,\n human_col=human_col,\n only_uuids=[uuid],\n only_users=[randomcode],\n columns_to_observe=columns_to_observe\n )\n #print(uuid_head)\n # tail\n print(f'High value of {column_name}')\n uuid_tail = df.tail(n_records_per_side)['uuid']\n randomcode_tail = df.tail(n_records_per_side)['randomcode']\n #print(uuid_tail)\n for uuid, randomcode in zip(uuid_tail, randomcode_tail):\n comparer.plot_token_heatmap_side_by_side(\n machine_col=machine_col,\n human_col=human_col,\n only_uuids=[uuid],\n only_users=[randomcode],\n columns_to_observe=columns_to_observe\n )\n\n\n# ----------------------------------------------------------------\n\nfrom matplotlib import colors\n\n\nclass FlexibleVisualToken(object):\n\n def __init__(self, index_id, text, x, y, width, height, clicked):\n self.text = text\n if (index_id == \"\"):\n index_id = -1\n self.index_id = int(index_id)\n self.x = int(x)\n self.y = int(y)\n self.width = int(width)\n self.height = int(height)\n self.attention = 0\n self.clicked = clicked\n\n def draw_PIL(self, drw, global_attention=0,\n named_color='lime'):\n \"\"\"Draw the patch on the plot.\"\"\"\n alpha = 0.1\n if global_attention != 0:\n alpha = int((float(self.attention) / global_attention) * 255)\n if self.attention == 0:\n alpha = 0\n color_rgb = list(colors.to_rgb(named_color))\n color_rgb = [int(c * 255) for c in color_rgb]\n color_rgba = color_rgb + [alpha]\n color_rgba = tuple(color_rgba)\n border = None\n if self.clicked:\n border = 'red'\n rect = \\\n drw.rectangle([\n self.x,\n self.y,\n self.x + self.width,\n self.y + self.height],\n outline=border,\n width=2,\n fill=color_rgba)\n\n def add_attention(self, attention):\n self.attention = attention\n\n def __repr__(self):\n return 'x:' + str(self.x).zfill(3) \\\n + ' - y:' + str(self.y).zfill(3) \\\n + ' - width:' + str(self.width).zfill(4) \\\n + ' - height:' + str(self.height).zfill(4) \\\n + ' - |' + self.text + '|'\n\n\ndef plot_maps(df,\n weight_cols=[],\n label_cols=None,\n colors_for_cols=None,\n 
predictor_entity_name='Entity Predictor',\n prediction_col=None,\n max_records=None,\n output_in_folder=None,\n add_participant_id=True,\n add_square_for_clicks=False,\n limit_visualization_to=3):\n \"\"\"Print the attention weights on the method body.\"\"\"\n assert len(weight_cols) > 0\n assert len(df) > 0\n\n counter_visualized_maps = 0\n\n for i, row in enumerate(df.iterrows()):\n #print(i)\n if max_records is not None and i > max_records:\n break\n content = row[1]\n for j, attention_type in enumerate(weight_cols):\n named_color = colors_for_cols[j] \\\n if colors_for_cols is not None else 'red'\n final_clicked_tokens = content['finalclickedtokens'] \\\n if add_square_for_clicks else []\n fig, ax = plot_single_map(\n tokens_in_code=content['tokens_in_code'],\n attention_weights=content[attention_type],\n formattedcode=content['formattedcode'],\n final_clicked_tokens=final_clicked_tokens,\n named_color=named_color\n )\n if output_in_folder is not None:\n attention_name = label_cols[j] \\\n if label_cols is not None else attention_type\n filename = f'{i}-{predictor_entity_name}-{attention_name}-mtd:{content[\"uuid\"]}'\n if add_participant_id:\n filename += f'-ptc:{content[\"randomcode\"]}'\n filename = \"\".join([c for c in filename if c != ' ']) + '.png'\n filepath = os.path.join(output_in_folder, filename)\n print(filepath)\n prediction = content[prediction_col] \\\n if prediction_col is not None else 'undefined'\n if isinstance(prediction, list):\n prediction = [p for p in prediction if p != '%END%']\n prediction = [p for p in prediction if p != '%UNK%']\n\n title = f'{predictor_entity_name}: {prediction} - Original: {content[\"function_name\"]}'\n title += f' (what you see: {attention_name} weights)'\n plt.title(title)\n fig.savefig(filepath, format='png')\n if counter_visualized_maps < limit_visualization_to:\n plt.show()\n counter_visualized_maps += 1\n\n\ndef plot_single_map(tokens_in_code,\n attention_weights,\n named_color,\n formattedcode,\n final_clicked_tokens=None):\n \"\"\"Display attention of the given function.\"\"\"\n # PREPARE IMAGE\n path_font_file = '../public/FreeMono.ttf'\n surce_code_content = formattedcode\n #img_name = folder + data['id'] + data['rawdictionarykey'][1:] + '.png'\n\n ratio = (8.4/14)\n char_height = 20\n char_width = char_height * ratio\n\n # compute max width required\n lines = surce_code_content.splitlines()\n lines_len = [len(line) for line in lines]\n max_width = int(max(lines_len) * char_width)\n max_height = int(char_height * len(lines))\n\n img = Image.new('RGB', (max_width, max_height), color=(255, 255, 255))\n fnt = ImageFont.truetype(path_font_file, char_height)\n drw = ImageDraw.Draw(img, 'RGBA')\n drw.text((0, 0), surce_code_content, font=fnt, fill=(0, 0, 0))\n # CAN BE DELAYED AT AFTER TOKEN DRAWING img.save(img_name)\n\n # check clicked tokens to draw squares around them\n if final_clicked_tokens is not None:\n clicked_tokens = np.array(final_clicked_tokens)\n clicked_tokens_indices = np.where(clicked_tokens == 1)[0].tolist()\n else:\n clicked_tokens_indices = []\n\n # INSTANTIATE TOKENS\n # get the positon form the metadata of tokens\n viz_tokens = []\n # DEBUG print(tokens)\n # DEBUG print(formattedcode)\n for i, t in enumerate(tokens_in_code):\n # print(t)\n new_token = \\\n FlexibleVisualToken(\n index_id=t['index_id'],\n text=t['text'],\n x=char_width * int(t['start_char']),\n y=char_height * int(t['line']),\n width=char_width * len(t['text']),\n height=char_height,\n clicked=(i in clicked_tokens_indices))\n 
viz_tokens.append(new_token)\n\n # COMPUTE ATTENTION\n global_attention = 1\n # compute attention\n for att, viz_token in zip(attention_weights, viz_tokens):\n viz_token.add_attention(att)\n\n # COMPUTE REFERENCE ATTENTION TO RESCALE\n # sum all the attention received by the tokens\n global_attention = 0\n attentions = []\n for viz_token in viz_tokens:\n attentions.append(viz_token.attention)\n global_attention = max(attentions) * 1.33\n\n # check user was right to decide the color of the tokens (red vs green)\n # correct_answered decides the color\n for viz_token in viz_tokens:\n # print(token)\n viz_token.draw_PIL(drw, global_attention, named_color)\n\n #img.save(img_name)\n #return img_name\n imshow(np.asarray(img))\n fig = plt.gcf()\n #print(f'max_width: {max_width}')\n #print(f'max_width: {max_height}')\n FACTOR = 60\n fig.set_size_inches(max_width / FACTOR, max_height / FACTOR)\n\n plt.title('undefined')\n\n ax = plt.gca()\n return fig, ax\n" ]
[ [ "numpy.array", "numpy.asarray", "matplotlib.colors.to_rgb", "matplotlib.pyplot.title", "matplotlib.pyplot.subplots", "numpy.where", "matplotlib.pyplot.gcf", "matplotlib.pyplot.show", "matplotlib.pyplot.gca" ] ]
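The record above (the token-heatmap comparer) turns per-token attention weights into translucent rectangles drawn over a monospace rendering of the method: each weight is rescaled against 1.33 times the maximum weight in the method and used as the alpha channel of a named base colour. A minimal sketch of just that rescaling step, with invented weights standing in for the dataset's attention vectors:

from matplotlib import colors

def attention_to_rgba(weights, named_color="lime", headroom=1.33):
    """Rescale raw attention weights into RGBA fills the way the visualizer does:
    the reference is headroom * max(weight), so even the top token stays translucent."""
    reference = max(weights) * headroom
    r, g, b = (int(c * 255) for c in colors.to_rgb(named_color))
    return [(r, g, b, 0 if w == 0 else int(w / reference * 255)) for w in weights]

# Hypothetical weights for a five-token snippet.
print(attention_to_rgba([0.0, 0.1, 0.4, 0.2, 0.05]))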
kbines/rl-baselines-zoo
[ "1cf77866344e15d5f66cd55318199f9ea7f72fd7" ]
[ "utils/record_video.py" ]
[ "import os\nimport argparse\n\nimport gym\nimport numpy as np\nfrom stable_baselines.common.vec_env import VecVideoRecorder, VecFrameStack, VecNormalize\n\nfrom .utils import ALGOS, create_test_env, get_saved_hyperparams, get_latest_run_id, find_saved_model\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--env', help='environment ID', type=str, default='CartPole-v1')\n parser.add_argument('-f', '--folder', help='Log folder', type=str, default='trained_agents')\n parser.add_argument('-o', '--output-folder', help='Output folder', type=str, default='logs/videos/')\n parser.add_argument('--algo', help='RL Algorithm', default='ppo2',\n type=str, required=False, choices=list(ALGOS.keys()))\n parser.add_argument('-n', '--n-timesteps', help='number of timesteps', default=1000,\n type=int)\n parser.add_argument('--n-envs', help='number of environments', default=1,\n type=int)\n parser.add_argument('--deterministic', action='store_true', default=False,\n help='Use deterministic actions')\n parser.add_argument('--seed', help='Random generator seed', type=int, default=0)\n parser.add_argument('--no-render', action='store_true', default=False,\n help='Do not render the environment (useful for tests)')\n parser.add_argument('--exp-id', help='Experiment ID (default: -1, no exp folder, 0: latest)', default=-1,\n type=int)\n args = parser.parse_args()\n\n env_id = args.env\n algo = args.algo\n folder = args.folder\n video_folder = args.output_folder\n seed = args.seed\n deterministic = args.deterministic\n video_length = args.n_timesteps\n n_envs = args.n_envs\n\n if args.exp_id == 0:\n args.exp_id = get_latest_run_id(os.path.join(folder, algo), env_id)\n print('Loading latest experiment, id={}'.format(args.exp_id))\n # Sanity checks\n if args.exp_id > 0:\n log_path = os.path.join(folder, algo, '{}_{}'.format(env_id, args.exp_id))\n else:\n log_path = os.path.join(folder, algo)\n\n model_path = find_saved_model(algo, log_path, env_id)\n\n stats_path = os.path.join(log_path, env_id)\n hyperparams, stats_path = get_saved_hyperparams(stats_path)\n\n\n is_atari = 'NoFrameskip' in env_id\n\n env = create_test_env(env_id, n_envs=n_envs, is_atari=is_atari,\n stats_path=stats_path, seed=seed, log_dir=None,\n should_render=not args.no_render, hyperparams=hyperparams)\n\n model = ALGOS[algo].load(model_path)\n\n obs = env.reset()\n\n # Note: apparently it renders by default\n env = VecVideoRecorder(env, video_folder,\n record_video_trigger=lambda x: x == 0, video_length=video_length,\n name_prefix=\"{}-{}\".format(algo, env_id))\n\n env.reset()\n for _ in range(video_length + 1):\n # action = [env.action_space.sample()]\n action, _ = model.predict(obs, deterministic=deterministic)\n if isinstance(env.action_space, gym.spaces.Box):\n action = np.clip(action, env.action_space.low, env.action_space.high)\n obs, _, _, _ = env.step(action)\n\n # Workaround for https://github.com/openai/gym/issues/893\n if n_envs == 1 and 'Bullet' not in env_id and not is_atari:\n env = env.venv\n # DummyVecEnv\n while isinstance(env, VecNormalize) or isinstance(env, VecFrameStack):\n env = env.venv\n env.envs[0].env.close()\n else:\n # SubprocVecEnv\n env.close()\n" ]
[ [ "numpy.clip" ] ]
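The only API the record_video.py record is indexed on is the np.clip guard that runs before env.step: for continuous (Box) action spaces the policy's raw prediction is clamped to the space's low/high bounds so out-of-range values never reach the environment. A standalone sketch of that guard, with made-up bounds standing in for env.action_space.low and env.action_space.high:

import numpy as np

# Hypothetical Box bounds; in the script they come from env.action_space.
low = np.array([-1.0, -0.5])
high = np.array([1.0, 0.5])

raw_action = np.array([1.7, -0.8])            # policy output, possibly out of range
safe_action = np.clip(raw_action, low, high)  # what actually gets passed to env.step
print(safe_action)                            # [ 1.  -0.5]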
HindyDS/ParticleSwarmOptimization
[ "4a19514c01f7082645781408c24d4450530cf6dd" ]
[ "ParticleSwarmOptimization/ParticleSwarmOptimization/Particle.py" ]
[ "#!/usr/bin/env python\r\n# coding: utf-8\r\n\r\n# In[ ]:\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom sklearn.model_selection import GridSearchCV\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\n\r\nclass Particle:\r\n def __init__(self, estimator, X, y, cv, scoring, search_space):\r\n self.estimator = estimator\r\n self.X = X\r\n self.y = y\r\n self.cv = cv\r\n self.scoring = scoring\r\n self.search_space = search_space\r\n \r\n self.pos_log = []\r\n self.score_log = []\r\n self.gbest = None\r\n self.velo_log = []\r\n self.stop = None\r\n \r\n rand_init = [np.random.choice(self.search_space[k]) for k in self.search_space.keys()]\r\n self.position = {k:[v] for k, v in zip(self.search_space.keys(), rand_init)}\r\n self.position_vector = [v[0] for v in self.position.values()]\r\n \r\n def update_position(self, position):\r\n self.position = position\r\n \r\n def evaluate(self):\r\n if self.stop == None or self.stop == False:\r\n gs = GridSearchCV(self.estimator, self.position, cv=self.cv, scoring=self.scoring)\r\n gs.fit(self.X, self.y)\r\n self.score = gs.best_score_\r\n self.score_log.append(self.score)\r\n self.pos_log.append(self.position)\r\n \r\n def update_global_best(self, gbest):\r\n if gbest != self.gbest or self.gbest == None:\r\n self.gbest = gbest\r\n \r\n def update_velocity(self, max_step):\r\n types = [type(i[0]) for i in list(self.gbest.values())]\r\n velo = [(g[0] - c[0])/max_step for g, c in zip(self.gbest.values(), self.position.values())] \r\n self.velo = [t(v) for v, t in zip(velo, types)]\r\n self.velo_log.append(self.velo)\r\n self.types = types\r\n \r\n def move(self, step, acceleration=1):\r\n if sum(self.velo) == 0:\r\n self.stop = True\r\n \r\n if sum(self.velo) > 0:\r\n self.stop = False\r\n \r\n if self.stop == False or self.stop == None: \r\n self.position = {k:[v] for k, v in zip(self.position, [c[0] + v * step/acceleration for c, v in zip(self.position.values(), self.velo)])}\r\n self.position = {k:v if v[0] >= 0 else [0] for k, v in self.position.items()}\r\n self.position_vals = [t(v) for v, t in zip(self.position.values(), self.types)]\r\n self.position = {k:v for k, v in zip(self.position.keys(), self.position_vals)}\r\n" ]
[ [ "sklearn.model_selection.GridSearchCV", "numpy.random.choice" ] ]
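The Particle class in the previous record scores one point in hyperparameter space by wrapping each sampled value in a one-element list and letting GridSearchCV run the cross-validation for that single combination. A small sketch of that evaluation step on a toy classification problem (the estimator, data, and search space here are illustrative choices, not taken from the record):

import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV

search_space = {"n_estimators": [10, 50, 100], "max_depth": [2, 4, 8]}

# Sample one value per hyperparameter, as Particle.__init__ does with np.random.choice.
position = {k: [np.random.choice(v)] for k, v in search_space.items()}

X, y = make_classification(n_samples=200, random_state=0)
gs = GridSearchCV(RandomForestClassifier(random_state=0), position, cv=3, scoring="accuracy")
gs.fit(X, y)
print(position, gs.best_score_)  # the particle keeps best_score_ as its score for this position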
ashesh-0/MultiZoomGaze
[ "24494a1346d09e21e4b6d999a742b5d31bbbeff0" ]
[ "code/core/metadata_parser.py" ]
[ "from typing import Dict\n\nimport numpy as np\nfrom scipy.io import loadmat\n\n\ndef rescale_eye(eye: np.array, head_bbox: np.array) -> np.array:\n \"\"\"\n In metadata.mat, the person_eye_left_box or person_eye_left_box contains eye x,y,w,h data for both eyes.\n These coordinates are in normalized form with respect to original full scale image.\n This function converts them to normalized form with respect to head crop as defined by head_bbox. head_bbox is also\n in normalized coordinates with respect to original full scale image.\n\n \"\"\"\n return rescale_bbox(eye, head_bbox)\n\n\ndef rescale_bbox(bbox, enclosing_bbox):\n invalid_entries = np.all(bbox.astype(int) == -1, axis=1)\n bbox = [\n (bbox[:, 0] - enclosing_bbox[:, 0]) * (1 / enclosing_bbox[:, 2]),\n (bbox[:, 1] - enclosing_bbox[:, 1]) * (1 / enclosing_bbox[:, 3]),\n bbox[:, 2] / enclosing_bbox[:, 2],\n bbox[:, 3] / enclosing_bbox[:, 3],\n ]\n bbox = np.vstack(bbox).T\n bbox[invalid_entries, :] = -1\n print('Extra invalid entries', np.any(bbox[~invalid_entries, :] < 0, axis=1).sum())\n bbox[np.any(bbox < 0, axis=1), :] = -1\n return bbox\n\n\ndef get_eye_bbox_dict(\n mdata_path: str = '/tmp2/ashesh/gaze360_data/metadata.mat') -> Dict[int, Dict[int, Dict[int, dict]]]:\n \"\"\"\n Returns a nested dict of all integer keys session=> person => frame\n To get left eye bounding box, one needs to do :\n session = 0\n person = 12\n frame = 10\n data_dict[session][person][frame]['left']\n \"\"\"\n mdata = loadmat(mdata_path)\n leye = mdata['person_eye_left_bbox']\n reye = mdata['person_eye_right_bbox']\n\n head_bbox = mdata['person_head_bbox']\n leye = rescale_eye(leye, head_bbox)\n reye = rescale_eye(reye, head_bbox)\n\n sessions = mdata['recording']\n persons = mdata['person_identity']\n frames = mdata['frame']\n index_data = np.concatenate([sessions, persons, frames], axis=0).T\n bbox_data = np.concatenate([leye, reye], axis=1)\n\n data_dict = {}\n session_list = np.unique(mdata['recording'][0])\n for session in session_list:\n data_dict[session] = {}\n s_f = index_data[:, 0] == session\n s_index_data = index_data[s_f]\n s_bbox_data = bbox_data[s_f]\n persons = np.unique(s_index_data[:, 1])\n for person in persons:\n data_dict[session][person] = {}\n p_f = s_index_data[:, 1] == person\n p_index_data = s_index_data[p_f]\n p_bbox_data = s_bbox_data[p_f]\n for f_idx_data, f_bbox_data in zip(p_index_data, p_bbox_data):\n data_dict[session][person][f_idx_data[2]] = {'left': f_bbox_data[:4], 'right': f_bbox_data[4:]}\n return data_dict\n" ]
[ [ "numpy.concatenate", "scipy.io.loadmat", "numpy.any", "numpy.unique", "numpy.vstack" ] ]
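The rescale_bbox helper in the metadata_parser.py record re-expresses a bounding box given in full-frame normalized coordinates relative to an enclosing box: the offsets are shifted by the enclosing box's origin and all four components are divided by its width and height. A worked one-box example of that arithmetic (the numbers are invented for illustration):

import numpy as np

# One eye box and one head box, both as (x, y, w, h) normalized to the full frame.
eye = np.array([[0.42, 0.30, 0.06, 0.03]])
head = np.array([[0.40, 0.25, 0.20, 0.20]])

eye_in_head = np.vstack([
    (eye[:, 0] - head[:, 0]) / head[:, 2],  # x offset from the head origin, in head widths
    (eye[:, 1] - head[:, 1]) / head[:, 3],  # y offset from the head origin, in head heights
    eye[:, 2] / head[:, 2],                 # width as a fraction of the head width
    eye[:, 3] / head[:, 3],                 # height as a fraction of the head height
]).T
print(eye_in_head)  # approximately [[0.1, 0.25, 0.3, 0.15]]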