repo_name   string (lengths 6-130)
hexsha      list
file_path   list
code        list
apis        list
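The rows below appear to store, per repository, parallel lists in which the i-th hexsha, file_path, code string, and apis entry all describe the same source file. A minimal sketch of that record layout, assuming the rows are read into plain Python objects (the CodeRecord name is illustrative, not part of the dump):

from typing import List, TypedDict

class CodeRecord(TypedDict):
    # One row of the dump; the list columns look parallel, so hexsha[i],
    # file_path[i], code[i] and apis[i] describe the same file.
    repo_name: str            # 6-130 characters per the column summary above
    hexsha: List[str]         # blob hashes
    file_path: List[str]      # paths within the repository
    code: List[str]           # full file contents as strings
    apis: List[List[str]]     # e.g. ["numpy.asarray", "numpy.loadtxt"] per file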
qmlcode/qml-interface
[ "328d3bc522291c896fc6fdfd4f64f005b99bed3f", "328d3bc522291c896fc6fdfd4f64f005b99bed3f" ]
[ "test/test_representations.py", "qml/ml/kernels/kernels.py" ]
[ "# MIT License\n#\n# Copyright (c) 2017 Anders Steen Christensen\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom __future__ import print_function\n\nimport itertools\nimport contextlib\nfrom collections import defaultdict\nimport numpy as np\nimport os\n\nimport qml\nimport qml.ml.representations\nimport qml.ml.representations.data\n\nfrom qml.ml.representations import *\nfrom qml.ml.representations.data import NUCLEAR_CHARGE\n\ndef get_asize(mols, pad):\n\n asize = defaultdict()\n\n for mol in mols:\n for key, value in mol.natypes.items():\n try:\n asize[key] = max(asize[key], value + pad)\n except KeyError:\n asize[key] = value + pad\n return asize\n\ndef test_representations():\n files = [\"qm7/0101.xyz\",\n \"qm7/0102.xyz\",\n \"qm7/0103.xyz\",\n \"qm7/0104.xyz\",\n \"qm7/0105.xyz\",\n \"qm7/0106.xyz\",\n \"qm7/0107.xyz\",\n \"qm7/0108.xyz\",\n \"qm7/0109.xyz\",\n \"qm7/0110.xyz\"]\n\n path = test_dir = os.path.dirname(os.path.realpath(__file__))\n\n mols = []\n for xyz_file in files:\n mol = qml.data.Compound(xyz=path + \"/\" + xyz_file)\n mols.append(mol)\n\n size = max(mol.nuclear_charges.size for mol in mols) + 1\n\n asize = get_asize(mols,1)\n\n coulomb_matrix(mols, size, path)\n atomic_coulomb_matrix(mols, size, path)\n eigenvalue_coulomb_matrix(mols, size, path)\n bob(mols, size, asize, path)\n\ndef coulomb_matrix(mols, size, path):\n\n # Generate coulomb matrix representation, sorted by row-norm\n for i, mol in enumerate(mols): \n mol.generate_coulomb_matrix(size = size, sorting = \"row-norm\")\n\n X_test = np.asarray([mol.representation for mol in mols])\n X_ref = np.loadtxt(path + \"/data/coulomb_matrix_representation_row-norm_sorted.txt\")\n assert np.allclose(X_test, X_ref), \"Error in coulomb matrix representation\"\n\n # Generate coulomb matrix representation, unsorted, using the Compound class\n for i, mol in enumerate(mols): \n mol.generate_coulomb_matrix(size = size, sorting = \"unsorted\")\n\n X_test = np.asarray([mol.representation for mol in mols])\n X_ref = np.loadtxt(path + \"/data/coulomb_matrix_representation_unsorted.txt\")\n assert np.allclose(X_test, X_ref), \"Error in coulomb matrix representation\"\n\ndef atomic_coulomb_matrix(mols, size, path):\n\n # Generate coulomb matrix representation, sorted by distance\n for i, mol in enumerate(mols): \n mol.generate_atomic_coulomb_matrix(size = size, sorting = \"distance\")\n\n X_test = np.concatenate([mol.representation for mol in mols])\n X_ref = np.loadtxt(path + 
\"/data/atomic_coulomb_matrix_representation_distance_sorted.txt\")\n assert np.allclose(X_test, X_ref), \"Error in atomic coulomb matrix representation\"\n # Compare to old implementation (before 'indices' keyword)\n X_ref = np.loadtxt(path + \"/data/atomic_coulomb_matrix_representation_distance_sorted_no_indices.txt\")\n assert np.allclose(X_test, X_ref), \"Error in atomic coulomb matrix representation\"\n\n\n # Generate coulomb matrix representation, sorted by row-norm\n for i, mol in enumerate(mols): \n mol.generate_atomic_coulomb_matrix(size = size, sorting = \"row-norm\")\n\n X_test = np.concatenate([mol.representation for mol in mols])\n X_ref = np.loadtxt(path + \"/data/atomic_coulomb_matrix_representation_row-norm_sorted.txt\")\n assert np.allclose(X_test, X_ref), \"Error in atomic coulomb matrix representation\"\n\n # Generate coulomb matrix representation, sorted by distance, with soft cutoffs\n for i, mol in enumerate(mols): \n mol.generate_atomic_coulomb_matrix(size = size, sorting = \"distance\",\n central_cutoff = 4.0, central_decay = 0.5,\n interaction_cutoff = 5.0, interaction_decay = 1.0)\n\n X_test = np.concatenate([mol.representation for mol in mols])\n X_ref = np.loadtxt(path + \"/data/atomic_coulomb_matrix_representation_distance_sorted_with_cutoff.txt\")\n assert np.allclose(X_test, X_ref), \"Error in atomic coulomb matrix representation\"\n\n # Generate coulomb matrix representation, sorted by row-norm, with soft cutoffs\n for i, mol in enumerate(mols): \n mol.generate_atomic_coulomb_matrix(size = size, sorting = \"row-norm\",\n central_cutoff = 4.0, central_decay = 0.5,\n interaction_cutoff = 5.0, interaction_decay = 1.0)\n\n X_test = np.concatenate([mol.representation for mol in mols])\n X_ref = np.loadtxt(path + \"/data/atomic_coulomb_matrix_representation_row-norm_sorted_with_cutoff.txt\")\n assert np.allclose(X_test, X_ref), \"Error in atomic coulomb matrix representation\"\n\n # Generate only two atoms in the coulomb matrix representation, sorted by distance\n for i, mol in enumerate(mols): \n mol.generate_atomic_coulomb_matrix(size = size, sorting = \"distance\")\n representation_subset = mol.representation[1:3]\n mol.generate_atomic_coulomb_matrix(size = size, sorting = \"distance\", indices = [1,2])\n for i in range(2):\n for j in range(153):\n diff = representation_subset[i,j] - mol.representation[i,j]\n if abs(diff) > 1e-9:\n print (i,j,diff, representation_subset[i,j],mol.representation[i,j])\n assert np.allclose(representation_subset, mol.representation), \\\n \"Error in atomic coulomb matrix representation\"\n\n # Generate only two atoms in the coulomb matrix representation, sorted by row-norm\n for i, mol in enumerate(mols): \n mol.generate_atomic_coulomb_matrix(size = size, sorting = \"row-norm\")\n representation_subset = mol.representation[1:3]\n mol.generate_atomic_coulomb_matrix(size = size, sorting = \"row-norm\", indices = [1,2])\n for i in range(2):\n for j in range(153):\n diff = representation_subset[i,j] - mol.representation[i,j]\n if abs(diff) > 1e-9:\n print (i,j,diff, representation_subset[i,j],mol.representation[i,j])\n assert np.allclose(representation_subset, mol.representation), \\\n \"Error in atomic coulomb matrix representation\"\n\ndef eigenvalue_coulomb_matrix(mols, size, path):\n\n # Generate coulomb matrix representation, sorted by row-norm\n for i, mol in enumerate(mols): \n mol.generate_eigenvalue_coulomb_matrix(size = size)\n\n X_test = np.asarray([mol.representation for mol in mols])\n X_ref = np.loadtxt(path + 
\"/data/eigenvalue_coulomb_matrix_representation.txt\")\n assert np.allclose(X_test, X_ref), \"Error in eigenvalue coulomb matrix representation\"\n\ndef bob(mols, size, asize, path):\n\n for i, mol in enumerate(mols): \n mol.generate_bob(size=size, asize=asize)\n\n X_test = np.asarray([mol.representation for mol in mols])\n X_ref = np.loadtxt(path + \"/data/bob_representation.txt\")\n assert np.allclose(X_test, X_ref), \"Error in bag of bonds representation\"\n\ndef print_mol(mol):\n n = len(mol.representation.shape)\n if n == 1:\n for item in mol.representation:\n print(\"{:.9e}\".format(item), end=' ')\n print()\n elif n == 2:\n for atom in mol.representation:\n for item in atom:\n print(\"{:.9e}\".format(item), end=' ')\n print()\n\nif __name__ == \"__main__\":\n test_representations()\n\n", "# MIT License\n#\n# Copyright (c) 2016 Anders Steen Christensen, Felix A. Faber, Lars A. Bratholm\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom __future__ import division, absolute_import, print_function\n\nimport numpy as np\n\nfrom .fkernels import fgaussian_kernel\nfrom .fkernels import flaplacian_kernel\nfrom .fkernels import flinear_kernel\nfrom .fkernels import fsargan_kernel\nfrom .fkernels import fmatern_kernel_l2\n\nfrom .fkernels import fget_local_kernels_gaussian\nfrom .fkernels import fget_local_kernels_laplacian\n\ndef laplacian_kernel(A, B, sigma):\n \"\"\" Calculates the Laplacian kernel matrix K, where :math:`K_{ij}`:\n\n :math:`K_{ij} = \\\\exp \\\\big( -\\\\frac{\\\\|A_i - B_j\\\\|_1}{\\sigma} \\\\big)`\n\n Where :math:`A_{i}` and :math:`B_{j}` are representation vectors.\n K is calculated using an OpenMP parallel Fortran routine.\n\n :param A: 2D array of representations - shape (N, representation size).\n :type A: numpy array\n :param B: 2D array of representations - shape (M, representation size).\n :type B: numpy array\n :param sigma: The value of sigma in the kernel matrix.\n :type sigma: float\n\n :return: The Laplacian kernel matrix - shape (N, M)\n :rtype: numpy array\n \"\"\"\n\n na = A.shape[0]\n nb = B.shape[0]\n\n K = np.empty((na, nb), order='F')\n\n # Note: Transposed for Fortran\n flaplacian_kernel(A.T, na, B.T, nb, K, sigma)\n\n return K\n\ndef gaussian_kernel(A, B, sigma):\n \"\"\" Calculates the Gaussian kernel matrix K, where :math:`K_{ij}`:\n\n :math:`K_{ij} = \\\\exp \\\\big( -\\\\frac{\\\\|A_i - B_j\\\\|_2^2}{2\\sigma^2} \\\\big)`\n\n Where :math:`A_{i}` and :math:`B_{j}` are representation vectors.\n K is calculated using an OpenMP 
parallel Fortran routine.\n\n :param A: 2D array of representations - shape (N, representation size).\n :type A: numpy array\n :param B: 2D array of representations - shape (M, representation size).\n :type B: numpy array\n :param sigma: The value of sigma in the kernel matrix.\n :type sigma: float\n\n :return: The Gaussian kernel matrix - shape (N, M)\n :rtype: numpy array\n \"\"\"\n\n na = A.shape[0]\n nb = B.shape[0]\n\n K = np.empty((na, nb), order='F')\n\n # Note: Transposed for Fortran\n fgaussian_kernel(A.T, na, B.T, nb, K, sigma)\n\n return K\n\ndef linear_kernel(A, B):\n \"\"\" Calculates the linear kernel matrix K, where :math:`K_{ij}`:\n\n :math:`K_{ij} = A_i \\cdot B_j`\n\n VWhere :math:`A_{i}` and :math:`B_{j}` are representation vectors. \n\n K is calculated using an OpenMP parallel Fortran routine.\n\n :param A: 2D array of representations - shape (N, representation size).\n :type A: numpy array\n :param B: 2D array of representations - shape (M, representation size).\n :type B: numpy array\n\n :return: The Gaussian kernel matrix - shape (N, M)\n :rtype: numpy array\n \"\"\"\n\n na = A.shape[0]\n nb = B.shape[0]\n\n K = np.empty((na, nb), order='F')\n\n # Note: Transposed for Fortran\n flinear_kernel(A.T, na, B.T, nb, K)\n\n return K\n\ndef sargan_kernel(A, B, sigma, gammas):\n \"\"\" Calculates the Sargan kernel matrix K, where :math:`K_{ij}`:\n\n :math:`K_{ij} = \\\\exp \\\\big( -\\\\frac{\\\\| A_i - B_j \\\\|_1)}{\\sigma} \\\\big) \\\\big(1 + \\\\sum_{k} \\\\frac{\\gamma_{k} \\\\| A_i - B_j \\\\|_1^k}{\\sigma^k} \\\\big)`\n\n Where :math:`A_{i}` and :math:`B_{j}` are representation vectors.\n K is calculated using an OpenMP parallel Fortran routine.\n\n :param A: 2D array of representations - shape (N, representation size).\n :type A: numpy array\n :param B: 2D array of representations - shape (M, representation size).\n :type B: numpy array\n :param sigma: The value of sigma in the kernel matrix.\n :type sigma: float\n :param gammas: 1D array of parameters in the kernel matrix.\n :type gammas: numpy array\n\n :return: The Sargan kernel matrix - shape (N, M).\n :rtype: numpy array\n \"\"\"\n\n ng = len(gammas)\n\n if ng == 0:\n return laplacian_kernel(A, B, sigma)\n\n na = A.shape[0]\n nb = B.shape[0]\n\n K = np.empty((na, nb), order='F')\n\n # Note: Transposed for Fortran\n fsargan_kernel(A.T, na, B.T, nb, K, sigma, gammas, ng)\n\n return K\n\ndef matern_kernel(A, B, sigma, order = 0, metric = \"l1\"):\n \"\"\" Calculates the Matern kernel matrix K, where :math:`K_{ij}`:\n\n for order = 0:\n :math:`K_{ij} = \\\\exp\\\\big( -\\\\frac{d}{\\sigma} \\\\big)`\n for order = 1:\n :math:`K_{ij} = \\\\exp\\\\big( -\\\\frac{\\\\sqrt{3} d}{\\sigma} \\\\big) \\\\big(1 + \\\\frac{\\\\sqrt{3} d}{\\sigma} \\\\big)`\n for order = 2:\n :math:`K_{ij} = \\\\exp\\\\big( -\\\\frac{\\\\sqrt{5} d}{d} \\\\big) \\\\big( 1 + \\\\frac{\\\\sqrt{5} d}{\\sigma} + \\\\frac{5 d^2}{3\\sigma^2} \\\\big)`\n\n Where :math:`A_i` and :math:`B_j` are representation vectors, and d is a distance measure.\n\n K is calculated using an OpenMP parallel Fortran routine.\n\n :param A: 2D array of representations - shape (N, representation size).\n :type A: numpy array\n :param B: 2D array of representations - shape (M, representation size).\n :type B: numpy array\n :param sigma: The value of sigma in the kernel matrix.\n :type sigma: float\n :param order: The order of the polynomial (0, 1, 2)\n :type order: integer\n :param metric: The distance metric ('l1', 'l2')\n :type metric: string\n\n :return: The Matern kernel 
matrix - shape (N, M)\n :rtype: numpy array\n \"\"\"\n\n if metric == \"l1\":\n if order == 0:\n gammas = []\n elif order == 1:\n gammas = [1]\n sigma /= np.sqrt(3)\n elif order == 2:\n gammas = [1,1/3.0]\n sigma /= np.sqrt(5)\n else:\n print(\"Order:%d not implemented in Matern Kernel\" % order)\n raise SystemExit\n\n return sargan_kernel(A, B, sigma, gammas)\n\n elif metric == \"l2\":\n pass\n else:\n print(\"Error: Unknown distance metric %s in Matern kernel\" % str(metric))\n raise SystemExit\n\n na = A.shape[0]\n nb = B.shape[0]\n\n K = np.empty((na, nb), order='F')\n\n # Note: Transposed for Fortran\n fmatern_kernel_l2(A.T, na, B.T, nb, K, sigma, order)\n\n return K\n\ndef get_local_kernels_gaussian(A, B, na, nb, sigmas):\n \"\"\" Calculates the Gaussian kernel matrix K, for a local representation where :math:`K_{ij}`:\n\n :math:`K_{ij} = \\sum_{a \\in i} \\sum_{b \\in j} \\\\exp \\\\big( -\\\\frac{\\\\|A_a - B_b\\\\|_2^2}{2\\sigma^2} \\\\big)`\n\n Where :math:`A_{a}` and :math:`B_{b}` are representation vectors.\n\n Note that the input array is one big 2D array with all atoms concatenated along the same axis.\n Further more a series of kernels is produced (since calculating the distance matrix is expensive\n but getting the resulting kernels elements for several sigmas is not.)\n\n K is calculated using an OpenMP parallel Fortran routine.\n\n :param A: 2D array of descriptors - shape (total atoms A, representation size).\n :type A: numpy array\n :param B: 2D array of descriptors - shape (total atoms B, representation size).\n :type B: numpy array\n :param na: 1D array containing numbers of atoms in each compound.\n :type na: numpy array\n :param nb: 1D array containing numbers of atoms in each compound.\n :type nb: numpy array\n :param sigma: The value of sigma in the kernel matrix.\n :type sigma: float\n\n :return: The Gaussian kernel matrix - shape (nsigmas, N, M)\n :rtype: numpy array\n \"\"\"\n\n assert np.sum(na) == A.shape[0], \"Error in A input\"\n assert np.sum(nb) == B.shape[0], \"Error in B input\"\n\n assert A.shape[1] == B.shape[1], \"Error in representation sizes\"\n\n nma = len(na)\n nmb = len(nb)\n \n sigmas = np.asarray(sigmas)\n nsigmas = len(sigmas)\n\n return fget_local_kernels_gaussian(A.T, B.T, na, nb, sigmas, nma, nmb, nsigmas)\n\ndef get_local_kernels_laplacian(A, B, na, nb, sigmas):\n \"\"\" Calculates the Local Laplacian kernel matrix K, for a local representation where :math:`K_{ij}`:\n\n :math:`K_{ij} = \\sum_{a \\in i} \\sum_{b \\in j} \\\\exp \\\\big( -\\\\frac{\\\\|A_a - B_b\\\\|_1}{\\sigma} \\\\big)`\n\n Where :math:`A_{a}` and :math:`B_{b}` are representation vectors.\n\n Note that the input array is one big 2D array with all atoms concatenated along the same axis.\n Further more a series of kernels is produced (since calculating the distance matrix is expensive\n but getting the resulting kernels elements for several sigmas is not.)\n \n K is calculated using an OpenMP parallel Fortran routine.\n\n :param A: 2D array of descriptors - shape (N, representation size).\n :type A: numpy array\n :param B: 2D array of descriptors - shape (M, representation size).\n :type B: numpy array\n :param na: 1D array containing numbers of atoms in each compound.\n :type na: numpy array\n :param nb: 1D array containing numbers of atoms in each compound.\n :type nb: numpy array\n :param sigmas: List of the sigmas.\n :type sigmas: list\n\n :return: The Laplacian kernel matrix - shape (nsigmas, N, M)\n :rtype: numpy array\n \"\"\"\n\n assert np.sum(na) == A.shape[0], 
\"Error in A input\"\n assert np.sum(nb) == B.shape[0], \"Error in B input\"\n\n assert A.shape[1] == B.shape[1], \"Error in representation sizes\"\n\n nma = len(na)\n nmb = len(nb)\n \n sigmas = np.asarray(sigmas)\n nsigmas = len(sigmas)\n\n return fget_local_kernels_laplacian(A.T, B.T, na, nb, sigmas, nma, nmb, nsigmas)\n" ]
[ [ "numpy.asarray", "numpy.concatenate", "numpy.allclose", "numpy.loadtxt" ], [ "numpy.asarray", "numpy.sqrt", "numpy.sum", "numpy.empty" ] ]
dmft-wien2k/dmft-wien2k-v2
[ "83481be27e8a9ff14b9635d6cc1cd9d96f053487" ]
[ "src/putils/MMoment/transformations.py" ]
[ "# -*- coding: utf-8 -*-\n# transformations.py\n\n# Copyright (c) 2006-2012, Christoph Gohlke\n# Copyright (c) 2006-2012, The Regents of the University of California\n# Produced at the Laboratory for Fluorescence Dynamics\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the copyright holders nor the names of any\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"Homogeneous Transformation Matrices and Quaternions.\n\nA library for calculating 4x4 matrices for translating, rotating, reflecting,\nscaling, shearing, projecting, orthogonalizing, and superimposing arrays of\n3D homogeneous coordinates as well as for converting between rotation matrices,\nEuler angles, and quaternions. Also includes an Arcball control object and\nfunctions to decompose transformation matrices.\n\n:Authors:\n `Christoph Gohlke <http://www.lfd.uci.edu/~gohlke/>`__,\n Laboratory for Fluorescence Dynamics, University of California, Irvine\n\n:Version: 2012.01.01\n\nRequirements\n------------\n\n* `Python 2.7 or 3.2 <http://www.python.org>`__\n* `Numpy 1.6 <http://numpy.scipy.org>`__\n* `transformations.c 2012.01.01 <http://www.lfd.uci.edu/~gohlke/>`__\n (optional implementation of some functions in C)\n\nNotes\n-----\n\nThe API is not stable yet and is expected to change between revisions.\n\nThis Python code is not optimized for speed. Refer to the transformations.c\nmodule for a faster implementation of some functions.\n\nDocumentation in HTML format can be generated with epydoc.\n\nMatrices (M) can be inverted using numpy.linalg.inv(M), be concatenated using\nnumpy.dot(M0, M1), or transform homogeneous coordinate arrays (v) using\nnumpy.dot(M, v) for shape (4, \\*) column vectors, respectively\nnumpy.dot(v, M.T) for shape (\\*, 4) row vectors (\"array of points\").\n\nThis module follows the \"column vectors on the right\" and \"row major storage\"\n(C contiguous) conventions. The translation components are in the right column\nof the transformation matrix, i.e. M[:3, 3].\nThe transpose of the transformation matrices may have to be used to interface\nwith other graphics systems, e.g. with OpenGL's glMultMatrixd(). 
See also [16].\n\nCalculations are carried out with numpy.float64 precision.\n\nVector, point, quaternion, and matrix function arguments are expected to be\n\"array like\", i.e. tuple, list, or numpy arrays.\n\nReturn types are numpy arrays unless specified otherwise.\n\nAngles are in radians unless specified otherwise.\n\nQuaternions w+ix+jy+kz are represented as [w, x, y, z].\n\nA triple of Euler angles can be applied/interpreted in 24 ways, which can\nbe specified using a 4 character string or encoded 4-tuple:\n\n *Axes 4-string*: e.g. 'sxyz' or 'ryxy'\n\n - first character : rotations are applied to 's'tatic or 'r'otating frame\n - remaining characters : successive rotation axis 'x', 'y', or 'z'\n\n *Axes 4-tuple*: e.g. (0, 0, 0, 0) or (1, 1, 1, 1)\n\n - inner axis: code of axis ('x':0, 'y':1, 'z':2) of rightmost matrix.\n - parity : even (0) if inner axis 'x' is followed by 'y', 'y' is followed\n by 'z', or 'z' is followed by 'x'. Otherwise odd (1).\n - repetition : first and last axis are same (1) or different (0).\n - frame : rotations are applied to static (0) or rotating (1) frame.\n\nReferences\n----------\n\n(1) Matrices and transformations. Ronald Goldman.\n In \"Graphics Gems I\", pp 472-475. Morgan Kaufmann, 1990.\n(2) More matrices and transformations: shear and pseudo-perspective.\n Ronald Goldman. In \"Graphics Gems II\", pp 320-323. Morgan Kaufmann, 1991.\n(3) Decomposing a matrix into simple transformations. Spencer Thomas.\n In \"Graphics Gems II\", pp 320-323. Morgan Kaufmann, 1991.\n(4) Recovering the data from the transformation matrix. Ronald Goldman.\n In \"Graphics Gems II\", pp 324-331. Morgan Kaufmann, 1991.\n(5) Euler angle conversion. Ken Shoemake.\n In \"Graphics Gems IV\", pp 222-229. Morgan Kaufmann, 1994.\n(6) Arcball rotation control. Ken Shoemake.\n In \"Graphics Gems IV\", pp 175-192. Morgan Kaufmann, 1994.\n(7) Representing attitude: Euler angles, unit quaternions, and rotation\n vectors. James Diebel. 2006.\n(8) A discussion of the solution for the best rotation to relate two sets\n of vectors. W Kabsch. Acta Cryst. 1978. A34, 827-828.\n(9) Closed-form solution of absolute orientation using unit quaternions.\n BKP Horn. J Opt Soc Am A. 1987. 4(4):629-642.\n(10) Quaternions. Ken Shoemake.\n http://www.sfu.ca/~jwa3/cmpt461/files/quatut.pdf\n(11) From quaternion to matrix and back. JMP van Waveren. 2005.\n http://www.intel.com/cd/ids/developer/asmo-na/eng/293748.htm\n(12) Uniform random rotations. Ken Shoemake.\n In \"Graphics Gems III\", pp 124-132. Morgan Kaufmann, 1992.\n(13) Quaternion in molecular modeling. CFF Karney.\n J Mol Graph Mod, 25(5):595-604\n(14) New method for extracting the quaternion from a rotation matrix.\n Itzhack Y Bar-Itzhack, J Guid Contr Dynam. 2000. 23(6): 1085-1087.\n(15) Multiple View Geometry in Computer Vision. Hartley and Zissermann.\n Cambridge University Press; 2nd Ed. 2004. Chapter 4, Algorithm 4.7, p 130.\n(16) Column Vectors vs. 
Row Vectors.\n http://steve.hollasch.net/cgindex/math/matrix/column-vec.html\n\nExamples\n--------\n\n>>> alpha, beta, gamma = 0.123, -1.234, 2.345\n>>> origin, xaxis, yaxis, zaxis = [0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]\n>>> I = identity_matrix()\n>>> Rx = rotation_matrix(alpha, xaxis)\n>>> Ry = rotation_matrix(beta, yaxis)\n>>> Rz = rotation_matrix(gamma, zaxis)\n>>> R = concatenate_matrices(Rx, Ry, Rz)\n>>> euler = euler_from_matrix(R, 'rxyz')\n>>> numpy.allclose([alpha, beta, gamma], euler)\nTrue\n>>> Re = euler_matrix(alpha, beta, gamma, 'rxyz')\n>>> is_same_transform(R, Re)\nTrue\n>>> al, be, ga = euler_from_matrix(Re, 'rxyz')\n>>> is_same_transform(Re, euler_matrix(al, be, ga, 'rxyz'))\nTrue\n>>> qx = quaternion_about_axis(alpha, xaxis)\n>>> qy = quaternion_about_axis(beta, yaxis)\n>>> qz = quaternion_about_axis(gamma, zaxis)\n>>> q = quaternion_multiply(qx, qy)\n>>> q = quaternion_multiply(q, qz)\n>>> Rq = quaternion_matrix(q)\n>>> is_same_transform(R, Rq)\nTrue\n>>> S = scale_matrix(1.23, origin)\n>>> T = translation_matrix([1, 2, 3])\n>>> Z = shear_matrix(beta, xaxis, origin, zaxis)\n>>> R = random_rotation_matrix(numpy.random.rand(3))\n>>> M = concatenate_matrices(T, R, Z, S)\n>>> scale, shear, angles, trans, persp = decompose_matrix(M)\n>>> numpy.allclose(scale, 1.23)\nTrue\n>>> numpy.allclose(trans, [1, 2, 3])\nTrue\n>>> numpy.allclose(shear, [0, math.tan(beta), 0])\nTrue\n>>> is_same_transform(R, euler_matrix(axes='sxyz', *angles))\nTrue\n>>> M1 = compose_matrix(scale, shear, angles, trans, persp)\n>>> is_same_transform(M, M1)\nTrue\n>>> v0, v1 = random_vector(3), random_vector(3)\n>>> M = rotation_matrix(angle_between_vectors(v0, v1), vector_product(v0, v1))\n>>> v2 = numpy.dot(v0, M[:3,:3].T)\n>>> numpy.allclose(unit_vector(v1), unit_vector(v2))\nTrue\n\n\"\"\"\n\nfrom __future__ import division, print_function\n\nimport sys\nimport os\nimport warnings\nimport math\n\nimport numpy\n\n\ndef identity_matrix():\n \"\"\"Return 4x4 identity/unit matrix.\n\n >>> I = identity_matrix()\n >>> numpy.allclose(I, numpy.dot(I, I))\n True\n >>> numpy.sum(I), numpy.trace(I)\n (4.0, 4.0)\n >>> numpy.allclose(I, numpy.identity(4))\n True\n\n \"\"\"\n return numpy.identity(4)\n\n\ndef translation_matrix(direction):\n \"\"\"Return matrix to translate by direction vector.\n\n >>> v = numpy.random.random(3) - 0.5\n >>> numpy.allclose(v, translation_matrix(v)[:3, 3])\n True\n\n \"\"\"\n M = numpy.identity(4)\n M[:3, 3] = direction[:3]\n return M\n\n\ndef translation_from_matrix(matrix):\n \"\"\"Return translation vector from translation matrix.\n\n >>> v0 = numpy.random.random(3) - 0.5\n >>> v1 = translation_from_matrix(translation_matrix(v0))\n >>> numpy.allclose(v0, v1)\n True\n\n \"\"\"\n return numpy.array(matrix, copy=False)[:3, 3].copy()\n\n\ndef reflection_matrix(point, normal):\n \"\"\"Return matrix to mirror at plane defined by point and normal vector.\n\n >>> v0 = numpy.random.random(4) - 0.5\n >>> v0[3] = 1.\n >>> v1 = numpy.random.random(3) - 0.5\n >>> R = reflection_matrix(v0, v1)\n >>> numpy.allclose(2, numpy.trace(R))\n True\n >>> numpy.allclose(v0, numpy.dot(R, v0))\n True\n >>> v2 = v0.copy()\n >>> v2[:3] += v1\n >>> v3 = v0.copy()\n >>> v2[:3] -= v1\n >>> numpy.allclose(v2, numpy.dot(R, v3))\n True\n\n \"\"\"\n normal = unit_vector(normal[:3])\n M = numpy.identity(4)\n M[:3, :3] -= 2.0 * numpy.outer(normal, normal)\n M[:3, 3] = (2.0 * numpy.dot(point[:3], normal)) * normal\n return M\n\n\ndef reflection_from_matrix(matrix):\n \"\"\"Return mirror plane point and normal 
vector from reflection matrix.\n\n >>> v0 = numpy.random.random(3) - 0.5\n >>> v1 = numpy.random.random(3) - 0.5\n >>> M0 = reflection_matrix(v0, v1)\n >>> point, normal = reflection_from_matrix(M0)\n >>> M1 = reflection_matrix(point, normal)\n >>> is_same_transform(M0, M1)\n True\n\n \"\"\"\n M = numpy.array(matrix, dtype=numpy.float64, copy=False)\n # normal: unit eigenvector corresponding to eigenvalue -1\n w, V = numpy.linalg.eig(M[:3, :3])\n i = numpy.where(abs(numpy.real(w) + 1.0) < 1e-8)[0]\n if not len(i):\n raise ValueError(\"no unit eigenvector corresponding to eigenvalue -1\")\n normal = numpy.real(V[:, i[0]]).squeeze()\n # point: any unit eigenvector corresponding to eigenvalue 1\n w, V = numpy.linalg.eig(M)\n i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]\n if not len(i):\n raise ValueError(\"no unit eigenvector corresponding to eigenvalue 1\")\n point = numpy.real(V[:, i[-1]]).squeeze()\n point /= point[3]\n return point, normal\n\n\ndef rotation_matrix(angle, direction, point=None):\n \"\"\"Return matrix to rotate about axis defined by point and direction.\n\n >>> R = rotation_matrix(math.pi/2, [0, 0, 1], [1, 0, 0])\n >>> numpy.allclose(numpy.dot(R, [0, 0, 0, 1]), [1, -1, 0, 1])\n True\n >>> angle = (random.random() - 0.5) * (2*math.pi)\n >>> direc = numpy.random.random(3) - 0.5\n >>> point = numpy.random.random(3) - 0.5\n >>> R0 = rotation_matrix(angle, direc, point)\n >>> R1 = rotation_matrix(angle-2*math.pi, direc, point)\n >>> is_same_transform(R0, R1)\n True\n >>> R0 = rotation_matrix(angle, direc, point)\n >>> R1 = rotation_matrix(-angle, -direc, point)\n >>> is_same_transform(R0, R1)\n True\n >>> I = numpy.identity(4, numpy.float64)\n >>> numpy.allclose(I, rotation_matrix(math.pi*2, direc))\n True\n >>> numpy.allclose(2, numpy.trace(rotation_matrix(math.pi/2,\n ... 
direc, point)))\n True\n\n \"\"\"\n sina = math.sin(angle)\n cosa = math.cos(angle)\n direction = unit_vector(direction[:3])\n # rotation matrix around unit vector\n R = numpy.diag([cosa, cosa, cosa])\n R += numpy.outer(direction, direction) * (1.0 - cosa)\n direction *= sina\n R += numpy.array([[ 0.0, -direction[2], direction[1]],\n [ direction[2], 0.0, -direction[0]],\n [-direction[1], direction[0], 0.0]])\n M = numpy.identity(4)\n M[:3, :3] = R\n if point is not None:\n # rotation not around origin\n point = numpy.array(point[:3], dtype=numpy.float64, copy=False)\n M[:3, 3] = point - numpy.dot(R, point)\n return M\n\n\ndef rotation_from_matrix(matrix):\n \"\"\"Return rotation angle and axis from rotation matrix.\n\n >>> angle = (random.random() - 0.5) * (2*math.pi)\n >>> direc = numpy.random.random(3) - 0.5\n >>> point = numpy.random.random(3) - 0.5\n >>> R0 = rotation_matrix(angle, direc, point)\n >>> angle, direc, point = rotation_from_matrix(R0)\n >>> R1 = rotation_matrix(angle, direc, point)\n >>> is_same_transform(R0, R1)\n True\n\n \"\"\"\n R = numpy.array(matrix, dtype=numpy.float64, copy=False)\n R33 = R[:3, :3]\n # direction: unit eigenvector of R33 corresponding to eigenvalue of 1\n w, W = numpy.linalg.eig(R33.T)\n# i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]\n i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-7)[0]\n if not len(i):\n raise ValueError(\"no unit eigenvector corresponding to eigenvalue 1\")\n direction = numpy.real(W[:, i[-1]]).squeeze()\n # point: unit eigenvector of R33 corresponding to eigenvalue of 1\n w, Q = numpy.linalg.eig(R)\n# i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]\n i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-7)[0]\n if not len(i):\n raise ValueError(\"no unit eigenvector corresponding to eigenvalue 1\")\n point = numpy.real(Q[:, i[-1]]).squeeze()\n point /= point[3]\n # rotation angle depending on direction\n cosa = (numpy.trace(R33) - 1.0) / 2.0\n if abs(direction[2]) > 1e-8:\n sina = (R[1, 0] + (cosa-1.0)*direction[0]*direction[1]) / direction[2]\n elif abs(direction[1]) > 1e-8:\n sina = (R[0, 2] + (cosa-1.0)*direction[0]*direction[2]) / direction[1]\n else:\n sina = (R[2, 1] + (cosa-1.0)*direction[1]*direction[2]) / direction[0]\n angle = math.atan2(sina, cosa)\n return angle, direction, point\n\n\ndef scale_matrix(factor, origin=None, direction=None):\n \"\"\"Return matrix to scale by factor around origin in direction.\n\n Use factor -1 for point symmetry.\n\n >>> v = (numpy.random.rand(4, 5) - 0.5) * 20\n >>> v[3] = 1\n >>> S = scale_matrix(-1.234)\n >>> numpy.allclose(numpy.dot(S, v)[:3], -1.234*v[:3])\n True\n >>> factor = random.random() * 10 - 5\n >>> origin = numpy.random.random(3) - 0.5\n >>> direct = numpy.random.random(3) - 0.5\n >>> S = scale_matrix(factor, origin)\n >>> S = scale_matrix(factor, origin, direct)\n\n \"\"\"\n if direction is None:\n # uniform scaling\n M = numpy.diag([factor, factor, factor, 1.0])\n if origin is not None:\n M[:3, 3] = origin[:3]\n M[:3, 3] *= 1.0 - factor\n else:\n # nonuniform scaling\n direction = unit_vector(direction[:3])\n factor = 1.0 - factor\n M = numpy.identity(4)\n M[:3, :3] -= factor * numpy.outer(direction, direction)\n if origin is not None:\n M[:3, 3] = (factor * numpy.dot(origin[:3], direction)) * direction\n return M\n\n\ndef scale_from_matrix(matrix):\n \"\"\"Return scaling factor, origin and direction from scaling matrix.\n\n >>> factor = random.random() * 10 - 5\n >>> origin = numpy.random.random(3) - 0.5\n >>> direct = numpy.random.random(3) - 0.5\n >>> S0 = 
scale_matrix(factor, origin)\n >>> factor, origin, direction = scale_from_matrix(S0)\n >>> S1 = scale_matrix(factor, origin, direction)\n >>> is_same_transform(S0, S1)\n True\n >>> S0 = scale_matrix(factor, origin, direct)\n >>> factor, origin, direction = scale_from_matrix(S0)\n >>> S1 = scale_matrix(factor, origin, direction)\n >>> is_same_transform(S0, S1)\n True\n\n \"\"\"\n M = numpy.array(matrix, dtype=numpy.float64, copy=False)\n M33 = M[:3, :3]\n factor = numpy.trace(M33) - 2.0\n try:\n # direction: unit eigenvector corresponding to eigenvalue factor\n w, V = numpy.linalg.eig(M33)\n i = numpy.where(abs(numpy.real(w) - factor) < 1e-8)[0][0]\n direction = numpy.real(V[:, i]).squeeze()\n direction /= vector_norm(direction)\n except IndexError:\n # uniform scaling\n factor = (factor + 2.0) / 3.0\n direction = None\n # origin: any eigenvector corresponding to eigenvalue 1\n w, V = numpy.linalg.eig(M)\n i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]\n if not len(i):\n raise ValueError(\"no eigenvector corresponding to eigenvalue 1\")\n origin = numpy.real(V[:, i[-1]]).squeeze()\n origin /= origin[3]\n return factor, origin, direction\n\n\ndef projection_matrix(point, normal, direction=None,\n perspective=None, pseudo=False):\n \"\"\"Return matrix to project onto plane defined by point and normal.\n\n Using either perspective point, projection direction, or none of both.\n\n If pseudo is True, perspective projections will preserve relative depth\n such that Perspective = dot(Orthogonal, PseudoPerspective).\n\n >>> P = projection_matrix([0, 0, 0], [1, 0, 0])\n >>> numpy.allclose(P[1:, 1:], numpy.identity(4)[1:, 1:])\n True\n >>> point = numpy.random.random(3) - 0.5\n >>> normal = numpy.random.random(3) - 0.5\n >>> direct = numpy.random.random(3) - 0.5\n >>> persp = numpy.random.random(3) - 0.5\n >>> P0 = projection_matrix(point, normal)\n >>> P1 = projection_matrix(point, normal, direction=direct)\n >>> P2 = projection_matrix(point, normal, perspective=persp)\n >>> P3 = projection_matrix(point, normal, perspective=persp, pseudo=True)\n >>> is_same_transform(P2, numpy.dot(P0, P3))\n True\n >>> P = projection_matrix([3, 0, 0], [1, 1, 0], [1, 0, 0])\n >>> v0 = (numpy.random.rand(4, 5) - 0.5) * 20\n >>> v0[3] = 1\n >>> v1 = numpy.dot(P, v0)\n >>> numpy.allclose(v1[1], v0[1])\n True\n >>> numpy.allclose(v1[0], 3-v1[1])\n True\n\n \"\"\"\n M = numpy.identity(4)\n point = numpy.array(point[:3], dtype=numpy.float64, copy=False)\n normal = unit_vector(normal[:3])\n if perspective is not None:\n # perspective projection\n perspective = numpy.array(perspective[:3], dtype=numpy.float64,\n copy=False)\n M[0, 0] = M[1, 1] = M[2, 2] = numpy.dot(perspective-point, normal)\n M[:3, :3] -= numpy.outer(perspective, normal)\n if pseudo:\n # preserve relative depth\n M[:3, :3] -= numpy.outer(normal, normal)\n M[:3, 3] = numpy.dot(point, normal) * (perspective+normal)\n else:\n M[:3, 3] = numpy.dot(point, normal) * perspective\n M[3, :3] = -normal\n M[3, 3] = numpy.dot(perspective, normal)\n elif direction is not None:\n # parallel projection\n direction = numpy.array(direction[:3], dtype=numpy.float64, copy=False)\n scale = numpy.dot(direction, normal)\n M[:3, :3] -= numpy.outer(direction, normal) / scale\n M[:3, 3] = direction * (numpy.dot(point, normal) / scale)\n else:\n # orthogonal projection\n M[:3, :3] -= numpy.outer(normal, normal)\n M[:3, 3] = numpy.dot(point, normal) * normal\n return M\n\n\ndef projection_from_matrix(matrix, pseudo=False):\n \"\"\"Return projection plane and perspective point 
from projection matrix.\n\n Return values are same as arguments for projection_matrix function:\n point, normal, direction, perspective, and pseudo.\n\n >>> point = numpy.random.random(3) - 0.5\n >>> normal = numpy.random.random(3) - 0.5\n >>> direct = numpy.random.random(3) - 0.5\n >>> persp = numpy.random.random(3) - 0.5\n >>> P0 = projection_matrix(point, normal)\n >>> result = projection_from_matrix(P0)\n >>> P1 = projection_matrix(*result)\n >>> is_same_transform(P0, P1)\n True\n >>> P0 = projection_matrix(point, normal, direct)\n >>> result = projection_from_matrix(P0)\n >>> P1 = projection_matrix(*result)\n >>> is_same_transform(P0, P1)\n True\n >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=False)\n >>> result = projection_from_matrix(P0, pseudo=False)\n >>> P1 = projection_matrix(*result)\n >>> is_same_transform(P0, P1)\n True\n >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=True)\n >>> result = projection_from_matrix(P0, pseudo=True)\n >>> P1 = projection_matrix(*result)\n >>> is_same_transform(P0, P1)\n True\n\n \"\"\"\n M = numpy.array(matrix, dtype=numpy.float64, copy=False)\n M33 = M[:3, :3]\n w, V = numpy.linalg.eig(M)\n i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]\n if not pseudo and len(i):\n # point: any eigenvector corresponding to eigenvalue 1\n point = numpy.real(V[:, i[-1]]).squeeze()\n point /= point[3]\n # direction: unit eigenvector corresponding to eigenvalue 0\n w, V = numpy.linalg.eig(M33)\n i = numpy.where(abs(numpy.real(w)) < 1e-8)[0]\n if not len(i):\n raise ValueError(\"no eigenvector corresponding to eigenvalue 0\")\n direction = numpy.real(V[:, i[0]]).squeeze()\n direction /= vector_norm(direction)\n # normal: unit eigenvector of M33.T corresponding to eigenvalue 0\n w, V = numpy.linalg.eig(M33.T)\n i = numpy.where(abs(numpy.real(w)) < 1e-8)[0]\n if len(i):\n # parallel projection\n normal = numpy.real(V[:, i[0]]).squeeze()\n normal /= vector_norm(normal)\n return point, normal, direction, None, False\n else:\n # orthogonal projection, where normal equals direction vector\n return point, direction, None, None, False\n else:\n # perspective projection\n i = numpy.where(abs(numpy.real(w)) > 1e-8)[0]\n if not len(i):\n raise ValueError(\n \"no eigenvector not corresponding to eigenvalue 0\")\n point = numpy.real(V[:, i[-1]]).squeeze()\n point /= point[3]\n normal = - M[3, :3]\n perspective = M[:3, 3] / numpy.dot(point[:3], normal)\n if pseudo:\n perspective -= normal\n return point, normal, None, perspective, pseudo\n\n\ndef clip_matrix(left, right, bottom, top, near, far, perspective=False):\n \"\"\"Return matrix to obtain normalized device coordinates from frustrum.\n\n The frustrum bounds are axis-aligned along x (left, right),\n y (bottom, top) and z (near, far).\n\n Normalized device coordinates are in range [-1, 1] if coordinates are\n inside the frustrum.\n\n If perspective is True the frustrum is a truncated pyramid with the\n perspective point at origin and direction along z axis, otherwise an\n orthographic canonical view volume (a box).\n\n Homogeneous coordinates transformed by the perspective clip matrix\n need to be dehomogenized (divided by w coordinate).\n\n >>> frustrum = numpy.random.rand(6)\n >>> frustrum[1] += frustrum[0]\n >>> frustrum[3] += frustrum[2]\n >>> frustrum[5] += frustrum[4]\n >>> M = clip_matrix(perspective=False, *frustrum)\n >>> numpy.dot(M, [frustrum[0], frustrum[2], frustrum[4], 1])\n array([-1., -1., -1., 1.])\n >>> numpy.dot(M, [frustrum[1], frustrum[3], frustrum[5], 1])\n 
array([ 1., 1., 1., 1.])\n >>> M = clip_matrix(perspective=True, *frustrum)\n >>> v = numpy.dot(M, [frustrum[0], frustrum[2], frustrum[4], 1])\n >>> v / v[3]\n array([-1., -1., -1., 1.])\n >>> v = numpy.dot(M, [frustrum[1], frustrum[3], frustrum[4], 1])\n >>> v / v[3]\n array([ 1., 1., -1., 1.])\n\n \"\"\"\n if left >= right or bottom >= top or near >= far:\n raise ValueError(\"invalid frustrum\")\n if perspective:\n if near <= _EPS:\n raise ValueError(\"invalid frustrum: near <= 0\")\n t = 2.0 * near\n M = [[t/(left-right), 0.0, (right+left)/(right-left), 0.0],\n [0.0, t/(bottom-top), (top+bottom)/(top-bottom), 0.0],\n [0.0, 0.0, (far+near)/(near-far), t*far/(far-near)],\n [0.0, 0.0, -1.0, 0.0]]\n else:\n M = [[2.0/(right-left), 0.0, 0.0, (right+left)/(left-right)],\n [0.0, 2.0/(top-bottom), 0.0, (top+bottom)/(bottom-top)],\n [0.0, 0.0, 2.0/(far-near), (far+near)/(near-far)],\n [0.0, 0.0, 0.0, 1.0]]\n return numpy.array(M)\n\n\ndef shear_matrix(angle, direction, point, normal):\n \"\"\"Return matrix to shear by angle along direction vector on shear plane.\n\n The shear plane is defined by a point and normal vector. The direction\n vector must be orthogonal to the plane's normal vector.\n\n A point P is transformed by the shear matrix into P\" such that\n the vector P-P\" is parallel to the direction vector and its extent is\n given by the angle of P-P'-P\", where P' is the orthogonal projection\n of P onto the shear plane.\n\n >>> angle = (random.random() - 0.5) * 4*math.pi\n >>> direct = numpy.random.random(3) - 0.5\n >>> point = numpy.random.random(3) - 0.5\n >>> normal = numpy.cross(direct, numpy.random.random(3))\n >>> S = shear_matrix(angle, direct, point, normal)\n >>> numpy.allclose(1, numpy.linalg.det(S))\n True\n\n \"\"\"\n normal = unit_vector(normal[:3])\n direction = unit_vector(direction[:3])\n if abs(numpy.dot(normal, direction)) > 1e-6:\n raise ValueError(\"direction and normal vectors are not orthogonal\")\n angle = math.tan(angle)\n M = numpy.identity(4)\n M[:3, :3] += angle * numpy.outer(direction, normal)\n M[:3, 3] = -angle * numpy.dot(point[:3], normal) * direction\n return M\n\n\ndef shear_from_matrix(matrix):\n \"\"\"Return shear angle, direction and plane from shear matrix.\n\n >>> angle = (random.random() - 0.5) * 4*math.pi\n >>> direct = numpy.random.random(3) - 0.5\n >>> point = numpy.random.random(3) - 0.5\n >>> normal = numpy.cross(direct, numpy.random.random(3))\n >>> S0 = shear_matrix(angle, direct, point, normal)\n >>> angle, direct, point, normal = shear_from_matrix(S0)\n >>> S1 = shear_matrix(angle, direct, point, normal)\n >>> is_same_transform(S0, S1)\n True\n\n \"\"\"\n M = numpy.array(matrix, dtype=numpy.float64, copy=False)\n M33 = M[:3, :3]\n # normal: cross independent eigenvectors corresponding to the eigenvalue 1\n w, V = numpy.linalg.eig(M33)\n i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-4)[0]\n if len(i) < 2:\n raise ValueError(\"no two linear independent eigenvectors found %s\" % w)\n V = numpy.real(V[:, i]).squeeze().T\n lenorm = -1.0\n for i0, i1 in ((0, 1), (0, 2), (1, 2)):\n n = numpy.cross(V[i0], V[i1])\n w = vector_norm(n)\n if w > lenorm:\n lenorm = w\n normal = n\n normal /= lenorm\n # direction and angle\n direction = numpy.dot(M33 - numpy.identity(3), normal)\n angle = vector_norm(direction)\n direction /= angle\n angle = math.atan(angle)\n # point: eigenvector corresponding to eigenvalue 1\n w, V = numpy.linalg.eig(M)\n i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]\n if not len(i):\n raise ValueError(\"no eigenvector 
corresponding to eigenvalue 1\")\n point = numpy.real(V[:, i[-1]]).squeeze()\n point /= point[3]\n return angle, direction, point, normal\n\n\ndef decompose_matrix(matrix):\n \"\"\"Return sequence of transformations from transformation matrix.\n\n matrix : array_like\n Non-degenerative homogeneous transformation matrix\n\n Return tuple of:\n scale : vector of 3 scaling factors\n shear : list of shear factors for x-y, x-z, y-z axes\n angles : list of Euler angles about static x, y, z axes\n translate : translation vector along x, y, z axes\n perspective : perspective partition of matrix\n\n Raise ValueError if matrix is of wrong type or degenerative.\n\n >>> T0 = translation_matrix([1, 2, 3])\n >>> scale, shear, angles, trans, persp = decompose_matrix(T0)\n >>> T1 = translation_matrix(trans)\n >>> numpy.allclose(T0, T1)\n True\n >>> S = scale_matrix(0.123)\n >>> scale, shear, angles, trans, persp = decompose_matrix(S)\n >>> scale[0]\n 0.123\n >>> R0 = euler_matrix(1, 2, 3)\n >>> scale, shear, angles, trans, persp = decompose_matrix(R0)\n >>> R1 = euler_matrix(*angles)\n >>> numpy.allclose(R0, R1)\n True\n\n \"\"\"\n M = numpy.array(matrix, dtype=numpy.float64, copy=True).T\n if abs(M[3, 3]) < _EPS:\n raise ValueError(\"M[3, 3] is zero\")\n M /= M[3, 3]\n P = M.copy()\n P[:, 3] = 0.0, 0.0, 0.0, 1.0\n if not numpy.linalg.det(P):\n raise ValueError(\"matrix is singular\")\n\n scale = numpy.zeros((3, ))\n shear = [0.0, 0.0, 0.0]\n angles = [0.0, 0.0, 0.0]\n\n if any(abs(M[:3, 3]) > _EPS):\n perspective = numpy.dot(M[:, 3], numpy.linalg.inv(P.T))\n M[:, 3] = 0.0, 0.0, 0.0, 1.0\n else:\n perspective = numpy.array([0.0, 0.0, 0.0, 1.0])\n\n translate = M[3, :3].copy()\n M[3, :3] = 0.0\n\n row = M[:3, :3].copy()\n scale[0] = vector_norm(row[0])\n row[0] /= scale[0]\n shear[0] = numpy.dot(row[0], row[1])\n row[1] -= row[0] * shear[0]\n scale[1] = vector_norm(row[1])\n row[1] /= scale[1]\n shear[0] /= scale[1]\n shear[1] = numpy.dot(row[0], row[2])\n row[2] -= row[0] * shear[1]\n shear[2] = numpy.dot(row[1], row[2])\n row[2] -= row[1] * shear[2]\n scale[2] = vector_norm(row[2])\n row[2] /= scale[2]\n shear[1:] /= scale[2]\n\n if numpy.dot(row[0], numpy.cross(row[1], row[2])) < 0:\n numpy.negative(scale, scale)\n numpy.negative(row, row)\n\n angles[1] = math.asin(-row[0, 2])\n if math.cos(angles[1]):\n angles[0] = math.atan2(row[1, 2], row[2, 2])\n angles[2] = math.atan2(row[0, 1], row[0, 0])\n else:\n #angles[0] = math.atan2(row[1, 0], row[1, 1])\n angles[0] = math.atan2(-row[2, 1], row[1, 1])\n angles[2] = 0.0\n\n return scale, shear, angles, translate, perspective\n\n\ndef compose_matrix(scale=None, shear=None, angles=None, translate=None,\n perspective=None):\n \"\"\"Return transformation matrix from sequence of transformations.\n\n This is the inverse of the decompose_matrix function.\n\n Sequence of transformations:\n scale : vector of 3 scaling factors\n shear : list of shear factors for x-y, x-z, y-z axes\n angles : list of Euler angles about static x, y, z axes\n translate : translation vector along x, y, z axes\n perspective : perspective partition of matrix\n\n >>> scale = numpy.random.random(3) - 0.5\n >>> shear = numpy.random.random(3) - 0.5\n >>> angles = (numpy.random.random(3) - 0.5) * (2*math.pi)\n >>> trans = numpy.random.random(3) - 0.5\n >>> persp = numpy.random.random(4) - 0.5\n >>> M0 = compose_matrix(scale, shear, angles, trans, persp)\n >>> result = decompose_matrix(M0)\n >>> M1 = compose_matrix(*result)\n >>> is_same_transform(M0, M1)\n True\n\n \"\"\"\n M = numpy.identity(4)\n 
if perspective is not None:\n P = numpy.identity(4)\n P[3, :] = perspective[:4]\n M = numpy.dot(M, P)\n if translate is not None:\n T = numpy.identity(4)\n T[:3, 3] = translate[:3]\n M = numpy.dot(M, T)\n if angles is not None:\n R = euler_matrix(angles[0], angles[1], angles[2], 'sxyz')\n M = numpy.dot(M, R)\n if shear is not None:\n Z = numpy.identity(4)\n Z[1, 2] = shear[2]\n Z[0, 2] = shear[1]\n Z[0, 1] = shear[0]\n M = numpy.dot(M, Z)\n if scale is not None:\n S = numpy.identity(4)\n S[0, 0] = scale[0]\n S[1, 1] = scale[1]\n S[2, 2] = scale[2]\n M = numpy.dot(M, S)\n M /= M[3, 3]\n return M\n\n\ndef orthogonalization_matrix(lengths, angles):\n \"\"\"Return orthogonalization matrix for crystallographic cell coordinates.\n\n Angles are expected in degrees.\n\n The de-orthogonalization matrix is the inverse.\n\n >>> O = orthogonalization_matrix([10, 10, 10], [90, 90, 90])\n >>> numpy.allclose(O[:3, :3], numpy.identity(3, float) * 10)\n True\n >>> O = orthogonalization_matrix([9.8, 12.0, 15.5], [87.2, 80.7, 69.7])\n >>> numpy.allclose(numpy.sum(O), 43.063229)\n True\n\n \"\"\"\n a, b, c = lengths\n angles = numpy.radians(angles)\n sina, sinb, _ = numpy.sin(angles)\n cosa, cosb, cosg = numpy.cos(angles)\n co = (cosa * cosb - cosg) / (sina * sinb)\n return numpy.array([\n [ a*sinb*math.sqrt(1.0-co*co), 0.0, 0.0, 0.0],\n [-a*sinb*co, b*sina, 0.0, 0.0],\n [ a*cosb, b*cosa, c, 0.0],\n [ 0.0, 0.0, 0.0, 1.0]])\n\n\ndef affine_matrix_from_points(v0, v1, shear=True, scale=True, usesvd=True):\n \"\"\"Return affine transform matrix to register two point sets.\n\n v0 and v1 are shape (ndims, \\*) arrays of at least ndims non-homogeneous\n coordinates, where ndims is the dimensionality of the coordinate space.\n\n If shear is False, a similarity transformation matrix is returned.\n If also scale is False, a rigid/Eucledian transformation matrix\n is returned.\n\n By default the algorithm by Hartley and Zissermann [15] is used.\n If usesvd is True, similarity and Eucledian transformation matrices\n are calculated by minimizing the weighted sum of squared deviations\n (RMSD) according to the algorithm by Kabsch [8].\n Otherwise, and if ndims is 3, the quaternion based algorithm by Horn [9]\n is used, which is slower when using this Python implementation.\n\n The returned matrix performs rotation, translation and uniform scaling\n (if specified).\n\n >>> v0 = [[0, 1031, 1031, 0], [0, 0, 1600, 1600]]\n >>> v1 = [[675, 826, 826, 677], [55, 52, 281, 277]]\n >>> affine_matrix_from_points(v0, v1)\n array([[ 0.14549, 0.00062, 675.50008],\n [ 0.00048, 0.14094, 53.24971],\n [ 0. , 0. , 1. 
]])\n >>> T = translation_matrix(numpy.random.random(3)-0.5)\n >>> R = random_rotation_matrix(numpy.random.random(3))\n >>> S = scale_matrix(random.random())\n >>> M = concatenate_matrices(T, R, S)\n >>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20\n >>> v0[3] = 1\n >>> v1 = numpy.dot(M, v0)\n >>> v0[:3] += numpy.random.normal(0, 1e-8, 300).reshape(3, -1)\n >>> M = affine_matrix_from_points(v0[:3], v1[:3])\n >>> numpy.allclose(v1, numpy.dot(M, v0))\n True\n\n More examples in superimposition_matrix()\n\n \"\"\"\n v0 = numpy.array(v0, dtype=numpy.float64, copy=True)\n v1 = numpy.array(v1, dtype=numpy.float64, copy=True)\n\n ndims = v0.shape[0]\n if ndims < 2 or v0.shape[1] < ndims or v0.shape != v1.shape:\n raise ValueError(\"input arrays are of wrong shape or type\")\n\n # move centroids to origin\n t0 = -numpy.mean(v0, axis=1)\n M0 = numpy.identity(ndims+1)\n M0[:ndims, ndims] = t0\n v0 += t0.reshape(ndims, 1)\n t1 = -numpy.mean(v1, axis=1)\n M1 = numpy.identity(ndims+1)\n M1[:ndims, ndims] = t1\n v1 += t1.reshape(ndims, 1)\n\n if shear:\n # Affine transformation\n A = numpy.concatenate((v0, v1), axis=0)\n u, s, vh = numpy.linalg.svd(A.T)\n vh = vh[:ndims].T\n B = vh[:ndims]\n C = vh[ndims:2*ndims]\n t = numpy.dot(C, numpy.linalg.pinv(B))\n t = numpy.concatenate((t, numpy.zeros((ndims, 1))), axis=1)\n M = numpy.vstack((t, ((0.0,)*ndims) + (1.0,)))\n elif usesvd or ndims != 3:\n # Rigid transformation via SVD of covariance matrix\n u, s, vh = numpy.linalg.svd(numpy.dot(v1, v0.T))\n # rotation matrix from SVD orthonormal bases\n R = numpy.dot(u, vh)\n if numpy.linalg.det(R) < 0.0:\n # R does not constitute right handed system\n R -= numpy.outer(u[:, ndims-1], vh[ndims-1, :]*2.0)\n s[-1] *= -1.0\n # homogeneous transformation matrix\n M = numpy.identity(ndims+1)\n M[:ndims, :ndims] = R\n else:\n # Rigid transformation matrix via quaternion\n # compute symmetric matrix N\n xx, yy, zz = numpy.sum(v0 * v1, axis=1)\n xy, yz, zx = numpy.sum(v0 * numpy.roll(v1, -1, axis=0), axis=1)\n xz, yx, zy = numpy.sum(v0 * numpy.roll(v1, -2, axis=0), axis=1)\n N = [[xx+yy+zz, 0.0, 0.0, 0.0],\n [yz-zy, xx-yy-zz, 0.0, 0.0],\n [zx-xz, xy+yx, yy-xx-zz, 0.0],\n [xy-yx, zx+xz, yz+zy, zz-xx-yy]]\n # quaternion: eigenvector corresponding to most positive eigenvalue\n w, V = numpy.linalg.eigh(N)\n q = V[:, numpy.argmax(w)]\n q /= vector_norm(q) # unit quaternion\n # homogeneous transformation matrix\n M = quaternion_matrix(q)\n\n if scale and not shear:\n # Affine transformation; scale is ratio of RMS deviations from centroid\n v0 *= v0\n v1 *= v1\n M[:ndims, :ndims] *= math.sqrt(numpy.sum(v1) / numpy.sum(v0))\n\n # move centroids back\n M = numpy.dot(numpy.linalg.inv(M1), numpy.dot(M, M0))\n M /= M[ndims, ndims]\n return M\n\n\ndef superimposition_matrix(v0, v1, scale=False, usesvd=True):\n \"\"\"Return matrix to transform given 3D point set into second point set.\n\n v0 and v1 are shape (3, \\*) or (4, \\*) arrays of at least 3 points.\n\n The parameters scale and usesvd are explained in the more general\n affine_matrix_from_points function.\n\n The returned matrix is a similarity or Eucledian transformation matrix.\n This function has a fast C implementation in transformations.c.\n\n >>> v0 = numpy.random.rand(3, 10)\n >>> M = superimposition_matrix(v0, v0)\n >>> numpy.allclose(M, numpy.identity(4))\n True\n >>> R = random_rotation_matrix(numpy.random.random(3))\n >>> v0 = [[1,0,0], [0,1,0], [0,0,1], [1,1,1]]\n >>> v1 = numpy.dot(R, v0)\n >>> M = superimposition_matrix(v0, v1)\n >>> numpy.allclose(v1, numpy.dot(M, 
v0))\n True\n >>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20\n >>> v0[3] = 1\n >>> v1 = numpy.dot(R, v0)\n >>> M = superimposition_matrix(v0, v1)\n >>> numpy.allclose(v1, numpy.dot(M, v0))\n True\n >>> S = scale_matrix(random.random())\n >>> T = translation_matrix(numpy.random.random(3)-0.5)\n >>> M = concatenate_matrices(T, R, S)\n >>> v1 = numpy.dot(M, v0)\n >>> v0[:3] += numpy.random.normal(0, 1e-9, 300).reshape(3, -1)\n >>> M = superimposition_matrix(v0, v1, scale=True)\n >>> numpy.allclose(v1, numpy.dot(M, v0))\n True\n >>> M = superimposition_matrix(v0, v1, scale=True, usesvd=False)\n >>> numpy.allclose(v1, numpy.dot(M, v0))\n True\n >>> v = numpy.empty((4, 100, 3))\n >>> v[:, :, 0] = v0\n >>> M = superimposition_matrix(v0, v1, scale=True, usesvd=False)\n >>> numpy.allclose(v1, numpy.dot(M, v[:, :, 0]))\n True\n\n \"\"\"\n v0 = numpy.array(v0, dtype=numpy.float64, copy=False)[:3]\n v1 = numpy.array(v1, dtype=numpy.float64, copy=False)[:3]\n return affine_matrix_from_points(v0, v1, shear=False,\n scale=scale, usesvd=usesvd)\n\n\ndef euler_matrix(ai, aj, ak, axes='sxyz'):\n \"\"\"Return homogeneous rotation matrix from Euler angles and axis sequence.\n\n ai, aj, ak : Euler's roll, pitch and yaw angles\n axes : One of 24 axis sequences as string or encoded tuple\n\n >>> R = euler_matrix(1, 2, 3, 'syxz')\n >>> numpy.allclose(numpy.sum(R[0]), -1.34786452)\n True\n >>> R = euler_matrix(1, 2, 3, (0, 1, 0, 1))\n >>> numpy.allclose(numpy.sum(R[0]), -0.383436184)\n True\n >>> ai, aj, ak = (4*math.pi) * (numpy.random.random(3) - 0.5)\n >>> for axes in _AXES2TUPLE.keys():\n ... R = euler_matrix(ai, aj, ak, axes)\n >>> for axes in _TUPLE2AXES.keys():\n ... R = euler_matrix(ai, aj, ak, axes)\n\n \"\"\"\n try:\n firstaxis, parity, repetition, frame = _AXES2TUPLE[axes]\n except (AttributeError, KeyError):\n _TUPLE2AXES[axes] # validation\n firstaxis, parity, repetition, frame = axes\n\n i = firstaxis\n j = _NEXT_AXIS[i+parity]\n k = _NEXT_AXIS[i-parity+1]\n\n if frame:\n ai, ak = ak, ai\n if parity:\n ai, aj, ak = -ai, -aj, -ak\n\n si, sj, sk = math.sin(ai), math.sin(aj), math.sin(ak)\n ci, cj, ck = math.cos(ai), math.cos(aj), math.cos(ak)\n cc, cs = ci*ck, ci*sk\n sc, ss = si*ck, si*sk\n\n M = numpy.identity(4)\n if repetition:\n M[i, i] = cj\n M[i, j] = sj*si\n M[i, k] = sj*ci\n M[j, i] = sj*sk\n M[j, j] = -cj*ss+cc\n M[j, k] = -cj*cs-sc\n M[k, i] = -sj*ck\n M[k, j] = cj*sc+cs\n M[k, k] = cj*cc-ss\n else:\n M[i, i] = cj*ck\n M[i, j] = sj*sc-cs\n M[i, k] = sj*cc+ss\n M[j, i] = cj*sk\n M[j, j] = sj*ss+cc\n M[j, k] = sj*cs-sc\n M[k, i] = -sj\n M[k, j] = cj*si\n M[k, k] = cj*ci\n return M\n\n\ndef euler_from_matrix(matrix, axes='sxyz'):\n \"\"\"Return Euler angles from rotation matrix for specified axis sequence.\n\n axes : One of 24 axis sequences as string or encoded tuple\n\n Note that many Euler angle triplets can describe one matrix.\n\n >>> R0 = euler_matrix(1, 2, 3, 'syxz')\n >>> al, be, ga = euler_from_matrix(R0, 'syxz')\n >>> R1 = euler_matrix(al, be, ga, 'syxz')\n >>> numpy.allclose(R0, R1)\n True\n >>> angles = (4*math.pi) * (numpy.random.random(3) - 0.5)\n >>> for axes in _AXES2TUPLE.keys():\n ... R0 = euler_matrix(axes=axes, *angles)\n ... R1 = euler_matrix(axes=axes, *euler_from_matrix(R0, axes))\n ... 
if not numpy.allclose(R0, R1): print(axes, \"failed\")\n\n \"\"\"\n try:\n firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]\n except (AttributeError, KeyError):\n _TUPLE2AXES[axes] # validation\n firstaxis, parity, repetition, frame = axes\n\n i = firstaxis\n j = _NEXT_AXIS[i+parity]\n k = _NEXT_AXIS[i-parity+1]\n\n M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:3, :3]\n if repetition:\n sy = math.sqrt(M[i, j]*M[i, j] + M[i, k]*M[i, k])\n if sy > _EPS:\n ax = math.atan2( M[i, j], M[i, k])\n ay = math.atan2( sy, M[i, i])\n az = math.atan2( M[j, i], -M[k, i])\n else:\n ax = math.atan2(-M[j, k], M[j, j])\n ay = math.atan2( sy, M[i, i])\n az = 0.0\n else:\n cy = math.sqrt(M[i, i]*M[i, i] + M[j, i]*M[j, i])\n if cy > _EPS:\n ax = math.atan2( M[k, j], M[k, k])\n ay = math.atan2(-M[k, i], cy)\n az = math.atan2( M[j, i], M[i, i])\n else:\n ax = math.atan2(-M[j, k], M[j, j])\n ay = math.atan2(-M[k, i], cy)\n az = 0.0\n\n if parity:\n ax, ay, az = -ax, -ay, -az\n if frame:\n ax, az = az, ax\n return ax, ay, az\n\n\ndef euler_from_quaternion(quaternion, axes='sxyz'):\n \"\"\"Return Euler angles from quaternion for specified axis sequence.\n\n >>> angles = euler_from_quaternion([0.99810947, 0.06146124, 0, 0])\n >>> numpy.allclose(angles, [0.123, 0, 0])\n True\n\n \"\"\"\n return euler_from_matrix(quaternion_matrix(quaternion), axes)\n\n\ndef quaternion_from_euler(ai, aj, ak, axes='sxyz'):\n \"\"\"Return quaternion from Euler angles and axis sequence.\n\n ai, aj, ak : Euler's roll, pitch and yaw angles\n axes : One of 24 axis sequences as string or encoded tuple\n\n >>> q = quaternion_from_euler(1, 2, 3, 'ryxz')\n >>> numpy.allclose(q, [0.435953, 0.310622, -0.718287, 0.444435])\n True\n\n \"\"\"\n try:\n firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]\n except (AttributeError, KeyError):\n _TUPLE2AXES[axes] # validation\n firstaxis, parity, repetition, frame = axes\n\n i = firstaxis + 1\n j = _NEXT_AXIS[i+parity-1] + 1\n k = _NEXT_AXIS[i-parity] + 1\n\n if frame:\n ai, ak = ak, ai\n if parity:\n aj = -aj\n\n ai /= 2.0\n aj /= 2.0\n ak /= 2.0\n ci = math.cos(ai)\n si = math.sin(ai)\n cj = math.cos(aj)\n sj = math.sin(aj)\n ck = math.cos(ak)\n sk = math.sin(ak)\n cc = ci*ck\n cs = ci*sk\n sc = si*ck\n ss = si*sk\n\n q = numpy.empty((4, ))\n if repetition:\n q[0] = cj*(cc - ss)\n q[i] = cj*(cs + sc)\n q[j] = sj*(cc + ss)\n q[k] = sj*(cs - sc)\n else:\n q[0] = cj*cc + sj*ss\n q[i] = cj*sc - sj*cs\n q[j] = cj*ss + sj*cc\n q[k] = cj*cs - sj*sc\n if parity:\n q[j] *= -1.0\n\n return q\n\n\ndef quaternion_about_axis(angle, axis):\n \"\"\"Return quaternion for rotation about axis.\n\n >>> q = quaternion_about_axis(0.123, [1, 0, 0])\n >>> numpy.allclose(q, [0.99810947, 0.06146124, 0, 0])\n True\n\n \"\"\"\n q = numpy.array([0.0, axis[0], axis[1], axis[2]])\n qlen = vector_norm(q)\n if qlen > _EPS:\n q *= math.sin(angle/2.0) / qlen\n q[0] = math.cos(angle/2.0)\n return q\n\n\ndef quaternion_matrix(quaternion):\n \"\"\"Return homogeneous rotation matrix from quaternion.\n\n >>> M = quaternion_matrix([0.99810947, 0.06146124, 0, 0])\n >>> numpy.allclose(M, rotation_matrix(0.123, [1, 0, 0]))\n True\n >>> M = quaternion_matrix([1, 0, 0, 0])\n >>> numpy.allclose(M, numpy.identity(4))\n True\n >>> M = quaternion_matrix([0, 1, 0, 0])\n >>> numpy.allclose(M, numpy.diag([1, -1, -1, 1]))\n True\n\n \"\"\"\n q = numpy.array(quaternion, dtype=numpy.float64, copy=True)\n n = numpy.dot(q, q)\n if n < _EPS:\n return numpy.identity(4)\n q *= math.sqrt(2.0 / n)\n q = numpy.outer(q, q)\n 
return numpy.array([\n [1.0-q[2, 2]-q[3, 3], q[1, 2]-q[3, 0], q[1, 3]+q[2, 0], 0.0],\n [ q[1, 2]+q[3, 0], 1.0-q[1, 1]-q[3, 3], q[2, 3]-q[1, 0], 0.0],\n [ q[1, 3]-q[2, 0], q[2, 3]+q[1, 0], 1.0-q[1, 1]-q[2, 2], 0.0],\n [ 0.0, 0.0, 0.0, 1.0]])\n\n\ndef quaternion_from_matrix(matrix, isprecise=False):\n \"\"\"Return quaternion from rotation matrix.\n\n If isprecise is True, the input matrix is assumed to be a precise rotation\n matrix and a faster algorithm is used.\n\n >>> q = quaternion_from_matrix(numpy.identity(4), True)\n >>> numpy.allclose(q, [1, 0, 0, 0])\n True\n >>> q = quaternion_from_matrix(numpy.diag([1, -1, -1, 1]))\n >>> numpy.allclose(q, [0, 1, 0, 0]) or numpy.allclose(q, [0, -1, 0, 0])\n True\n >>> R = rotation_matrix(0.123, (1, 2, 3))\n >>> q = quaternion_from_matrix(R, True)\n >>> numpy.allclose(q, [0.9981095, 0.0164262, 0.0328524, 0.0492786])\n True\n >>> R = [[-0.545, 0.797, 0.260, 0], [0.733, 0.603, -0.313, 0],\n ... [-0.407, 0.021, -0.913, 0], [0, 0, 0, 1]]\n >>> q = quaternion_from_matrix(R)\n >>> numpy.allclose(q, [0.19069, 0.43736, 0.87485, -0.083611])\n True\n >>> R = [[0.395, 0.362, 0.843, 0], [-0.626, 0.796, -0.056, 0],\n ... [-0.677, -0.498, 0.529, 0], [0, 0, 0, 1]]\n >>> q = quaternion_from_matrix(R)\n >>> numpy.allclose(q, [0.82336615, -0.13610694, 0.46344705, -0.29792603])\n True\n >>> R = random_rotation_matrix()\n >>> q = quaternion_from_matrix(R)\n >>> is_same_transform(R, quaternion_matrix(q))\n True\n\n \"\"\"\n M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:4, :4]\n if isprecise:\n q = numpy.empty((4, ))\n t = numpy.trace(M)\n if t > M[3, 3]:\n q[0] = t\n q[3] = M[1, 0] - M[0, 1]\n q[2] = M[0, 2] - M[2, 0]\n q[1] = M[2, 1] - M[1, 2]\n else:\n i, j, k = 1, 2, 3\n if M[1, 1] > M[0, 0]:\n i, j, k = 2, 3, 1\n if M[2, 2] > M[i, i]:\n i, j, k = 3, 1, 2\n t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3]\n q[i] = t\n q[j] = M[i, j] + M[j, i]\n q[k] = M[k, i] + M[i, k]\n q[3] = M[k, j] - M[j, k]\n q *= 0.5 / math.sqrt(t * M[3, 3])\n else:\n m00 = M[0, 0]\n m01 = M[0, 1]\n m02 = M[0, 2]\n m10 = M[1, 0]\n m11 = M[1, 1]\n m12 = M[1, 2]\n m20 = M[2, 0]\n m21 = M[2, 1]\n m22 = M[2, 2]\n # symmetric matrix K\n K = numpy.array([[m00-m11-m22, 0.0, 0.0, 0.0],\n [m01+m10, m11-m00-m22, 0.0, 0.0],\n [m02+m20, m12+m21, m22-m00-m11, 0.0],\n [m21-m12, m02-m20, m10-m01, m00+m11+m22]])\n K /= 3.0\n # quaternion is eigenvector of K that corresponds to largest eigenvalue\n w, V = numpy.linalg.eigh(K)\n q = V[[3, 0, 1, 2], numpy.argmax(w)]\n if q[0] < 0.0:\n numpy.negative(q, q)\n return q\n\n\ndef quaternion_multiply(quaternion1, quaternion0):\n \"\"\"Return multiplication of two quaternions.\n\n >>> q = quaternion_multiply([4, 1, -2, 3], [8, -5, 6, 7])\n >>> numpy.allclose(q, [28, -44, -14, 48])\n True\n\n \"\"\"\n w0, x0, y0, z0 = quaternion0\n w1, x1, y1, z1 = quaternion1\n return numpy.array([-x1*x0 - y1*y0 - z1*z0 + w1*w0,\n x1*w0 + y1*z0 - z1*y0 + w1*x0,\n -x1*z0 + y1*w0 + z1*x0 + w1*y0,\n x1*y0 - y1*x0 + z1*w0 + w1*z0], dtype=numpy.float64)\n\n\ndef quaternion_conjugate(quaternion):\n \"\"\"Return conjugate of quaternion.\n\n >>> q0 = random_quaternion()\n >>> q1 = quaternion_conjugate(q0)\n >>> q1[0] == q0[0] and all(q1[1:] == -q0[1:])\n True\n\n \"\"\"\n q = numpy.array(quaternion, dtype=numpy.float64, copy=True)\n numpy.negative(q[1:], q[1:])\n return q\n\n\ndef quaternion_inverse(quaternion):\n \"\"\"Return inverse of quaternion.\n\n >>> q0 = random_quaternion()\n >>> q1 = quaternion_inverse(q0)\n >>> numpy.allclose(quaternion_multiply(q0, q1), [1, 0, 0, 0])\n 
True\n\n \"\"\"\n q = numpy.array(quaternion, dtype=numpy.float64, copy=True)\n numpy.negative(q[1:], q[1:])\n return q / numpy.dot(q, q)\n\n\ndef quaternion_real(quaternion):\n \"\"\"Return real part of quaternion.\n\n >>> quaternion_real([3, 0, 1, 2])\n 3.0\n\n \"\"\"\n return float(quaternion[0])\n\n\ndef quaternion_imag(quaternion):\n \"\"\"Return imaginary part of quaternion.\n\n >>> quaternion_imag([3, 0, 1, 2])\n array([ 0., 1., 2.])\n\n \"\"\"\n return numpy.array(quaternion[1:4], dtype=numpy.float64, copy=True)\n\n\ndef quaternion_slerp(quat0, quat1, fraction, spin=0, shortestpath=True):\n \"\"\"Return spherical linear interpolation between two quaternions.\n\n >>> q0 = random_quaternion()\n >>> q1 = random_quaternion()\n >>> q = quaternion_slerp(q0, q1, 0)\n >>> numpy.allclose(q, q0)\n True\n >>> q = quaternion_slerp(q0, q1, 1, 1)\n >>> numpy.allclose(q, q1)\n True\n >>> q = quaternion_slerp(q0, q1, 0.5)\n >>> angle = math.acos(numpy.dot(q0, q))\n >>> numpy.allclose(2, math.acos(numpy.dot(q0, q1)) / angle) or \\\n numpy.allclose(2, math.acos(-numpy.dot(q0, q1)) / angle)\n True\n\n \"\"\"\n q0 = unit_vector(quat0[:4])\n q1 = unit_vector(quat1[:4])\n if fraction == 0.0:\n return q0\n elif fraction == 1.0:\n return q1\n d = numpy.dot(q0, q1)\n if abs(abs(d) - 1.0) < _EPS:\n return q0\n if shortestpath and d < 0.0:\n # invert rotation\n d = -d\n numpy.negative(q1, q1)\n angle = math.acos(d) + spin * math.pi\n if abs(angle) < _EPS:\n return q0\n isin = 1.0 / math.sin(angle)\n q0 *= math.sin((1.0 - fraction) * angle) * isin\n q1 *= math.sin(fraction * angle) * isin\n q0 += q1\n return q0\n\n\ndef random_quaternion(rand=None):\n \"\"\"Return uniform random unit quaternion.\n\n rand: array like or None\n Three independent random variables that are uniformly distributed\n between 0 and 1.\n\n >>> q = random_quaternion()\n >>> numpy.allclose(1, vector_norm(q))\n True\n >>> q = random_quaternion(numpy.random.random(3))\n >>> len(q.shape), q.shape[0]==4\n (1, True)\n\n \"\"\"\n if rand is None:\n rand = numpy.random.rand(3)\n else:\n assert len(rand) == 3\n r1 = numpy.sqrt(1.0 - rand[0])\n r2 = numpy.sqrt(rand[0])\n pi2 = math.pi * 2.0\n t1 = pi2 * rand[1]\n t2 = pi2 * rand[2]\n return numpy.array([numpy.cos(t2)*r2, numpy.sin(t1)*r1,\n numpy.cos(t1)*r1, numpy.sin(t2)*r2])\n\n\ndef random_rotation_matrix(rand=None):\n \"\"\"Return uniform random rotation matrix.\n\n rand: array like\n Three independent random variables that are uniformly distributed\n between 0 and 1 for each returned quaternion.\n\n >>> R = random_rotation_matrix()\n >>> numpy.allclose(numpy.dot(R.T, R), numpy.identity(4))\n True\n\n \"\"\"\n return quaternion_matrix(random_quaternion(rand))\n\n\nclass Arcball(object):\n \"\"\"Virtual Trackball Control.\n\n >>> ball = Arcball()\n >>> ball = Arcball(initial=numpy.identity(4))\n >>> ball.place([320, 320], 320)\n >>> ball.down([500, 250])\n >>> ball.drag([475, 275])\n >>> R = ball.matrix()\n >>> numpy.allclose(numpy.sum(R), 3.90583455)\n True\n >>> ball = Arcball(initial=[1, 0, 0, 0])\n >>> ball.place([320, 320], 320)\n >>> ball.setaxes([1, 1, 0], [-1, 1, 0])\n >>> ball.setconstrain(True)\n >>> ball.down([400, 200])\n >>> ball.drag([200, 400])\n >>> R = ball.matrix()\n >>> numpy.allclose(numpy.sum(R), 0.2055924)\n True\n >>> ball.next()\n\n \"\"\"\n\n def __init__(self, initial=None):\n \"\"\"Initialize virtual trackball control.\n\n initial : quaternion or rotation matrix\n\n \"\"\"\n self._axis = None\n self._axes = None\n self._radius = 1.0\n self._center = [0.0, 0.0]\n 
self._vdown = numpy.array([0.0, 0.0, 1.0])\n self._constrain = False\n if initial is None:\n self._qdown = numpy.array([1.0, 0.0, 0.0, 0.0])\n else:\n initial = numpy.array(initial, dtype=numpy.float64)\n if initial.shape == (4, 4):\n self._qdown = quaternion_from_matrix(initial)\n elif initial.shape == (4, ):\n initial /= vector_norm(initial)\n self._qdown = initial\n else:\n raise ValueError(\"initial not a quaternion or matrix\")\n self._qnow = self._qpre = self._qdown\n\n def place(self, center, radius):\n \"\"\"Place Arcball, e.g. when window size changes.\n\n center : sequence[2]\n Window coordinates of trackball center.\n radius : float\n Radius of trackball in window coordinates.\n\n \"\"\"\n self._radius = float(radius)\n self._center[0] = center[0]\n self._center[1] = center[1]\n\n def setaxes(self, *axes):\n \"\"\"Set axes to constrain rotations.\"\"\"\n if axes is None:\n self._axes = None\n else:\n self._axes = [unit_vector(axis) for axis in axes]\n\n def setconstrain(self, constrain):\n \"\"\"Set state of constrain to axis mode.\"\"\"\n self._constrain = constrain == True\n\n def getconstrain(self):\n \"\"\"Return state of constrain to axis mode.\"\"\"\n return self._constrain\n\n def down(self, point):\n \"\"\"Set initial cursor window coordinates and pick constrain-axis.\"\"\"\n self._vdown = arcball_map_to_sphere(point, self._center, self._radius)\n self._qdown = self._qpre = self._qnow\n if self._constrain and self._axes is not None:\n self._axis = arcball_nearest_axis(self._vdown, self._axes)\n self._vdown = arcball_constrain_to_axis(self._vdown, self._axis)\n else:\n self._axis = None\n\n def drag(self, point):\n \"\"\"Update current cursor window coordinates.\"\"\"\n vnow = arcball_map_to_sphere(point, self._center, self._radius)\n if self._axis is not None:\n vnow = arcball_constrain_to_axis(vnow, self._axis)\n self._qpre = self._qnow\n t = numpy.cross(self._vdown, vnow)\n if numpy.dot(t, t) < _EPS:\n self._qnow = self._qdown\n else:\n q = [numpy.dot(self._vdown, vnow), t[0], t[1], t[2]]\n self._qnow = quaternion_multiply(q, self._qdown)\n\n def next(self, acceleration=0.0):\n \"\"\"Continue rotation in direction of last drag.\"\"\"\n q = quaternion_slerp(self._qpre, self._qnow, 2.0+acceleration, False)\n self._qpre, self._qnow = self._qnow, q\n\n def matrix(self):\n \"\"\"Return homogeneous rotation matrix.\"\"\"\n return quaternion_matrix(self._qnow)\n\n\ndef arcball_map_to_sphere(point, center, radius):\n \"\"\"Return unit sphere coordinates from window coordinates.\"\"\"\n v0 = (point[0] - center[0]) / radius\n v1 = (center[1] - point[1]) / radius\n n = v0*v0 + v1*v1\n if n > 1.0:\n # position outside of sphere\n n = math.sqrt(n)\n return numpy.array([v0/n, v1/n, 0.0])\n else:\n return numpy.array([v0, v1, math.sqrt(1.0 - n)])\n\n\ndef arcball_constrain_to_axis(point, axis):\n \"\"\"Return sphere point perpendicular to axis.\"\"\"\n v = numpy.array(point, dtype=numpy.float64, copy=True)\n a = numpy.array(axis, dtype=numpy.float64, copy=True)\n v -= a * numpy.dot(a, v) # on plane\n n = vector_norm(v)\n if n > _EPS:\n if v[2] < 0.0:\n numpy.negative(v, v)\n v /= n\n return v\n if a[2] == 1.0:\n return numpy.array([1.0, 0.0, 0.0])\n return unit_vector([-a[1], a[0], 0.0])\n\n\ndef arcball_nearest_axis(point, axes):\n \"\"\"Return axis, which arc is nearest to point.\"\"\"\n point = numpy.array(point, dtype=numpy.float64, copy=False)\n nearest = None\n mx = -1.0\n for axis in axes:\n t = numpy.dot(arcball_constrain_to_axis(point, axis), point)\n if t > mx:\n nearest = 
axis\n mx = t\n return nearest\n\n\n# epsilon for testing whether a number is close to zero\n_EPS = numpy.finfo(float).eps * 4.0\n\n# axis sequences for Euler angles\n_NEXT_AXIS = [1, 2, 0, 1]\n\n# map axes strings to/from tuples of inner axis, parity, repetition, frame\n_AXES2TUPLE = {\n 'sxyz': (0, 0, 0, 0), 'sxyx': (0, 0, 1, 0), 'sxzy': (0, 1, 0, 0),\n 'sxzx': (0, 1, 1, 0), 'syzx': (1, 0, 0, 0), 'syzy': (1, 0, 1, 0),\n 'syxz': (1, 1, 0, 0), 'syxy': (1, 1, 1, 0), 'szxy': (2, 0, 0, 0),\n 'szxz': (2, 0, 1, 0), 'szyx': (2, 1, 0, 0), 'szyz': (2, 1, 1, 0),\n 'rzyx': (0, 0, 0, 1), 'rxyx': (0, 0, 1, 1), 'ryzx': (0, 1, 0, 1),\n 'rxzx': (0, 1, 1, 1), 'rxzy': (1, 0, 0, 1), 'ryzy': (1, 0, 1, 1),\n 'rzxy': (1, 1, 0, 1), 'ryxy': (1, 1, 1, 1), 'ryxz': (2, 0, 0, 1),\n 'rzxz': (2, 0, 1, 1), 'rxyz': (2, 1, 0, 1), 'rzyz': (2, 1, 1, 1)}\n\n_TUPLE2AXES = dict((v, k) for k, v in _AXES2TUPLE.items())\n\n\ndef vector_norm(data, axis=None, out=None):\n \"\"\"Return length, i.e. eucledian norm, of ndarray along axis.\n\n >>> v = numpy.random.random(3)\n >>> n = vector_norm(v)\n >>> numpy.allclose(n, numpy.linalg.norm(v))\n True\n >>> v = numpy.random.rand(6, 5, 3)\n >>> n = vector_norm(v, axis=-1)\n >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=2)))\n True\n >>> n = vector_norm(v, axis=1)\n >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1)))\n True\n >>> v = numpy.random.rand(5, 4, 3)\n >>> n = numpy.empty((5, 3))\n >>> vector_norm(v, axis=1, out=n)\n >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1)))\n True\n >>> vector_norm([])\n 0.0\n >>> vector_norm([1])\n 1.0\n\n \"\"\"\n data = numpy.array(data, dtype=numpy.float64, copy=True)\n if out is None:\n if data.ndim == 1:\n return math.sqrt(numpy.dot(data, data))\n data *= data\n out = numpy.atleast_1d(numpy.sum(data, axis=axis))\n numpy.sqrt(out, out)\n return out\n else:\n data *= data\n numpy.sum(data, axis=axis, out=out)\n numpy.sqrt(out, out)\n\n\ndef unit_vector(data, axis=None, out=None):\n \"\"\"Return ndarray normalized by length, i.e. 
eucledian norm, along axis.\n\n >>> v0 = numpy.random.random(3)\n >>> v1 = unit_vector(v0)\n >>> numpy.allclose(v1, v0 / numpy.linalg.norm(v0))\n True\n >>> v0 = numpy.random.rand(5, 4, 3)\n >>> v1 = unit_vector(v0, axis=-1)\n >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=2)), 2)\n >>> numpy.allclose(v1, v2)\n True\n >>> v1 = unit_vector(v0, axis=1)\n >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=1)), 1)\n >>> numpy.allclose(v1, v2)\n True\n >>> v1 = numpy.empty((5, 4, 3))\n >>> unit_vector(v0, axis=1, out=v1)\n >>> numpy.allclose(v1, v2)\n True\n >>> list(unit_vector([]))\n []\n >>> list(unit_vector([1]))\n [1.0]\n\n \"\"\"\n if out is None:\n data = numpy.array(data, dtype=numpy.float64, copy=True)\n if data.ndim == 1:\n data /= math.sqrt(numpy.dot(data, data))\n return data\n else:\n if out is not data:\n out[:] = numpy.array(data, copy=False)\n data = out\n length = numpy.atleast_1d(numpy.sum(data*data, axis))\n numpy.sqrt(length, length)\n if axis is not None:\n length = numpy.expand_dims(length, axis)\n data /= length\n if out is None:\n return data\n\n\ndef random_vector(size):\n \"\"\"Return array of random doubles in the half-open interval [0.0, 1.0).\n\n >>> v = random_vector(10000)\n >>> numpy.all(v >= 0) and numpy.all(v < 1)\n True\n >>> v0 = random_vector(10)\n >>> v1 = random_vector(10)\n >>> numpy.any(v0 == v1)\n False\n\n \"\"\"\n return numpy.random.random(size)\n\n\ndef vector_product(v0, v1, axis=0):\n \"\"\"Return vector perpendicular to vectors.\n\n >>> v = vector_product([2, 0, 0], [0, 3, 0])\n >>> numpy.allclose(v, [0, 0, 6])\n True\n >>> v0 = [[2, 0, 0, 2], [0, 2, 0, 2], [0, 0, 2, 2]]\n >>> v1 = [[3], [0], [0]]\n >>> v = vector_product(v0, v1)\n >>> numpy.allclose(v, [[0, 0, 0, 0], [0, 0, 6, 6], [0, -6, 0, -6]])\n True\n >>> v0 = [[2, 0, 0], [2, 0, 0], [0, 2, 0], [2, 0, 0]]\n >>> v1 = [[0, 3, 0], [0, 0, 3], [0, 0, 3], [3, 3, 3]]\n >>> v = vector_product(v0, v1, axis=1)\n >>> numpy.allclose(v, [[0, 0, 6], [0, -6, 0], [6, 0, 0], [0, -6, 6]])\n True\n\n \"\"\"\n return numpy.cross(v0, v1, axis=axis)\n\n\ndef angle_between_vectors(v0, v1, directed=True, axis=0):\n \"\"\"Return angle between vectors.\n\n If directed is False, the input vectors are interpreted as undirected axes,\n i.e. the maximum angle is pi/2.\n\n >>> a = angle_between_vectors([1, -2, 3], [-1, 2, -3])\n >>> numpy.allclose(a, math.pi)\n True\n >>> a = angle_between_vectors([1, -2, 3], [-1, 2, -3], directed=False)\n >>> numpy.allclose(a, 0)\n True\n >>> v0 = [[2, 0, 0, 2], [0, 2, 0, 2], [0, 0, 2, 2]]\n >>> v1 = [[3], [0], [0]]\n >>> a = angle_between_vectors(v0, v1)\n >>> numpy.allclose(a, [0, 1.5708, 1.5708, 0.95532])\n True\n >>> v0 = [[2, 0, 0], [2, 0, 0], [0, 2, 0], [2, 0, 0]]\n >>> v1 = [[0, 3, 0], [0, 0, 3], [0, 0, 3], [3, 3, 3]]\n >>> a = angle_between_vectors(v0, v1, axis=1)\n >>> numpy.allclose(a, [1.5708, 1.5708, 1.5708, 0.95532])\n True\n\n \"\"\"\n v0 = numpy.array(v0, dtype=numpy.float64, copy=False)\n v1 = numpy.array(v1, dtype=numpy.float64, copy=False)\n dot = numpy.sum(v0 * v1, axis=axis)\n dot /= vector_norm(v0, axis=axis) * vector_norm(v1, axis=axis)\n return numpy.arccos(dot if directed else numpy.fabs(dot))\n\n\ndef inverse_matrix(matrix):\n \"\"\"Return inverse of square transformation matrix.\n\n >>> M0 = random_rotation_matrix()\n >>> M1 = inverse_matrix(M0.T)\n >>> numpy.allclose(M1, numpy.linalg.inv(M0.T))\n True\n >>> for size in range(1, 7):\n ... M0 = numpy.random.rand(size, size)\n ... M1 = inverse_matrix(M0)\n ... 
if not numpy.allclose(M1, numpy.linalg.inv(M0)): print(size)\n\n \"\"\"\n return numpy.linalg.inv(matrix)\n\n\ndef concatenate_matrices(*matrices):\n \"\"\"Return concatenation of series of transformation matrices.\n\n >>> M = numpy.random.rand(16).reshape((4, 4)) - 0.5\n >>> numpy.allclose(M, concatenate_matrices(M))\n True\n >>> numpy.allclose(numpy.dot(M, M.T), concatenate_matrices(M, M.T))\n True\n\n \"\"\"\n M = numpy.identity(4)\n for i in matrices:\n M = numpy.dot(M, i)\n return M\n\n\ndef is_same_transform(matrix0, matrix1):\n \"\"\"Return True if two matrices perform same transformation.\n\n >>> is_same_transform(numpy.identity(4), numpy.identity(4))\n True\n >>> is_same_transform(numpy.identity(4), random_rotation_matrix())\n False\n\n \"\"\"\n matrix0 = numpy.array(matrix0, dtype=numpy.float64, copy=True)\n matrix0 /= matrix0[3, 3]\n matrix1 = numpy.array(matrix1, dtype=numpy.float64, copy=True)\n matrix1 /= matrix1[3, 3]\n return numpy.allclose(matrix0, matrix1)\n\n\ndef _import_module(module_name, warn=False, prefix='_py_', ignore='_'):\n \"\"\"Try import all public attributes from module into global namespace.\n\n Existing attributes with name clashes are renamed with prefix.\n Attributes starting with underscore are ignored by default.\n\n Return True on successful import.\n\n \"\"\"\n sys.path.append(os.path.dirname(__file__))\n try:\n module = __import__(module_name)\n except ImportError:\n sys.path.pop()\n if warn:\n warnings.warn(\"failed to import module \" + module_name)\n else:\n sys.path.pop()\n for attr in dir(module):\n if ignore and attr.startswith(ignore):\n continue\n if prefix:\n if attr in globals():\n globals()[prefix + attr] = globals()[attr]\n elif warn:\n warnings.warn(\"no Python implementation of \" + attr)\n globals()[attr] = getattr(module, attr)\n return True\n\n\n__version__ = '2012.01.01'\n__docformat__ = 'restructuredtext en'\n\n_import_module('_transformations')\n\nif __name__ == \"__main__\":\n import doctest\n import random # used in doctests\n numpy.set_printoptions(suppress=True, precision=5)\n doctest.testmod()\n" ]
[ [ "numpy.diag", "numpy.dot", "numpy.radians", "numpy.expand_dims", "numpy.sqrt", "numpy.vstack", "numpy.fabs", "numpy.concatenate", "numpy.mean", "numpy.cross", "numpy.negative", "numpy.trace", "numpy.roll", "numpy.linalg.svd", "numpy.allclose", "numpy.linalg.eig", "numpy.sin", "numpy.linalg.det", "numpy.finfo", "numpy.real", "numpy.argmax", "numpy.outer", "numpy.zeros", "numpy.linalg.inv", "numpy.linalg.eigh", "numpy.identity", "numpy.random.rand", "numpy.array", "numpy.sum", "numpy.random.random", "numpy.set_printoptions", "numpy.cos", "numpy.linalg.pinv", "numpy.empty" ] ]
gamedaygeorge/datacube-applications-library
[ "1b6314ee3465f9f17930391a4c241e981a9e200e" ]
[ "DCAL_utils_special/dc_mosaic.py" ]
[ "# Copyright 2016 United States Government as represented by the Administrator\n# of the National Aeronautics and Space Administration. All Rights Reserved.\n#\n# Portion of this code is Copyright Geoscience Australia, Licensed under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file\n# except in compliance with the License. You may obtain a copy of the License\n# at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# The CEOS 2 platform is licensed under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport numpy as np\nimport xarray as xr\nfrom collections import OrderedDict\nimport hdmedians as hd\n\nfrom . import dc_utilities as utilities\nfrom .dc_utilities import create_default_clean_mask\n\n\n\"\"\"\nUtility Functions\n\"\"\"\n\ndef convert_to_dtype(data, dtype):\n \"\"\"\n A utility function converting xarray, pandas, or NumPy data to a given dtype.\n\n Parameters\n ----------\n data: xarray.Dataset, xarray.DataArray, pandas.Series, pandas.DataFrame,\n or numpy.ndarray\n dtype: str or numpy.dtype\n A string denoting a Python datatype name (e.g. int, float) or a NumPy dtype (e.g.\n np.int16, np.float32) to convert the data to.\n \"\"\"\n if dtype is None: # Don't convert the data type.\n return data\n return data.astype(dtype)\n\n\n\"\"\"\nCompositing Functions\n\"\"\"\n\ndef create_mosaic(dataset_in, clean_mask=None, no_data=-9999, dtype=None, intermediate_product=None, **kwargs):\n \"\"\"\n Creates a most-recent-to-oldest mosaic of the input dataset.\n\n Parameters\n ----------\n dataset_in: xarray.Dataset\n A dataset retrieved from the Data Cube; should contain:\n coordinates: time, latitude, longitude\n variables: variables to be mosaicked (e.g. red, green, and blue bands)\n clean_mask: np.ndarray\n An ndarray of the same shape as `dataset_in` - specifying which values to mask out.\n If no clean mask is specified, then all values are kept during compositing.\n no_data: int or float\n The no data value.\n dtype: str or numpy.dtype\n A string denoting a Python datatype name (e.g. 
int, float) or a NumPy dtype (e.g.\n np.int16, np.float32) to convert the data to.\n intermediate_product: xarray.Dataset\n A 2D dataset used to store intermediate results.\n\n Returns\n -------\n dataset_out: xarray.Dataset\n Composited data with the format:\n coordinates: latitude, longitude\n variables: same as dataset_in\n \"\"\"\n # Default to masking nothing.\n if clean_mask is None:\n clean_mask = create_default_clean_mask(dataset_in)\n\n dataset_in_dtypes, band_list = [None]*2\n if dtype is None:\n # Save dtypes because masking with Dataset.where() converts to float64.\n band_list = list(dataset_in.data_vars)\n dataset_in_dtypes = {}\n for band in band_list:\n dataset_in_dtypes[band] = dataset_in[band].dtype\n\n if intermediate_product is not None:\n dataset_out = intermediate_product.copy(deep=True)\n else:\n dataset_out = None\n\n time_slices = range(len(dataset_in.time))\n if 'reverse_time' in kwargs:\n time_slices = reversed(time_slices)\n for timeslice in time_slices:\n dataset_slice = dataset_in.isel(time=timeslice).drop('time')\n clean_mask_slice = clean_mask[timeslice]\n dataset_slice = dataset_slice.where((dataset_slice != no_data) & (clean_mask_slice))\n if dataset_out is None:\n dataset_out = dataset_slice.copy(deep=True)\n utilities.clear_attrs(dataset_out)\n else:\n for key in list(dataset_slice.data_vars):\n data_var_is_no_data = dataset_out[key].values == no_data\n dataset_out[key].values[data_var_is_no_data] = dataset_slice[key].values[data_var_is_no_data]\n\n # Handle datatype conversions.\n dataset_out = restore_or_convert_dtypes(dtype, band_list, dataset_in_dtypes, dataset_out, no_data)\n return dataset_out\n\ndef create_mean_mosaic(dataset_in, clean_mask=None, no_data=-9999, dtype=None, **kwargs):\n \"\"\"\n Method for calculating the mean pixel value for a given dataset.\n\n Parameters\n ----------\n dataset_in: xarray.Dataset\n A dataset retrieved from the Data Cube; should contain:\n coordinates: time, latitude, longitude\n variables: variables to be mosaicked (e.g. red, green, and blue bands)\n clean_mask: np.ndarray\n An ndarray of the same shape as `dataset_in` - specifying which values to mask out.\n If no clean mask is specified, then all values are kept during compositing.\n no_data: int or float\n The no data value.\n dtype: str or numpy.dtype\n A string denoting a Python datatype name (e.g. 
int, float) or a NumPy dtype (e.g.\n np.int16, np.float32) to convert the data to.\n\n Returns\n -------\n dataset_out: xarray.Dataset\n Compositited data with the format:\n coordinates: latitude, longitude\n variables: same as dataset_in\n \"\"\"\n # Default to masking nothing.\n if clean_mask is None:\n clean_mask = create_default_clean_mask(dataset_in)\n\n dataset_in_dtypes = None\n if dtype is None:\n # Save dtypes because masking with Dataset.where() converts to float64.\n band_list = list(dataset_in.data_vars)\n dataset_in_dtypes = {}\n for band in band_list:\n dataset_in_dtypes[band] = dataset_in[band].dtype\n\n # Mask out clouds and scan lines.\n dataset_in = dataset_in.where((dataset_in != no_data) & (clean_mask))\n dataset_out = dataset_in.mean(dim='time', skipna=True, keep_attrs=False)\n\n # Handle datatype conversions.\n dataset_out = restore_or_convert_dtypes(dtype, band_list, dataset_in_dtypes, dataset_out, no_data)\n return dataset_out\n\n\ndef create_median_mosaic(dataset_in, clean_mask=None, no_data=-9999, dtype=None, **kwargs):\n \"\"\"\n Method for calculating the median pixel value for a given dataset.\n\n Parameters\n ----------\n dataset_in: xarray.Dataset\n A dataset retrieved from the Data Cube; should contain:\n coordinates: time, latitude, longitude\n variables: variables to be mosaicked (e.g. red, green, and blue bands)\n clean_mask: np.ndarray\n An ndarray of the same shape as `dataset_in` - specifying which values to mask out.\n If no clean mask is specified, then all values are kept during compositing.\n no_data: int or float\n The no data value.\n dtype: str or numpy.dtype\n A string denoting a Python datatype name (e.g. int, float) or a NumPy dtype (e.g.\n np.int16, np.float32) to convert the data to.\n\n Returns\n -------\n dataset_out: xarray.Dataset\n Compositited data with the format:\n coordinates: latitude, longitude\n variables: same as dataset_in\n \"\"\"\n # Default to masking nothing.\n if clean_mask is None:\n clean_mask = create_default_clean_mask(dataset_in)\n\n dataset_in_dtypes = None\n if dtype is None:\n # Save dtypes because masking with Dataset.where() converts to float64.\n band_list = list(dataset_in.data_vars)\n dataset_in_dtypes = {}\n for band in band_list:\n dataset_in_dtypes[band] = dataset_in[band].dtype\n\n # Mask out clouds and Landsat 7 scan lines.\n dataset_in = dataset_in.where((dataset_in != no_data) & (clean_mask))\n dataset_out = dataset_in.median(dim='time', skipna=True, keep_attrs=False)\n\n # Handle datatype conversions.\n dataset_out = restore_or_convert_dtypes(dtype, band_list, dataset_in_dtypes, dataset_out, no_data)\n return dataset_out\n\n\ndef create_max_ndvi_mosaic(dataset_in, clean_mask=None, no_data=-9999, dtype=None, intermediate_product=None, **kwargs):\n \"\"\"\n Method for calculating the pixel value for the max ndvi value.\n\n Parameters\n ----------\n dataset_in: xarray.Dataset\n A dataset retrieved from the Data Cube; should contain:\n coordinates: time, latitude, longitude\n variables: variables to be mosaicked (e.g. red, green, and blue bands)\n clean_mask: np.ndarray\n An ndarray of the same shape as `dataset_in` - specifying which values to mask out.\n If no clean mask is specified, then all values are kept during compositing.\n no_data: int or float\n The no data value.\n dtype: str or numpy.dtype\n A string denoting a Python datatype name (e.g. 
int, float) or a NumPy dtype (e.g.\n np.int16, np.float32) to convert the data to.\n\n Returns\n -------\n dataset_out: xarray.Dataset\n Compositited data with the format:\n coordinates: latitude, longitude\n variables: same as dataset_in\n \"\"\"\n dataset_in = dataset_in.copy(deep=True)\n\n # Default to masking nothing.\n if clean_mask is None:\n clean_mask = create_default_clean_mask(dataset_in)\n\n dataset_in_dtypes = None\n if dtype is None:\n # Save dtypes because masking with Dataset.where() converts to float64.\n band_list = list(dataset_in.data_vars)\n dataset_in_dtypes = {}\n for band in band_list:\n dataset_in_dtypes[band] = dataset_in[band].dtype\n\n # Mask out clouds and scan lines.\n dataset_in = dataset_in.where((dataset_in != -9999) & clean_mask)\n\n if intermediate_product is not None:\n dataset_out = intermediate_product.copy(deep=True)\n else:\n dataset_out = None\n\n time_slices = range(len(dataset_in.time))\n for timeslice in time_slices:\n dataset_slice = dataset_in.isel(time=timeslice).drop('time')\n ndvi = (dataset_slice.nir - dataset_slice.red) / (dataset_slice.nir + dataset_slice.red)\n ndvi.values[np.invert(clean_mask)[timeslice, ::]] = -1000000000\n dataset_slice['ndvi'] = ndvi\n if dataset_out is None:\n dataset_out = dataset_slice.copy(deep=True)\n utilities.clear_attrs(dataset_out)\n else:\n for key in list(dataset_slice.data_vars):\n dataset_out[key].values[dataset_slice.ndvi.values > dataset_out.ndvi.values] = \\\n dataset_slice[key].values[dataset_slice.ndvi.values > dataset_out.ndvi.values]\n # Handle datatype conversions.\n dataset_out = restore_or_convert_dtypes(dtype, band_list, dataset_in_dtypes, dataset_out, no_data)\n return dataset_out\n\n\ndef create_min_ndvi_mosaic(dataset_in, clean_mask=None, no_data=-9999, dtype=None, intermediate_product=None, **kwargs):\n \"\"\"\n Method for calculating the pixel value for the min ndvi value.\n\n Parameters\n ----------\n dataset_in: xarray.Dataset\n A dataset retrieved from the Data Cube; should contain:\n coordinates: time, latitude, longitude\n variables: variables to be mosaicked (e.g. red, green, and blue bands)\n clean_mask: np.ndarray\n An ndarray of the same shape as `dataset_in` - specifying which values to mask out.\n If no clean mask is specified, then all values are kept during compositing.\n no_data: int or float\n The no data value.\n dtype: str or numpy.dtype\n A string denoting a Python datatype name (e.g. 
int, float) or a NumPy dtype (e.g.\n np.int16, np.float32) to convert the data to.\n\n Returns\n -------\n dataset_out: xarray.Dataset\n Compositited data with the format:\n coordinates: latitude, longitude\n variables: same as dataset_in\n \"\"\"\n dataset_in = dataset_in.copy(deep=True)\n\n # Default to masking nothing.\n if clean_mask is None:\n clean_mask = create_default_clean_mask(dataset_in)\n\n dataset_in_dtypes = None\n if dtype is None:\n # Save dtypes because masking with Dataset.where() converts to float64.\n band_list = list(dataset_in.data_vars)\n dataset_in_dtypes = {}\n for band in band_list:\n dataset_in_dtypes[band] = dataset_in[band].dtype\n\n # Mask out clouds and scan lines.\n dataset_in = dataset_in.where((dataset_in != -9999) & clean_mask)\n\n if intermediate_product is not None:\n dataset_out = intermediate_product.copy(deep=True)\n else:\n dataset_out = None\n\n time_slices = range(len(dataset_in.time))\n for timeslice in time_slices:\n dataset_slice = dataset_in.isel(time=timeslice).drop('time')\n ndvi = (dataset_slice.nir - dataset_slice.red) / (dataset_slice.nir + dataset_slice.red)\n ndvi.values[np.invert(clean_mask)[timeslice, ::]] = 1000000000\n dataset_slice['ndvi'] = ndvi\n if dataset_out is None:\n dataset_out = dataset_slice.copy(deep=True)\n utilities.clear_attrs(dataset_out)\n else:\n for key in list(dataset_slice.data_vars):\n dataset_out[key].values[dataset_slice.ndvi.values <\n dataset_out.ndvi.values] = dataset_slice[key].values[dataset_slice.ndvi.values <\n dataset_out.ndvi.values]\n # Handle datatype conversions.\n dataset_out = restore_or_convert_dtypes(dtype, None, dataset_in_dtypes, dataset_out, no_data)\n return dataset_out\n\ndef unpack_bits(land_cover_endcoding, data_array, cover_type):\n \"\"\"\n\tDescription:\n\t\tUnpack bits for end of ls7 and ls8 functions \n\t-----\n\tInput:\n\t\tland_cover_encoding(dict hash table) land cover endcoding provided by ls7 or ls8\n data_array( xarray DataArray)\n cover_type(String) type of cover\n\tOutput:\n unpacked DataArray\n\t\"\"\"\n boolean_mask = np.isin(data_array.values, land_cover_endcoding[cover_type]) \n return xr.DataArray(boolean_mask.astype(bool),\n coords = data_array.coords,\n dims = data_array.dims,\n name = cover_type + \"_mask\",\n attrs = data_array.attrs)\n\ndef ls8_unpack_qa( data_array , cover_type):\n\n land_cover_endcoding = dict( fill =[1] ,\n clear =[322, 386, 834, 898, 1346],\n water =[324, 388, 836, 900, 1348],\n shadow =[328, 392, 840, 904, 1350],\n snow =[336, 368, 400, 432, 848, 880, 812, 944, 1352],\n cloud =[352, 368, 416, 432, 848, 880, 912, 944, 1352],\n low_conf_cl =[322, 324, 328, 336, 352, 368, 834, 836, 840, 848, 864, 880],\n med_conf_cl =[386, 388, 392, 400, 416, 432, 898, 900, 904, 928, 944],\n high_conf_cl =[480, 992],\n low_conf_cir =[322, 324, 328, 336, 352, 368, 386, 388, 392, 400, 416, 432, 480],\n high_conf_cir=[834, 836, 840, 848, 864, 880, 898, 900, 904, 912, 928, 944],\n terrain_occ =[1346,1348, 1350, 1352]\n )\n return unpack_bits(land_cover_endcoding, data_array, cover_type)\n\n\ndef ls8_oli_unpack_qa(data_array, cover_type):\n \"\"\"\n Returns a boolean `xarray.DataArray` denoting which points in `data_array`\n are of the selected `cover_type` (True indicates presence and\n False indicates absence).\n\n For more information, see this: https://landsat.usgs.gov/collectionqualityband\n The most relevant section for this function is titled\n \"Landsat 8 OLI/ OLI-TIRS Level-1 Possible Attributes,\n Pixel Values, and Pixel Value Interpretations\".\n\n 
Parameters\n ----------\n data_array: xarray.DataArray\n A DataArray of the QA band.\n cover_type: string\n A string in the set [fill, terrain_occ, clear, rad_sat_1_2,\n rad_sat_3_4, rad_sat_5_pls, cloud, low_conf_cl,\n med_conf_cl, high_conf_cl, high_cl_shdw,\n high_snow_ice, low_conf_cir, high_conf_cir].\n\n 'fill' removes \"no_data\" values, which indicates an absence of data. This value is -9999 for Landsat platforms.\n Generally, don't use 'fill'.\n 'terrain_occ' allows only occluded terrain.\n 'clear' allows only clear terrain. 'water' allows only water. 'shadow' allows only cloud shadows.\n 'rad_sat_1_2' denotes radiometric saturation in 1 or 2 bands.\n 'rad_sat_3_4' denotes radiometric saturation in 3 or 4 bands.\n 'rad_sat_5_pls' denotes radiometric saturation in 5 or more bands.\n 'cloud' allows only clouds, but note that it often only selects cloud boundaries.\n 'low_conf_cl', 'med_conf_cl', and 'high_conf_cl' denote low, medium, and high confidence in cloud coverage.\n - 'low_conf_cl' is useful on its own for only removing clouds, however, 'clear' is usually better suited for this.\n - 'med_conf_cl' is useful in combination with 'low_conf_cl' to allow slightly heavier cloud coverage.\n - Note that 'med_conf_cl' and 'cloud' are very similar.\n - 'high_conf_cl' is useful in combination with both 'low_conf_cl' and 'med_conf_cl'.\n 'high_cl_shdw' denotes high confidence in cloud shadow.\n 'high_snow_ice' denotes high confidence in snow or ice.\n 'low_conf_cir' and 'high_conf_cir' denote low and high confidence in cirrus clouds.\n\n Returns\n -------\n mask: xarray.DataArray\n The boolean `xarray.DataArray` denoting which points in `data_array`\n are of the selected `cover_type` (True indicates presence and\n False indicates absence). This will have the same dimensions and coordinates as `data_array`.\n \"\"\"\n land_cover_encoding = dict(fill =[1],\n terrain_occ =[2, 2722],\n clear =[2720, 2724, 2728, 2732],\n rad_sat_1_2 =[2724, 2756, 2804, 2980, 3012, 3748, 3780, 6820, 6852, 6900, 7076, 7108, 7844, 7876],\n rad_sat_3_4 =[2728, 2760, 2808, 2984, 3016, 3752, 3784, 6824, 6856, 6904, 7080, 7112, 7848, 7880],\n rad_sat_5_pls=[2732, 2764, 2812, 2988, 3020, 3756, 3788, 6828, 6860, 6908, 7084, 7116, 7852, 7884],\n cloud =[2800, 2804, 2808, 2812, 6896, 6900, 6904, 6908],\n low_conf_cl =[2752, 2722, 2724, 2728, 2732, 2976, 2980, 2984, 2988, 3744, 3748, 3752, 3756, 6816, 6820, 6824, 6828, 7072, 7076, 7080, 7084, 7840, 7844, 7848, 7852],\n med_conf_cl =[2752, 2756, 2760, 2764, 3008, 3012, 3016, 3020, 3776, 3780, 3784, 3788, 6848, 6852, 6856, 6860, 7104, 7108, 7112, 7116, 7872, 7876, 7880, 7884],\n high_conf_cl =[2800, 2804, 2808, 2812, 6896, 6900, 6904, 6908],\n high_cl_shdw=[2976, 2980, 2984, 2988, 3008, 3012, 3016, 3020, 7072, 7076, 7080, 7084, 7104, 7108, 7112, 7116],\n high_snow_ice=[3744, 3748, 3752, 3756, 3776, 3780, 3784, 3788, 7840, 7844, 7848, 7852, 7872, 7876, 7880, 7884],\n low_conf_cir =[2720, 2722, 2724, 2728, 2732, 2752, 2756, 2760, 2764, 2800, 2804, 2808, 2812, 2976, 2980, 2984, 2988, 3008, 3012, 3016, 3020, 3744, 3748, 3752, 3756, 3780, 3784, 3788],\n high_conf_cir=[6816, 6820, 6824, 6828, 6848, 6852, 6856, 6860, 6896, 6900, 6904, 6908, 7072, 7076, 7080, 7084, 7104, 7108, 7112, 7116, 7840, 7844, 7848, 7852, 7872, 7876, 7880, 7884]\n )\n return unpack_bits(land_cover_encoding, data_array, cover_type)\n\ndef ls7_unpack_qa( data_array , cover_type):\n\n land_cover_endcoding = dict( fill = [1],\n clear = [66, 130],\n water = [68, 132],\n shadow = [72, 136],\n snow = [80, 112, 
144, 176],\n cloud = [96, 112, 160, 176, 224],\n low_conf = [66, 68, 72, 80, 96, 112],\n med_conf = [130, 132, 136, 144, 160, 176],\n high_conf= [224]\n )\n return unpack_bits(land_cover_endcoding, data_array, cover_type)\n\ndef ls5_unpack_qa( data_array , cover_type):\n\n land_cover_endcoding = dict( fill = [1],\n clear = [66, 130],\n water = [68, 132],\n shadow = [72, 136],\n snow = [80, 112, 144, 176],\n cloud = [96, 112, 160, 176, 224],\n low_conf = [66, 68, 72, 80, 96, 112],\n med_conf = [130, 132, 136, 144, 160, 176],\n high_conf= [224]\n )\n return unpack_bits(land_cover_endcoding, data_array, cover_type)\n\n\ndef create_hdmedians_multiple_band_mosaic(dataset_in,\n clean_mask=None,\n no_data=-9999,\n dtype=None,\n intermediate_product=None,\n operation=\"median\",\n **kwargs):\n \"\"\"\n Calculates the geomedian or geomedoid using a multi-band processing method.\n\n Parameters\n ----------\n dataset_in: xarray.Dataset\n A dataset retrieved from the Data Cube; should contain:\n coordinates: time, latitude, longitude (in that order)\n variables: variables to be mosaicked (e.g. red, green, and blue bands)\n clean_mask: np.ndarray\n An ndarray of the same shape as `dataset_in` - specifying which values to mask out.\n If no clean mask is specified, then all values are kept during compositing.\n no_data: int or float\n The no data value.\n dtype: str or numpy.dtype\n A string denoting a Python datatype name (e.g. int, float) or a NumPy dtype (e.g.\n np.int16, np.float32) to convert the data to.\n operation: str in ['median', 'medoid']\n\n Returns\n -------\n dataset_out: xarray.Dataset\n Compositited data with the format:\n coordinates: latitude, longitude\n variables: same as dataset_in\n \"\"\"\n # Default to masking nothing.\n if clean_mask is None:\n clean_mask = create_default_clean_mask(dataset_in)\n assert operation in ['median', 'medoid'], \"Only median and medoid operations are supported.\"\n\n band_list = list(dataset_in.data_vars)\n dataset_in_dtypes = None\n if dtype is None:\n # Save dtypes because masking with Dataset.where() converts to float64.\n dataset_in_dtypes = {}\n for band in band_list:\n dataset_in_dtypes[band] = dataset_in[band].dtype\n\n # Mask out clouds and scan lines.\n dataset_in = dataset_in.where((dataset_in != no_data) & clean_mask)\n\n arrays = [dataset_in[band] for band in band_list]\n stacked_data = np.stack(arrays)\n bands_shape, time_slices_shape, lat_shape, lon_shape = stacked_data.shape[0], \\\n stacked_data.shape[1], stacked_data.shape[2], \\\n stacked_data.shape[3]\n # Reshape to remove lat/lon\n reshaped_stack = stacked_data.reshape(bands_shape, time_slices_shape,\n lat_shape * lon_shape)\n # Build zeroes array across time slices.\n hdmedians_result = np.zeros((bands_shape, lat_shape * lon_shape))\n\n # For each pixel (lat/lon combination), find the geomedian or geomedoid across time.\n for x in range(reshaped_stack.shape[2]):\n try:\n hdmedians_result[:, x] = hd.nangeomedian(\n reshaped_stack[:, :, x], axis=1) if operation == \"median\" else hd.nanmedoid(\n reshaped_stack[:, :, x], axis=1)\n except ValueError as e:\n # If all bands have nan values across time, the geomedians are nans.\n hdmedians_result[:, x] = np.full((bands_shape), np.nan)\n output_dict = {\n value: (('latitude', 'longitude'), hdmedians_result[index, :].reshape(lat_shape, lon_shape))\n for index, value in enumerate(band_list)\n }\n dataset_out = xr.Dataset(output_dict,\n coords={'latitude': dataset_in['latitude'],\n 'longitude': dataset_in['longitude']},\n 
attrs=dataset_in.attrs)\n dataset_out = restore_or_convert_dtypes(dtype, band_list, dataset_in_dtypes, dataset_out, no_data)\n return dataset_out\n\ndef restore_or_convert_dtypes(dtype_for_all=None, band_list=None, dataset_in_dtypes=None, dataset_out=None, no_data=-9999):\n \"\"\"\n Converts datatypes of data variables in a copy of an xarray Dataset.\n\n Parameters\n ----------\n dtype_for_all: str or numpy.dtype\n A string denoting a Python datatype name (e.g. int, float) or a NumPy dtype (e.g.\n np.int16, np.float32) to convert the data to.\n band_list: list-like\n !! UNUSED, DEPRECATED !!\n dataset_in_dtypes: dict\n A dictionary mapping band names to datatypes.\n One of `dtype_for_all` or `dataset_in_dtypes` must be `None`.\n no_data: int or float\n The no data value.\n\n Returns\n -------\n dataset_out: xarray.Dataset\n The output Dataset.\n \"\"\"\n assert dtype_for_all is None or dataset_in_dtypes is None, \\\n \"One of `dtype_for_all` or `dataset_in_dtypes` must be `None`.\"\n if dtype_for_all is not None:\n # Integer types can't represent nan.\n if np.issubdtype(dtype_for_all, np.integer): # This also works for Python int type.\n utilities.nan_to_num(dataset_out, no_data)\n convert_to_dtype(dataset_out, dtype_for_all)\n else: # Restore dtypes to state before masking.\n for band in dataset_in_dtypes:\n band_dtype = dataset_in_dtypes[band]\n if np.issubdtype(band_dtype, np.integer):\n utilities.nan_to_num(dataset_out[band], no_data)\n dataset_out[band] = dataset_out[band].astype(band_dtype)\n return dataset_out\n" ]
[ [ "numpy.invert", "numpy.issubdtype", "numpy.stack", "numpy.full", "numpy.zeros", "numpy.isin" ] ]
quantshah/qgrad
[ "db13e8463de49a8df1bf59524bc1b0a875405ee9" ]
[ "qgrad/qgrad_qutip.py" ]
[ "\"\"\"\nImplementation of some common quantum mechanics functions that work with JAX\n\"\"\"\nfrom jax.ops import index, index_update\nimport jax.numpy as jnp\nfrom jax.random import PRNGKey, uniform\nimport numpy as np\nfrom scipy.linalg import expm, sqrtm\nfrom numpy.linalg import matrix_power\n\n\ndef fidelity(a, b):\n \"\"\"Computes fidelity between two states (pure or mixed).\n \n .. note::\n ``a`` and ``b`` can either both be kets or both be density matrices,\n or anyone of ``a`` or ``b`` may be a ket or a density matrix. Fidelity has\n private functions to recognize kets and density matrices.\n\n Args:\n a (:obj:`jnp.ndarray`): State vector (ket) or a density matrix. \n b (:obj:`jnp.ndarray`): State vector (ket) or a density matrix. \n \n Returns:\n float: fidelity between the two input states\n \"\"\"\n if isket(a) and isket(b):\n return _fidelity_ket(a, b)\n else:\n if isket(a) == True:\n a = to_dm(a)\n if isket(b) == True:\n b = to_dm(b)\n return _fidelity_dm(a, b)\n\n\ndef _fidelity_ket(a, b):\n \"\"\"Private function that computes fidelity between two kets.\n \n Args:\n a (:obj:`jnp.ndarray`): State vector (ket)\n b (:obj:`jnp.ndarray`): State vector (ket) \n \n Returns:\n float: fidelity between the two state vectors\n \"\"\"\n a, b = jnp.asarray(a), jnp.asarray(b)\n return jnp.abs(jnp.dot(jnp.transpose(jnp.conjugate(a)), b)) ** 2\n\n\ndef _fidelity_dm(a, b):\n \"\"\"Private function that computes fidelity among two mixed states.\n \n Args:\n a (:obj:`jnp.ndarray`): density matrix (density matrix)\n b (:obj:`jnp.ndarray`): density matrix (density matrix)\n \n Returns:\n float: fidelity between the two density matrices \n \"\"\"\n dm1, dm2 = jnp.asarray(a), jnp.asarray(b)\n # Trace distace fidelity\n tr_dist = 0.5 * jnp.trace(jnp.abs(dm1 - dm2))\n # D^2 = 1 - F^2\n return jnp.sqrt(1 - tr_dist ** 2)\n\n\ndef sigmax():\n r\"\"\"Returns a Pauli-X operator.\n\n .. math:: \\sigma_{x} = \\begin{bmatrix} 0 & 1 \\\\ 1 & 0 \\end{bmatrix}. \n \n Returns:\n :obj:`jnp.ndarray`: :math:`\\sigma_{x}` operator\n \"\"\"\n return jnp.array([[0.0, 1.0], [1.0, 0.0]], dtype=jnp.complex64)\n\n\ndef sigmay():\n r\"\"\"Returns a Pauli-Y operator.\n\n .. math:: \\sigma_{y} = \\begin{bmatrix} 0 & -i \\\\ i & 0 \\end{bmatrix}. \n \n Returns:\n :obj:`jnp.ndarray`: :math:`\\sigma_{y}` operator\n\n \"\"\"\n return jnp.array(\n [[0.0 + 0.0j, 0.0 - 1.0j], [0.0 + 1.0j, 0.0 + 0.0j]], dtype=jnp.complex64\n )\n\n\ndef sigmaz():\n r\"\"\"Returns a Pauli-Y operator.\n\n .. math:: \\sigma_{z} = \\begin{bmatrix} 1 & 0 \\\\ 0 & -1 \\end{bmatrix}. 
\n \n Returns:\n :obj:`jnp.ndarray`: :math:`\\sigma_{z}` operator\n\n \"\"\"\n return jnp.array([[1.0, 0.0], [0.0, -1.0]], dtype=jnp.complex64)\n\n\ndef destroy(N):\n \"\"\"Destruction (lowering or annihilation) operator.\n \n Args:\n N (int): Dimension of Hilbert space.\n\n Returns:\n :obj:`jnp.ndarray`: Matrix representation for an N-dimensional annihilation operator\n\n \"\"\"\n if not isinstance(N, (int, jnp.integer)): # raise error if N not integer\n raise ValueError(\"Hilbert space dimension must be an integer value\")\n data = jnp.sqrt(jnp.arange(1, N, dtype=jnp.float32))\n mat = np.zeros((N, N))\n np.fill_diagonal(\n mat[:, 1:], data\n ) # np.full_diagonal is not implemented in jax.numpy\n return jnp.asarray(mat, dtype=jnp.complex64) # wrap as a jax.numpy array\n\n\n# TODO: apply jax device array data type to everything all at once\n# ind = jnp.arange(1, N, dtype=jnp.float32)\n# ptr = jnp.arange(N + 1, dtype=jnp.float32)\n# ptr = index_update(\n# ptr, index[-1], N - 1\n# ) index_update mutates the jnp array in-place like numpy\n# return (\n# csr_matrix((data, ind, ptr), shape=(N, N))\n# if full is True\n# else csr_matrix((data, ind, ptr), shape=(N, N)).toarray()\n# )\n\n\ndef create(N):\n \"\"\"Creation (raising) operator.\n\n Args:\n N (int): Dimension of Hilbert space \n\n Returns:\n :obj:`jnp.ndarray`: Matrix representation for an N-dimensional creation operator\n\n \"\"\"\n if not isinstance(N, (int, jnp.integer)): # raise error if N not integer\n raise ValueError(\"Hilbert space dimension must be an integer value\")\n data = jnp.sqrt(jnp.arange(1, N, dtype=jnp.float32))\n mat = np.zeros((N, N))\n np.fill_diagonal(mat[1:], data) # np.full_diagonal is not implemented in jax.numpy\n return jnp.asarray(mat, dtype=jnp.complex64) # wrap as a jax.numpy array\n # ind = jnp.arange(0, N - 1, dtype=jnp.float32)\n # ptr = jnp.arange(N + 1, dtype=jnp.float32)\n # ptr = index_update(\n # ptr, index[0], 0\n # ) # index_update mutates the jnp array in-place like numpy\n # return (\n # csr_matrix((data, ind, ptr), shape=(N, N))\n # if full is True\n # else csr_matrix((data, ind, ptr), shape=(N, N)).toarray()\n # )\n # return data\n\n\ndef expect(oper, state):\n \"\"\"Calculates the expectation value of an operator \n with respect to an input state.\n\n .. note::\n\n Input state, represented by the argumuent ``state`` can only be a density matrix or a ket.\n\n Args:\n oper (:obj:`jnp.ndarray`): JAX numpy array representing an operator\n state (:obj:`jnp.ndarray`): JAX numpy array representing a density matrix or a ket \n\n Returns:\n float: Expectation value. 
``real`` if the ``oper`` is Hermitian, ``complex`` otherwise \n \"\"\"\n if jnp.asarray(state).shape[1] >= 2:\n return _expect_dm(oper, state)\n\n else:\n return _expect_ket(oper, state)\n\n\ndef _expect_dm(oper, state):\n \"\"\"Private function to calculate the expectation value of \n an operator with respect to a density matrix\n \"\"\"\n # convert to jax.numpy arrays in case user gives raw numpy\n oper, rho = jnp.asarray(oper), jnp.asarray(state)\n # Tr(rho*op)\n return jnp.trace(jnp.dot(rho, oper))\n\n\ndef _expect_ket(oper, state):\n \"\"\"Private function to calculate the expectation value of \n an operator with respect to a ket.\n \"\"\"\n oper, ket = jnp.asarray(oper), jnp.asarray(state)\n return jnp.vdot(jnp.transpose(ket), jnp.dot(oper, ket))\n\n\ndef _kth_diag_indices(a, k):\n rows, cols = jnp.diag_indices_from(a)\n if k < 0:\n return rows[-k:], cols[:k]\n elif k > 0:\n return rows[:-k], cols[k:]\n else:\n return rows, cols\n\n\nclass Displace:\n r\"\"\"Displacement operator for optical phase space.\n \n .. math:: D(\\alpha) = \\exp(\\alpha a^\\dagger -\\alpha^* a)\n\n Args:\n n (int): dimension of the displace operator\n \"\"\"\n\n def __init__(self, n):\n # The off-diagonal of the real-symmetric similar matrix T.\n sym = (2.0 * (jnp.arange(1, n) % 2) - 1) * jnp.sqrt(jnp.arange(1, n))\n # Solve the eigensystem.\n mat = jnp.zeros((n, n), dtype=jnp.complex128)\n\n i, j = _kth_diag_indices(mat, -1)\n mat = index_update(mat, index[i, j], sym)\n\n i, j = _kth_diag_indices(mat, 1)\n mat = index_update(mat, index[i, j], sym)\n\n self.evals, self.evecs = jnp.linalg.eigh(mat)\n self.range = jnp.arange(n)\n self.t_scale = 1j ** (self.range % 2)\n\n def __call__(self, alpha):\n r\"\"\"Callable with ``alpha`` as the displacement parameter\n\n Args:\n alpha (float): Displacement parameter\n\n Returns:\n :obj:`jnp.ndarray`: Matrix representing :math:`n-`dimensional displace operator\n with :math:`\\alpha` displacement\n \n \"\"\"\n # Diagonal of the transformation matrix P, and apply to eigenvectors.\n transform = jnp.where(\n alpha == 0,\n self.t_scale,\n self.t_scale * (alpha / jnp.abs(alpha)) ** -self.range,\n )\n evecs = transform[:, None] * self.evecs\n # Get the exponentiated diagonal.\n diag = jnp.exp(1j * jnp.abs(alpha) * self.evals)\n return jnp.conj(evecs) @ (diag[:, None] * evecs.T)\n\n\n# TODO: Add mathematical description of squeeze in docstrings\n# TODO:gradients of squeezing\ndef squeeze(N, z):\n \"\"\"Single-mode squeezing operator.\n\n Args:\n N (int): Dimension of Hilbert space\n z (float/complex): Squeezing parameter\n\n Returns:\n :obj:`jnp.ndarray`: JAX numpy representation of the squeezing operator\n \n \"\"\"\n op = (1.0 / 2.0) * (\n (jnp.conj(z) * matrix_power(destroy(N), 2)) - (z * matrix_power(create(N), 2))\n )\n return expm(op)\n\n\ndef basis(N, n=0):\n r\"\"\"Generates the vector representation of a Fock state.\n \n Args:\n N (int): Number of Fock states in the Hilbert space\n n (int): Number state (defaults to vacuum state, n = 0)\n\n Returns:\n :obj:`jnp.ndarray`: Number state :math:`|n\\rangle`\n\n \"\"\"\n\n if (not isinstance(N, (int, np.integer))) or N < 0:\n raise ValueError(\"N must be integer N >= 0\")\n\n zeros = jnp.zeros((N, 1), dtype=jnp.complex64) # column of zeros\n return index_update(zeros, index[n, 0], 1.0)\n\n\ndef coherent(N, alpha):\n \"\"\"Generates coherent state with eigenvalue alpha by displacing the vacuum state\n by a displacement parameter alpha.\n\n Args:\n N (int): Dimension of Hilbert space\n alpha (float/complex): Eigenvalue of the 
coherent state\n\n Returns:\n :obj:`jnp.ndarray`: Coherent state (eigenstate of the lowering operator)\n\n \"\"\"\n x = basis(N, 0) # Vacuum state\n displace = Displace(N)\n return jnp.dot(displace(alpha), x)\n\n\ndef dag(state):\n r\"\"\"Returns conjugate transpose of a given state, represented by :math:`A^{\\dagger}`, where :math:`A` is\n a quantum state represented by a ket, a bra or, more generally, a density matrix.\n\n Args:\n state (:obj:`jnp.ndarray`): State to perform the dagger operation on\n \n Returns:\n :obj:`jnp.ndarray`: Conjugate transposed jax.numpy representation of the input state\n \n \"\"\"\n return jnp.conjugate(jnp.transpose(state))\n\n\ndef isket(state):\n \"\"\"Checks whether a state is a ket based on its shape.\n \n Args:\n state (:obj:`jnp.ndarray`): input state\n\n Returns:\n bool: ``True`` if state is a ket and ``False`` otherwise\n \"\"\"\n return state.shape[1] == 1\n\n\ndef isbra(state):\n \"\"\"Checks whether a state is a bra based on its shape.\n \n Args:\n state (:obj:`jnp.ndarray`): input state\n\n Returns:\n bool: ``True`` if state is a bra and ``False`` otherwise\n \"\"\"\n return state.shape[0] == 1\n\n\ndef isherm(oper):\n \"\"\"Checks whether a given operator is Hermitian.\n\n Args:\n oper (:obj:`jnp.ndarray`): input observable\n \n Returns:\n bool: ``True`` if the operator is Hermitian and \n ``False`` otherwise\n \"\"\"\n return jnp.all(oper == dag(oper))\n\n\ndef isdm(mat):\n \"\"\"Checks whether a given matrix is a valid density matrix.\n\n Args:\n mat (:obj:`jnp.ndarray`): Input matrix\n \n Returns:\n bool: ``True`` if input matrix is a valid density matrix; \n ``False`` otherwise\n \"\"\"\n isdensity = True\n\n if (\n isket(mat) == True\n or isbra(mat) == True\n or isherm(mat) == False\n or jnp.allclose(jnp.real(jnp.trace(mat)), 1, atol=1e-09) == False\n ):\n isdensity = False\n else:\n evals, _ = jnp.linalg.eig(mat)\n for eig in evals:\n if eig < 0 and jnp.allclose(eig, 0, atol=1e-06) == False:\n isdensity = False\n break\n\n return isdensity\n\n\ndef to_dm(state):\n r\"\"\"Converts a ket or a bra into its density matrix representation using \n the outer product :math:`|x\\rangle \\langle x|`.\n \n Args:\n state (:obj:`jnp.ndarray`): input ket or a bra\n\n Returns:\n :obj:`jnp.ndarray`: density matrix representation of a ket or a bra\n \"\"\"\n if isket(state):\n out = jnp.dot(state, dag(state))\n\n elif isbra(state):\n out = jnp.dot(dag(state), state)\n\n else:\n raise TypeError(\n \"Input is neither a ket, nor a bra. First dimension of a bra should be 1. Eg: (1, 4).\\\n Second dimension of a ket should be 1. Eg: (4, 1)\"\n )\n\n return out\n\n\ndef _make_rot(N, params, idx):\n r\"\"\"Returns an :math:`N \\times N` rotation matrix :math:`R_{ij}`,\n where :math:`R_{ij}` is an :math:`N-`dimensional identity matrix\n with the elements :math:`R_{ii}, R_{ij}, R_{ji}` and :math:`R_{jj}`\n replaced as follows:\n\n .. 
math::\n\n \\begin{pmatrix} R_{ii} & R{ij} \\\\ R_{ji} & R_{jj} \n \\end{pmatrix} = \\begin{pmatrix}\n e^{i\\phi_{ij}}cos(\\theta_{ij}) & \n -e^{i\\phi_{ij}sin(\\theta_{ij})} \\\\\n sin(\\theta_{ij}) & cos(\\theta_{ij})\n \\end{pmatrix}\n\n Args:\n N (int): dimension of the rotation matrix\n params(:obj:`jnp.ndarray`): array of rotation parameters,\n :math:`\\theta_{ij}` and :math:`\\phi_{ij}` of\n shape (2, )\n idx (tuple): indices (i, j) whose 4 permutations (as shown in\n the equation above) are to update the :math:`N \\times N`\n identity to a rotation matrix by substituting `params`\n\n Returns:\n :obj:`jnp.ndarray`: :math:`N \\times N` rotation matrix\n \"\"\"\n i, j = idx\n theta, phi = params\n rotation = jnp.eye(N, dtype=jnp.complex64)\n # updating the four entries\n rotation = index_update(rotation, index[i, i], jnp.exp(1j * phi) * jnp.cos(theta))\n rotation = index_update(rotation, index[i, j], -jnp.exp(1j * phi) * jnp.sin(theta))\n rotation = index_update(rotation, index[j, i], jnp.sin(theta))\n rotation = index_update(rotation, index[j, j], jnp.cos(theta))\n return rotation\n\n\nclass Unitary:\n r\"\"\"Class for an :math:`N \\times N` parametrized unitary \n matrix :math:`U(N)`\n \n Unitary :math:`U(N)` is constructed using the following scheme\n \n .. math::\n U(N) = D\\prod_{i=2}^{N}\\prod_{j=1}^{i-1}R^{'}_{ij}\n \n where :math:`D` is a diagonal matrix, whose elements are \n :math:`e^{i\\omega{j}}` and :math:`R_{ij}^{'}` are rotation \n matrices (available via `_make_rot`) where\n :math:`R_{ij}` is an :math:`N`-dimensional identity matrix\n with the elements :math:`R_{ii}, R_{ij}, R_{ji}` and :math:`R_{jj}`\n replaced as follows:\n\n .. math::\n\n \\begin{pmatrix} R_{ii} & R_{ij} \\\\ R_{ji} & R_{jj} \n \\end{pmatrix} = \\begin{pmatrix}\n e^{i\\phi_{ij}}cos(\\theta_{ij}) & \n -e^{i\\phi_{ij}}sin(\\theta_{ij}) \\\\\n sin(\\theta_{ij}) & cos(\\theta_{ij})\n \\end{pmatrix}\n\n and :math:`R_{ij}^{'} = R(-\\theta_{ij}, -\\phi_{ij})`\n \n Ref: Jing, Li, et al. \"Tunable efficient unitary neural\n networks (eunn) and their application to rnns.\"\n International Conference on Machine Learning. 2017.\n\n Args:\n N (int): Dimension of the unitary matrix\n \"\"\"\n\n def __init__(self, N):\n self.N = N\n\n def __call__(self, thetas, phis, omegas):\n r\"\"\"Returns a parameterized unitary matrix parameerized\n by the given angles `thetas`, `phis`, and `omegas`.\n \n Args: \n thetas (:obj:`jnp.ndarray`): theta angles for rotations\n of shape (`N` * (`N` - 1) / 2, )\n phis (:obj:`jnp.ndarray`): phi angles for rotations\n of shape (`N` * (`N` - 1) / 2, )\n omegas (:obj:`jnp.ndarray`): omegas to paramterize the\n exponents in the diagonal matrix\n \n Returns:\n :obj:`jnp.ndarray`: :math:`N \\times N` parameterized \n unitary matrix\n\n .. note::\n There are a total of :math:`\\frac{N}(N-1)}{2}` \n :math:`\\theta_{ij}` parameters :math:`\\frac{N}(N-1)}{2}` \n :math:`\\phi{ij}` parameters, and :math:`N omega_{ij}`\n parameters. 
\n \"\"\"\n\n if omegas.shape[0] != self.N:\n raise ValueError(\n \"The dimension of omegas should be the same as the unitary\"\n )\n if phis.shape[0] != thetas.shape[0]:\n raise ValueError(\n \"Number of phi and theta rotation parameters should be the same\"\n )\n if (\n phis.shape[0] != (self.N) * (self.N - 1) / 2\n or thetas.shape[0] != (self.N) * (self.N - 1) / 2\n ):\n raise ValueError(\n \"\"\"Size of each of the rotation parameters \\\n should be N * (N - 1) / 2, where N is the size \\\n of the unitary matrix\"\"\"\n )\n diagonal = jnp.zeros((self.N, self.N), dtype=jnp.complex64)\n for i in range(self.N):\n diagonal = index_update(diagonal, index[i, i], jnp.exp(1j * omegas[i]))\n # negative angles for matrix inversion\n params = [[-i, -j] for i, j in zip(thetas, phis)]\n rotation = jnp.eye(self.N, dtype=jnp.complex64)\n param_idx = 0 # keep track of parameter indices to feed rotation\n for i in range(2, self.N + 1):\n for j in range(1, i):\n rotation = jnp.dot(\n rotation, _make_rot(self.N, params[param_idx], (i - 1, j - 1))\n )\n # (i-1, j-1) to match numpy matrix indexing\n param_idx += 1\n return jnp.dot(diagonal, rotation)\n\n\ndef rand_ket(N, seed=None):\n r\"\"\"Returns a random :math:`N`-dimensional\n ket.\n\n Args:\n N (int): Dimension of random ket\n \n Reurns:\n :obj:`jnp.ndarray`: random \n :math:`N \\times 1` dimensional \n vector (ket)\n \"\"\"\n if seed == None:\n seed = np.random.randint(1000)\n ket = uniform(PRNGKey(seed), (N, 1)) + 1j * uniform(PRNGKey(seed), (N, 1))\n return ket / jnp.linalg.norm(ket)\n\n\ndef rand_dm(N, seed=None):\n r\"\"\"Returns a random :math:`N \\times N`-dimensional\n density matrix.\n\n Args:\n N (int): Dimension of random density matrix\n \n Reurns:\n :obj:`jnp.ndarray`: random \n :math:`N \\times N` dimensional \n matrix (density matrix).\n \"\"\"\n if seed == None:\n seed = np.random.randint(1000)\n key = PRNGKey(seed)\n return to_dm(rand_ket(N, seed))\n\n\ndef rand_unitary(N, seed=None):\n r\"\"\"Returns an :math:`N \\times N` randomly parametrized unitary\n \n Args:\n N (int): Size of the Hilbert space\n \n Returns:\n :obj:`jnp.ndarray`: :math:`N \\times N` parameterized random \n unitary matrix\n\n .. note::\n JAX provides Psuedo-Random Number Generator Keys (PRNG Keys) that \n aim to ensure reproducibility. `seed` integer here is fed as \n input to a PRNGKey that returns of array of shape (2,)\n for every different input integer seed. PRNGKey for the same input \n integer shall sample the same values from any distribution.\n \n \"\"\"\n if seed == None:\n seed = np.random.randint(1000)\n params = uniform(PRNGKey(seed), (N ** 2,), minval=0.0, maxval=2 * jnp.pi)\n\n rand_thetas = params[: N * (N - 1) // 2]\n rand_phis = params[N * (N - 1) // 2 : N * (N - 1)]\n rand_omegas = params[N * (N - 1) :]\n\n return Unitary(N)(rand_thetas, rand_phis, rand_omegas)\n" ]
[ [ "numpy.random.randint", "numpy.zeros", "numpy.fill_diagonal", "scipy.linalg.expm" ] ]
lorinczb/pytorch-dc-tts
[ "9dae50678113e2f60ad0752b99b959bb0b11dfc9" ]
[ "pretrained_voxceleb_model/DatasetLoader.py" ]
[ "#! /usr/bin/python\n# -*- encoding: utf-8 -*-\n\nimport torch\nimport torchaudio\nimport numpy\nimport random\nimport pdb\nimport os\nimport threading\nimport time\nimport math\nfrom scipy.io import wavfile\nfrom queue import Queue\n\n# torchfb = torchaudio.transforms.MelSpectrogram(sample_rate=16000, n_fft=2048, win_length=800, hop_length=200,\n# window_fn=torch.hamming_window, n_mels=80)\n\n\ndef round_down(num, divisor):\n return num - (num%divisor)\n\n\ndef get_spectrogram_dctts(fpath):\n import librosa\n import numpy as np\n y, sr = librosa.load(fpath, sr=16000)\n\n # Trimming\n y, _ = librosa.effects.trim(y)\n\n # Preemphasis\n y = np.append(y[0], y[1:] - 0.97 * y[:-1])\n\n # stft\n linear = librosa.stft(y=y,\n n_fft=2048,\n hop_length=200,\n win_length=800)\n\n # magnitude spectrogram\n mag = np.abs(linear) # (1+n_fft//2, T)\n\n # mel spectrogram\n mel_basis = librosa.filters.mel(16000, 2048, 80) # (n_mels, 1+n_fft//2)\n mel = np.dot(mel_basis, mag) # (n_mels, t)\n\n # to decibel\n mel = 20 * np.log10(np.maximum(1e-5, mel))\n mag = 20 * np.log10(np.maximum(1e-5, mag))\n\n max_db = 100\n ref_db = 20\n # normalize\n mel = np.clip((mel - ref_db + max_db) / max_db, 1e-8, 1)\n mag = np.clip((mag - ref_db + max_db) / max_db, 1e-8, 1)\n\n # Transpose\n mel = mel.T.astype(np.float32) # (T, n_mels)\n mag = mag.T.astype(np.float32) # (T, 1+n_fft//2)\n mel = mel[::4, :]\n return torch.Tensor(mel)\n\n\ndef loadWAV(filename, max_frames, evalmode=True, num_eval=10, n_mels=80):\n\n # # Maximum audio length\n # max_audio = max_frames * 160 + 240\n # print(\"max_frames: \", max_frames)\n # # Read wav file and convert to torch tensor\n # sample_rate, audio = wavfile.read(filename)\n #\n # audiosize = audio.shape[0]\n #\n # if audiosize <= max_audio:\n # shortage = math.floor( ( max_audio - audiosize + 1 ) / 2 )\n # audio = numpy.pad(audio, (shortage, shortage), 'constant', constant_values=0)\n # audiosize = audio.shape[0]\n #\n # if evalmode:\n # startframe = numpy.linspace(0,audiosize-max_audio,num=num_eval)\n # else:\n # startframe = numpy.array([numpy.int64(random.random()*(audiosize-max_audio))])\n #\n # feats = []\n # if evalmode and max_frames == 0:\n # feats.append(audio)\n # else:\n # for asf in startframe:\n # feats.append(audio[int(asf):int(asf)+max_audio])\n #\n # feat = numpy.stack(feats,axis=0)\n #\n # feat = torch.FloatTensor(feat)\n\n mel_input = get_spectrogram_dctts(filename) + 1e-6\n mel_input = mel_input.permute(1, 0)\n mel_input = mel_input.unsqueeze(0)\n\n return mel_input\n\n\nclass DatasetLoader(object):\n def __init__(self, dataset_file_name, batch_size, max_frames, max_seg_per_spk, nDataLoaderThread, nPerSpeaker, train_path, maxQueueSize = 10, **kwargs):\n self.dataset_file_name = dataset_file_name;\n self.nWorkers = nDataLoaderThread;\n self.max_frames = max_frames;\n self.max_seg_per_spk = max_seg_per_spk;\n self.batch_size = batch_size;\n self.maxQueueSize = maxQueueSize;\n\n self.data_dict = {};\n self.data_list = [];\n self.nFiles = 0;\n self.nPerSpeaker = nPerSpeaker; ## number of clips per sample (e.g. 
1 for softmax, 2 for triplet or pm)\n\n self.dataLoaders = [];\n \n ### Read Training Files...\n with open(dataset_file_name) as dataset_file:\n while True:\n line = dataset_file.readline();\n if not line:\n break;\n \n data = line.split();\n speaker_name = data[0];\n filename = os.path.join(train_path,data[1]);\n\n if not (speaker_name in self.data_dict):\n self.data_dict[speaker_name] = [];\n\n self.data_dict[speaker_name].append(filename);\n\n ### Initialize Workers...\n self.datasetQueue = Queue(self.maxQueueSize);\n\n def dataLoaderThread(self, nThreadIndex):\n \n index = nThreadIndex*self.batch_size;\n\n if(index >= self.nFiles):\n return;\n\n while(True):\n if(self.datasetQueue.full() == True):\n time.sleep(1.0);\n continue;\n\n in_data = [];\n for ii in range(0,self.nPerSpeaker):\n feat = []\n for ij in range(index,index+self.batch_size):\n feat.append(loadWAV(self.data_list[ij][ii], self.max_frames, evalmode=False));\n in_data.append(torch.cat(feat, dim=0));\n\n in_label = numpy.asarray(self.data_label[index:index+self.batch_size]);\n \n self.datasetQueue.put([in_data, in_label]);\n\n index += self.batch_size*self.nWorkers;\n\n if(index+self.batch_size > self.nFiles):\n break;\n\n def __iter__(self):\n\n dictkeys = list(self.data_dict.keys());\n dictkeys.sort()\n\n lol = lambda lst, sz: [lst[i:i+sz] for i in range(0, len(lst), sz)]\n\n flattened_list = []\n flattened_label = []\n\n ## Data for each class\n for findex, key in enumerate(dictkeys):\n data = self.data_dict[key]\n numSeg = round_down(min(len(data),self.max_seg_per_spk),self.nPerSpeaker)\n \n rp = lol(numpy.random.permutation(len(data))[:numSeg],self.nPerSpeaker)\n flattened_label.extend([findex] * (len(rp)))\n for indices in rp:\n flattened_list.append([data[i] for i in indices])\n\n ## Data in random order\n mixid = numpy.random.permutation(len(flattened_label))\n mixlabel = []\n mixmap = []\n\n ## Prevent two pairs of the same speaker in the same batch\n for ii in mixid:\n startbatch = len(mixlabel) - len(mixlabel) % self.batch_size\n if flattened_label[ii] not in mixlabel[startbatch:]:\n mixlabel.append(flattened_label[ii])\n mixmap.append(ii)\n\n self.data_list = [flattened_list[i] for i in mixmap]\n self.data_label = [flattened_label[i] for i in mixmap]\n \n ## Iteration size\n self.nFiles = len(self.data_label);\n\n ### Make and Execute Threads...\n for index in range(0, self.nWorkers):\n self.dataLoaders.append(threading.Thread(target = self.dataLoaderThread, args = [index]));\n self.dataLoaders[-1].start();\n\n return self;\n\n\n def __next__(self):\n\n while(True):\n isFinished = True;\n \n if(self.datasetQueue.empty() == False):\n return self.datasetQueue.get();\n for index in range(0, self.nWorkers):\n if(self.dataLoaders[index].is_alive() == True):\n isFinished = False;\n break;\n\n if(isFinished == False):\n time.sleep(1.0);\n continue;\n\n\n for index in range(0, self.nWorkers):\n self.dataLoaders[index].join();\n\n self.dataLoaders = [];\n raise StopIteration;\n\n\n def __call__(self):\n pass;\n\n def getDatasetName(self):\n return self.dataset_file_name;\n\n def qsize(self):\n return self.datasetQueue.qsize();" ]
[ [ "numpy.dot", "numpy.maximum", "numpy.abs", "torch.Tensor", "numpy.clip", "numpy.asarray", "torch.cat", "numpy.append" ] ]
fengxiaoshuai/CNN_model_optimizer
[ "4c48420989ffe31a4075d36a5133fee0d999466a" ]
[ "distillation/build_student.py" ]
[ "import tensorflow as tf\nimport numpy as np\n\n\nwith tf.variable_scope(\"student\"):\n input_label = tf.placeholder(dtype=tf.float32, shape=[10, 10], name=\"label\")\n input_image = tf.placeholder(dtype=tf.float32, shape=[10, 224, 224, 3], name=\"input\")\n conv1 = tf.layers.conv2d(inputs=input_image, filters=64, kernel_size=[3, 3], padding='same')\n conv2 = tf.layers.conv2d(conv1, filters=64, kernel_size=[3, 3], padding='same')\n conv3 = tf.layers.conv2d(conv2, filters=64, kernel_size=[3, 3], padding='same')\n shape = int(np.prod(conv3.get_shape()[1:]))\n flat = tf.reshape(conv3, [-1, shape])\n fc1 = tf.layers.dense(flat, units=100)\n fc2 = tf.layers.dense(fc1, units=10, name=\"logit\")\n probability = tf.nn.softmax(fc2)\n loss = tf.losses.softmax_cross_entropy(input_label, fc2)\n print(input_label)\n\nimage = np.ones(shape=[10, 224, 224, 3])\n\nwith tf.Session() as sess:\n init = tf.global_variables_initializer()\n sess.run(init)\n saver = tf.train.Saver()\n saver.save(sess, \"./student/student\")\n print(sess.run(probability, feed_dict={input_image: image}))\n" ]
[ [ "tensorflow.layers.conv2d", "tensorflow.nn.softmax", "tensorflow.reshape", "tensorflow.placeholder", "tensorflow.layers.dense", "numpy.ones", "tensorflow.losses.softmax_cross_entropy", "tensorflow.global_variables_initializer", "tensorflow.Session", "tensorflow.variable_scope", "tensorflow.train.Saver" ] ]
cccaaannn/background_subtractor
[ "2832faa1a049e99a1125ee3d6802f13d8c8a387c" ]
[ "main.py" ]
[ "import os\nimport cv2\nimport numpy as np\nimport random as rnd\n\n\n\ndef path_creator(path, img_count,staring_string,extension):\n img_count_str = str(img_count)\n\n while True:\n if(len(img_count_str)<6):\n img_count_str = \"0\" + img_count_str\n else:\n break\n img_path = \"{0}{1}{2}{3}\".format(path,staring_string,img_count_str, extension)\n img_name = \"{0}{1}{2}\".format(staring_string,img_count_str, extension)\n return img_path, img_name\n\ndef get_images(*,img_path, staring_image, img_count, random=False):\n images = []\n for i in range(img_count):\n if(random):\n image_count = rnd.randint(1,1000)\n else:\n image_count = i + staring_image\n\n path, _ = path_creator(img_path, image_count, staring_string = \"in\",extension = \".jpg\")\n img = cv2.imread(path)\n images.append(img)\n # show_images(images)\n return images\n\ndef save_image(image_path, image):\n cv2.imwrite(image_path, image)\n\ndef median_method(frames):\n\n medianFrame = np.zeros(frames[0].shape)\n print(frames[0].shape)\n # satır sutun 240 320 3\n\n R = []\n G = []\n B = []\n for row_count in range(frames[0].shape[0]):\n for colum_count in range(frames[0].shape[1]):\n for frame in frames:\n R.append(frame[row_count][colum_count][0])\n G.append(frame[row_count][colum_count][1])\n B.append(frame[row_count][colum_count][2])\n\n medianFrame[row_count][colum_count][0] = np.median(R)\n medianFrame[row_count][colum_count][1] = np.median(G)\n medianFrame[row_count][colum_count][2] = np.median(B)\n R = []\n G = []\n B = []\n medianFrame = medianFrame.astype(dtype=np.uint8) \n return medianFrame\n\ndef evaluate(img1, img2):\n mean_error = abs((img1.astype(\"float\") - img2.astype(\"float\"))).mean()\n sum_error = abs(np.sum(img1.astype(\"float\") - img2.astype(\"float\")))/255\n return sum_error, mean_error\n\ndef create_log_file(logs, file_name):\n with open(file_name + \".log\", \"w\") as file:\n for log in logs:\n file.write(log)\n file.write(\"\\n\")\n\n\n\n\n# my implementation (slower than opencv)\ndef abs_diff(img1,img2):\n diffrance = np.zeros(img1.shape)\n for row_count in range(img1.shape[0]):\n for colum_count in range(img1.shape[1]):\n for color in range(3):\n diffrance[row_count][colum_count][color] = abs(int(img1[row_count][colum_count][color]) - int(img2[row_count][colum_count][color]))\n diffrance = diffrance.astype(dtype=np.uint8) \n return diffrance\n\ndef abs_diff_gray(img1,img2):\n diffrance = np.zeros(img1.shape)\n for row_count in range(img1.shape[0]):\n for colum_count in range(img1.shape[1]):\n diffrance[row_count][colum_count] = abs(int(img1[row_count][colum_count]) - int(img2[row_count][colum_count]))\n diffrance = diffrance.astype(dtype=np.uint8) \n return diffrance\n\n\n# dataset\n# http://www.changedetection.net Dataset>2012>Baseline\n\ntest_img_root_path = \"C:\\\\Users\\\\can\\\\ProjectDependencies\\\\datasets\\\\computer_vision\\\\highway\\\\input\\\\\"\nvalidation_img_root_path = \"C:\\\\Users\\\\can\\\\ProjectDependencies\\\\datasets\\\\computer_vision\\\\highway\\\\groundtruth\\\\\"\nsave_path = \"C:\\\\Users\\\\can\\\\Desktop\\\\highway\"\n\nlogfile_name = \"highway\"\n\n\n# get images\nimages = get_images(img_path = test_img_root_path, staring_image=1, img_count=25, random=True)\n\n\n# medianFrame = median_method(images)\nmedianFrame = np.median(images, axis=0).astype(dtype=np.uint8) \nmedianFrame = cv2.cvtColor(medianFrame, cv2.COLOR_BGR2GRAY)\n\nresults = []\nimg_count = 0\nwhile(True):\n\n # get paths\n img_count += 1\n test_img_path, test_img_name = path_creator(test_img_root_path, 
img_count, staring_string = \"in\",extension = \".jpg\")\n validation_img_path, _ = path_creator(validation_img_root_path, img_count, staring_string = \"gt\",extension = \".png\")\n\n # read images\n test_img = cv2.imread(test_img_path, cv2.IMREAD_GRAYSCALE)\n val_img = cv2.imread(validation_img_path, cv2.IMREAD_GRAYSCALE)\n\n if(test_img is None):\n break\n\n\n # find difference and treshold\n # difference = abs_diff_gray(medianFrame, frame)\n difference = cv2.absdiff(medianFrame, test_img)\n _, thresholded_img = cv2.threshold(difference, 35, 255, cv2.THRESH_BINARY)\n\n # save_image(os.path.join(save_path, test_img_name), thresholded_img)\n\n # evaluate\n sum_error, mean_error = evaluate(thresholded_img,val_img)\n\n # show results\n result = \"img name:{0} -> sum_error:{1:.0f} mean_error:{2:.3f} \".format(test_img_name, sum_error, mean_error)\n print(result)\n results.append(result)\n\n\n # show images\n cv2.imshow(\"median img\", medianFrame)\n cv2.imshow(\"test img\", test_img)\n cv2.imshow(\"validation img\", val_img)\n cv2.imshow(\"difference\", thresholded_img)\n\n cv2.waitKey(30)\n \n\n# save results\ncreate_log_file(results, logfile_name)\n\n\n\n\n\n\n\n\n\n\n\n\n" ]
[ [ "numpy.median", "numpy.zeros" ] ]
yoyoFC/Energy_DB_Project
[ "fd6f2bc82e5a8ac031458f9e2e57578e62fafca7" ]
[ ".ipynb_checkpoints/ES_Customer_Class-checkpoint.py" ]
[ "import streamlit as st\nimport pandas as pd\nimport numpy as np\nimport psycopg2 as db_connect\nimport altair as alt\n\n\nhost_name = \"dataviz.cgq2ewzuuqs1.us-east-2.rds.amazonaws.com\"\ndb_user = \"postgres\"\ndb_password = \"ElPeruano_2021\"\ndb_name = \"postgres\"\ndb_port = 5432\nconnection = db_connect.connect(host=host_name,user=db_user,password=db_password,database=db_name,port=db_port)\n\n\n\nquery = 'SELECT * FROM public.' + '\"EO_Customer_Class\" '\n\n#query = ('Select TO_CHAR( ' + \n# 'TO_DATE (Extract(Month from public.\"EO_Residential_Avg_Bill\".\"Date\")::text, '+ \" #'MM' \" +'),' + \" 'Mon' \" + \n# ') AS \"month_name\", ' + \n# 'Extract(Year from public.\"EO_Residential_Avg_Bill\".\"Date\") as year_Num,' + \n# '\"Average_kWH\", \"Fuel_Charge_cents_kWH\", \"Average_Bill\" ' +\n# ' from public.\"EO_Residential_Avg_Bill\"' + \n# ' ORDER BY \"month_name\",\"year_num\" ' )\n\nst.write(query)\ncursor = connection.cursor()\ncursor.execute(query)\ndata = cursor.fetchall()\ndf_raw = pd.DataFrame(data)\n\nconnection.close()\n\ndf_raw.rename(columns={0: 'Year',1:'Residential',2:'Commerical',3:'Industrial',4:'Other', 5:'Total'}, inplace=True)\n\nst.title(\"Customer Class data\")\n\nst.table(df_raw)\n\n#convert_dict = {'Residential':int,\n# 'Commerical': int,\n# 'Industrial': int,\n# 'Other': int,\n# 'Total':int}\n\n#df_raw=df_raw.astype(convert_dict)\n\n\n\ndata_ = {'Year': [2007, 2008, 2009, 2010, 2011,2012,2013,2014,2015,2016,2017,2018,2019],\n 'Residential':[345197,352574,363217,368700, 372329,376614,383257, 391410,401556,411366, 421752, 433411,443792],\n 'Commerical': [41825,\t42585,\t43049,\t43489,\t43815,\t44006,\t44847,\t45436,\t46253,\t47352,\t48285,\t48966,\t49587],\n 'Industrial':[75,\t78,\t81,\t80,\t81,\t82,\t138, 151,\t127,\t110,\t104\t,112,\t114],\n 'Other': [1523,\t1553,\t1579,\t1601,\t1640,\t1668,\t2340,\t2406,\t2507,\t2515,\t2560,\t2715,\t2765]\n\n\n }\n\n\n\n\ndf=pd.DataFrame(data_).set_index('Year')\n\nst.bar_chart(df)\n\n\n" ]
[ [ "pandas.DataFrame" ] ]
thorben-frank/netket
[ "33e7a2c2ae5cf7b2a3d9b34b34ecbfb31b5865af" ]
[ "netket/operator/_abstract_operator.py" ]
[ "# Copyright 2021 The NetKet Authors - All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport abc\nfrom typing import Tuple\n\nimport numpy as np\nimport jax.numpy as jnp\nfrom netket.utils.types import DType\n\nfrom scipy.sparse import csr_matrix as _csr_matrix\nfrom numba import jit\n\nfrom netket.hilbert import AbstractHilbert\n\n\n@jit(nopython=True)\ndef compute_row_indices(rows, sections):\n ntot = sections[-1]\n res = np.empty(ntot, dtype=np.intp)\n\n for i in range(1, sections.size):\n res[sections[i - 1] : sections[i]] = rows[i - 1]\n\n return res\n\n\nclass AbstractOperator(abc.ABC):\n \"\"\"Abstract class for quantum Operators. This class prototypes the methods\n needed by a class satisfying the Operator concept. Users interested in\n implementing new quantum Operators should derive they own class from this\n class\n \"\"\"\n\n _hilbert: AbstractHilbert\n r\"\"\"The hilbert space associated to this operator.\"\"\"\n\n def __init__(self, hilbert: AbstractHilbert):\n self._hilbert = hilbert\n\n @property\n def hilbert(self) -> AbstractHilbert:\n r\"\"\"The hilbert space associated to this operator.\"\"\"\n return self._hilbert\n\n @property\n def size(self) -> int:\n r\"\"\"The total number number of local degrees of freedom.\"\"\"\n return self._hilbert.size\n\n @property\n def is_hermitian(self) -> bool:\n \"\"\"Returns true if this operator is hermitian.\"\"\"\n return False\n\n @property\n def H(self) -> \"AbstractOperator\":\n \"\"\"Returns the Conjugate-Transposed operator\"\"\"\n if self.is_hermitian:\n return self\n\n from ._lazy import Adjoint\n\n return Adjoint(self)\n\n @property\n def T(self) -> \"AbstractOperator\":\n \"\"\"Returns the transposed operator\"\"\"\n return self.transpose()\n\n @property\n @abc.abstractmethod\n def dtype(self) -> DType:\n \"\"\"The dtype of the operator's matrix elements ⟨σ|Ô|σ'⟩.\"\"\"\n raise NotImplementedError\n\n def collect(self) -> \"AbstractOperator\":\n \"\"\"\n Returns a guranteed concrete instancce of an operator.\n\n As some operations on operators return lazy wrapperes (such as transpose,\n hermitian conjugate...), this is used to obtain a guaranteed non-lazy\n operator.\n \"\"\"\n return self\n\n def transpose(self, *, concrete=False) -> \"AbstractOperator\":\n \"\"\"Returns the transpose of this operator.\n\n Args:\n concrete: if True returns a concrete operator and not a lazy wrapper\n\n Returns:\n if concrete is not True, self or a lazy wrapper; the\n transposed operator otherwise\n \"\"\"\n if not concrete:\n from ._lazy import Transpose\n\n return Transpose(self)\n else:\n raise NotImplementedError\n\n def conjugate(self, *, concrete=False) -> \"AbstractOperator\":\n \"\"\"Returns the complex-conjugate of this operator.\n\n Args:\n concrete: if True returns a concrete operator and not a lazy wrapper\n\n Returns:\n if concrete is not True, self or a lazy wrapper; the\n complex-conjugated operator otherwise\n \"\"\"\n raise NotImplementedError\n\n @property\n def max_conn_size(self) -> int:\n 
\"\"\"The maximum number of non zero ⟨x|O|x'⟩ for every x.\"\"\"\n raise NotImplementedError\n\n def get_conn_padded(self, x: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:\n r\"\"\"Finds the connected elements of the Operator.\n Starting from a batch of quantum numbers x={x_1, ... x_n} of size B x M\n where B size of the batch and M size of the hilbert space, finds all states\n y_i^1, ..., y_i^K connected to every x_i.\n Returns a matrix of size B x Kmax x M where Kmax is the maximum number of\n connections for every y_i.\n\n Args:\n x : A N-tensor of shape (...,hilbert.size) containing\n the batch/batches of quantum numbers x.\n\n Returns:\n x_primes: The connected states x', in a N+1-tensor.\n mels: A N-tensor containing the matrix elements :math:`O(x,x')`\n associated to each x' for every batch.\n \"\"\"\n n_visible = x.shape[-1]\n n_samples = x.size // n_visible\n\n sections = np.empty(n_samples, dtype=np.int32)\n x_primes, mels = self.get_conn_flattened(\n x.reshape(-1, x.shape[-1]), sections, pad=True\n )\n\n n_primes = sections[0]\n\n x_primes_r = x_primes.reshape(*x.shape[:-1], n_primes, n_visible)\n mels_r = mels.reshape(*x.shape[:-1], n_primes)\n\n return x_primes_r, mels_r\n\n @abc.abstractmethod\n def get_conn_flattened(\n self, x: np.ndarray, sections: np.ndarray\n ) -> Tuple[np.ndarray, np.ndarray]:\n r\"\"\"Finds the connected elements of the Operator. Starting\n from a given quantum number x, it finds all other quantum numbers x' such\n that the matrix element :math:`O(x,x')` is different from zero. In general there\n will be several different connected states x' satisfying this\n condition, and they are denoted here :math:`x'(k)`, for :math:`k=0,1...N_{\\mathrm{connected}}`.\n\n This is a batched version, where x is a matrix of shape (batch_size,hilbert.size).\n\n Args:\n x (matrix): A matrix of shape (batch_size,hilbert.size) containing\n the batch of quantum numbers x.\n sections (array): An array of sections for the flattened x'.\n See numpy.split for the meaning of sections.\n\n Returns:\n matrix: The connected states x', flattened together in a single matrix.\n array: An array containing the matrix elements :math:`O(x,x')` associated to each x'.\n\n \"\"\"\n raise NotImplementedError()\n\n def get_conn(self, x):\n r\"\"\"Finds the connected elements of the Operator. Starting\n from a given quantum number x, it finds all other quantum numbers x' such\n that the matrix element :math:`O(x,x')` is different from zero. 
In general there\n will be several different connected states x' satisfying this\n condition, and they are denoted here :math:`x'(k)`, for :math:`k=0,1...N_{\\mathrm{connected}}`.\n\n Args:\n x (array): An array of shape (hilbert.size) containing the quantum numbers x.\n\n Returns:\n matrix: The connected states x' of shape (N_connected,hilbert.size)\n array: An array containing the matrix elements :math:`O(x,x')` associated to each x'.\n\n \"\"\"\n\n return self.get_conn_flattened(\n x.reshape((1, -1)),\n np.ones(1),\n )\n\n def n_conn(self, x, out=None) -> np.ndarray:\n r\"\"\"Return the number of states connected to x.\n\n Args:\n x (matrix): A matrix of shape (batch_size,hilbert.size) containing\n the batch of quantum numbers x.\n out (array): If None an output array is allocated.\n\n Returns:\n array: The number of connected states x' for each x[i].\n\n \"\"\"\n if out is None:\n out = np.empty(x.shape[0], dtype=np.intc)\n self.get_conn_flattened(x, out)\n out = self._n_conn_from_sections(out)\n\n return out\n\n @staticmethod\n @jit(nopython=True)\n def _n_conn_from_sections(out):\n low = 0\n for i in range(out.shape[0]):\n old_out = out[i]\n out[i] = out[i] - low\n low = old_out\n\n return out\n\n def to_sparse(self) -> _csr_matrix:\n r\"\"\"Returns the sparse matrix representation of the operator. Note that,\n in general, the size of the matrix is exponential in the number of quantum\n numbers, and this operation should thus only be performed for\n low-dimensional Hilbert spaces or sufficiently sparse operators.\n\n This method requires an indexable Hilbert space.\n\n Returns:\n The sparse matrix representation of the operator.\n \"\"\"\n concrete_op = self.collect()\n hilb = self.hilbert\n\n x = hilb.all_states()\n\n sections = np.empty(x.shape[0], dtype=np.int32)\n x_prime, mels = concrete_op.get_conn_flattened(x, sections)\n\n numbers = hilb.states_to_numbers(x_prime)\n\n sections1 = np.empty(sections.size + 1, dtype=np.int32)\n sections1[1:] = sections\n sections1[0] = 0\n\n ## eliminate duplicates from numbers\n # rows_indices = compute_row_indices(hilb.states_to_numbers(x), sections1)\n\n return _csr_matrix(\n (mels, numbers, sections1),\n shape=(self.hilbert.n_states, self.hilbert.n_states),\n )\n\n # return _csr_matrix(\n # (mels, (rows_indices, numbers)),\n # shape=(self.hilbert.n_states, self.hilbert.n_states),\n # )\n\n def to_dense(self) -> np.ndarray:\n r\"\"\"Returns the dense matrix representation of the operator. 
Note that,\n in general, the size of the matrix is exponential in the number of quantum\n numbers, and this operation should thus only be performed for\n low-dimensional Hilbert spaces or sufficiently sparse operators.\n\n This method requires an indexable Hilbert space.\n\n Returns:\n The dense matrix representation of the operator as a Numpy array.\n \"\"\"\n return self.to_sparse().todense().A\n\n def to_qobj(self) -> \"qutip.Qobj\": # noqa: F821\n r\"\"\"Convert the operator to a qutip's Qobj.\n\n Returns:\n A `qutip.Qobj` object.\n \"\"\"\n from qutip import Qobj\n\n return Qobj(\n self.to_sparse(), dims=[list(self.hilbert.shape), list(self.hilbert.shape)]\n )\n\n def apply(self, v: np.ndarray) -> np.ndarray:\n op = self.to_linear_operator()\n return op.dot(v)\n\n def __call__(self, v: np.ndarray) -> np.ndarray:\n return self.apply(v)\n\n def conj(self, *, concrete=False) -> \"AbstractOperator\":\n return self.conjugate(concrete=False)\n\n def to_linear_operator(self):\n return self.to_sparse()\n\n def _get_conn_flattened_closure(self):\n raise NotImplementedError(\n \"\"\"\n _get_conn_flattened_closure not implemented for this operator type.\n You were probably trying to use an operator with a sampler.\n Please report this bug.\n\n numba4jax won't work.\n \"\"\"\n )\n\n def __repr__(self):\n return f\"{type(self).__name__}(hilbert={self.hilbert})\"\n\n def __matmul__(self, other):\n if isinstance(other, np.ndarray) or isinstance(other, jnp.ndarray):\n return self.apply(other)\n elif isinstance(other, AbstractOperator):\n if self == other and self.is_hermitian:\n from ._lazy import Squared\n\n return Squared(self)\n else:\n return self._op__matmul__(other)\n else:\n return NotImplemented\n\n def _op__matmul__(self, other):\n \"Implementation on subclasses of __matmul__\"\n return NotImplemented\n\n def __rmatmul__(self, other):\n if isinstance(other, np.ndarray) or isinstance(other, jnp.ndarray):\n # return self.apply(other)\n return NotImplemented\n elif isinstance(other, AbstractOperator):\n if self == other and self.is_hermitian:\n from ._lazy import Squared\n\n return Squared(self)\n else:\n return self._op__rmatmul__(other)\n else:\n return NotImplemented\n\n def _op__rmatmul__(self, other):\n \"Implementation on subclasses of __matmul__\"\n return NotImplemented\n" ]
[ [ "scipy.sparse.csr_matrix", "numpy.empty", "numpy.ones" ] ]
ninikolov/low_resource_summarization
[ "3655e1d0538f082942649cdbaa9fee3efc9c4c0c" ]
[ "summarization_systems/oracle.py" ]
[ "\"\"\"Extractive oracle\"\"\"\n\n\n\nimport argparse\nimport itertools\nimport logging\nimport nltk\nimport numpy as np\nimport sys\nfrom multiprocessing import Process, Manager, cpu_count\nfrom tqdm import *\nfrom nltk.tokenize import ToktokTokenizer\ntoktok = ToktokTokenizer().tokenize\n\n\ndef set_overlap(source_set, target_set):\n \"\"\"Compute the overlap score between a source and a target set.\n It is the intersection of the two sets, divided by the length of the target set.\"\"\"\n word_overlap = target_set.intersection(source_set)\n overlap = len(word_overlap) / float(len(target_set))\n assert 0. <= overlap <= 1.\n return overlap\n\n\ndef jaccard_similarity(source, target):\n \"\"\"Compute the jaccard similarity between two texts.\"\"\"\n if len(source) == 0 or len(target) == 0:\n return 0.\n source_set = set(source)\n target_set = set(target)\n try:\n return set_overlap(source_set, target_set.union(source_set))\n except ZeroDivisionError as e:\n logging.error(e)\n return 0.\n\n\ndef copy_rate(source, target, tokenize=False):\n \"\"\"\n Compute copy rate\n\n :param source:\n :param target:\n :return:\n \"\"\"\n if tokenize:\n source = toktok(source)\n target = toktok(target)\n source_set = set(source)\n target_set = set(target)\n if len(source_set) == 0 or len(target_set) == 0:\n return 0.\n return set_overlap(source_set, target_set)\n\n\ndef repeat_rate(sents):\n \"\"\"\n Compute the repeat rate of a text\n\n :param sents:\n :return:\n \"\"\"\n if len(sents) == 1:\n return 0.\n else:\n repeat_rates = []\n for i, sent in enumerate(sents):\n rest = \" \".join([sents[j] for j, s in enumerate(sents) if j != i])\n repeat_rates.append(copy_rate(rest, sent, True))\n return np.mean(repeat_rates)\n \n\ndef get_sentence_ranking(sentences, target, metric=\"copy\"):\n target_array = toktok(target)\n similarities = []\n for sent in sentences:\n if metric == \"jacc\":\n similarities.append(jaccard_similarity(toktok(sent), target_array))\n elif metric == \"copy\":\n similarities.append(copy_rate(toktok(sent), target_array))\n return np.argsort(similarities)[::-1]\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"Compute the Oracle extractive summary.\")\n parser.add_argument('-src', help='The source file', required=True)\n parser.add_argument('-tgt', help='The target file', required=True)\n parser.add_argument('-output', help='The output file to write to', required=True)\n parser.add_argument('-metric', help='The metric to use, default Jaccard similarity', default=\"copy\")\n args = parser.parse_args()\n\n with open(args.src) as src:\n with open(args.tgt) as tgt:\n output = open(args.output, \"w\")\n with tqdm(desc=\"Oracle\") as pbar:\n for i, (src_line, tgt_line) in enumerate(zip(src, tgt)):\n src_sents = src_line.strip().split(\" <s> \")\n tgt_sents = tgt_line.strip().split(\" <s> \")\n optimal_summary = []\n for summary_sent in tgt_sents:\n # Get the close\n selection_order = get_sentence_ranking(src_sents, summary_sent, args.metric)\n for input_index in selection_order:\n if input_index not in optimal_summary:\n optimal_summary.append(input_index)\n break\n output.write(\"{}\\n\".format(\" <s> \".join([src_sents[j] for j in optimal_summary])))\n pbar.update()\n" ]
[ [ "numpy.argsort", "numpy.mean" ] ]
LSheneman/texas_rangers_modeler
[ "ebaa63e639beb715d108068cbf4e22f24848b4f7" ]
[ "src/features/tweetCleaner.py" ]
[ "import re \nimport sys\nimport tweepy \nfrom tweepy import OAuthHandler \nfrom textblob import TextBlob \nimport pickle\nimport string\nimport config\nimport pandas as pd\n\nclass TwitterCleaner(object): \n ''' \n Generic Twitter Class for sentiment analysis. \n '''\n def __init__(self): \n ''' \n Class constructor or initialization method. \n '''\n\n def unpickle_data(self, filename):\n with open(filename, \"rb\") as handle:\n tweet_list = pickle.load(handle)\n return tweet_list\n \n def deEmojify(self, inputString):\n return inputString.encode('ascii', 'ignore').decode('ascii')\n\n \n def clean_tweets(self, tweets): \n ''' \n Utility function to clean tweets text by converting to lower case, removing numbers, . \n ''' \n try:\n\n tweets = tweets.map(lambda tweet: tweet.lower())\n tweets = tweets.map(lambda tweet: self.deEmojify(tweet))\n tweets = tweets.map(lambda tweet: tweet.replace('rt', ''))\n \n # Remove urls\n tweets = tweets.map(lambda tweet: re.sub(r\"(?:\\@|https?\\://)\\S+\", \"\", tweet))\n\n # Remove user tags\n tweets = tweets.map(lambda tweet: re.sub(\"(@[^ ]+ )*@[^ ]+\", \"\", tweet))\n\n\n for punc in [\".\", \",\", \"?\", \"!\", \"'\", \":\", \"(\", \")\"]:\n tweets = tweets.map(lambda tweet: re.sub(\"[{}]+\".format(punc), \"\", tweet))\n tweets = tweets.map(lambda tweet: \" \".join(tweet.split()))\n tweets = tweets.map(lambda tweet: tweet.rstrip())\n \n # Remove documents with less 100 words (some timeline are only composed of URLs)\n # tweets = [tweet for tweet in tweets if len(tweet) > 100]\n\n\n\n \n return tweets \n \n except:\n print(\"Error cleaining tweets!\")\n \n def get_class(self, analysis):\n try:\n if analysis > 0.75:\n val = 'positive'\n elif analysis < -0.75:\n val = 'negative'\n else:\n val = 'neutral'\n return val\n \n except:\n print(\"Class Error!\")\n\n def get_tweet_sentiment(self, tweet): \n ''' \n Utility function to classify sentiment of passed tweet \n using textblob's sentiment method \n '''\n tweet['analysis'] = tweet.text.map(lambda text: TextBlob(text).sentiment.polarity)\n tweet['sentiment'] = tweet.analysis.map(lambda analysis: self.get_class(analysis))\n \n return tweet\n\n\n def get_tweets(self, fName, count = 10): \n ''' \n Main function to fetch tweets and parse them. 
\n '''\n # empty dataframe to store parsed tweets \n clean_tweets = pd.DataFrame()\n\n try: \n # read tweets from file\n fetched_tweets = self.unpickle_data(fName)\n fetched_tweets = pd.DataFrame(fetched_tweets)\n \n except: \n # print error (if any) \n print(\"Input Error!\")\n \n # Remove non-English tweets\n fetched_tweets = fetched_tweets[fetched_tweets.lang == 'en']\n \n clean_tweets = pd.DataFrame()\n\n #clean the text of the tweets\n clean_tweets['text'] = self.clean_tweets(fetched_tweets.text)\n \n clean_tweets = self.get_tweet_sentiment(clean_tweets)\n\n # remove duplicate tweets\n # clean_tweets.drop_duplicates(subset =\"text\", inplace=True)\n \n \n return clean_tweets \n \ndef main(): \n fName_in = sys.argv[1]\n fName_out = sys.argv[2]\n\n # creating object of TwitterClient Class \n api = TwitterCleaner() \n # calling function to get tweets \n tweets = api.get_tweets(fName_in) \n \n print(\"Number of tweets: {}\".format(len(tweets)))\n \n # identify positive tweets from tweets \n positive = tweets[tweets['sentiment'] == 'positive'].index \n # percentage of positive tweets \n print(\"\\nPositive tweets percentage: {}%\".format(round(100*len(positive)/len(tweets), 2))) \n \n \n negative = tweets[tweets['sentiment'] == 'negative'].index\n # percentage of negative tweets\n print(\"Negative tweets percentage: {} %\".format(round(100*len(negative)/len(tweets), 2)))\n\n # identify neutral tweets from tweets \n neutral = tweets[tweets['sentiment'] == 'neutral'].index \n # percentage of neutral tweets \n print(\"Neutral tweets percentage: {} %\".format(round(100*len(neutral)/len(tweets), 2)))\n\n \n # printing first tweets \n print(\"\\nPositive tweets:\\n\") \n for tweet in positive[:10]:\n print(tweets.text[tweet])\n \n print(\"\\nNegative tweets:\\n\") \n for tweet in negative[:10]:\n print(tweets.text[tweet])\n \n print(\"\\nNeutral tweets:\\n\") \n for tweet in neutral[:10]:\n print(tweets.text[tweet])\n \n tweets.to_pickle(fName_out)\n print(\"Clean tweets saved as {}\".format(fName_out))\n \nif __name__ == \"__main__\": \n # calling main function \n main() \n" ]
[ [ "pandas.DataFrame" ] ]
kenichinakanishi/gen-efficientnet-pytorch
[ "76f18aaf5a42e4b521a2cc482241575702075a43" ]
[ "geffnet/gen_efficientnet.py" ]
[ "\"\"\" Generic Efficient Networks\n\nA generic MobileNet class with building blocks to support a variety of models:\n\n* EfficientNet (B0-B8, L2 + Tensorflow pretrained AutoAug/RandAug/AdvProp/NoisyStudent ports)\n - EfficientNet: Rethinking Model Scaling for CNNs - https://arxiv.org/abs/1905.11946\n - CondConv: Conditionally Parameterized Convolutions for Efficient Inference - https://arxiv.org/abs/1904.04971\n - Adversarial Examples Improve Image Recognition - https://arxiv.org/abs/1911.09665\n - Self-training with Noisy Student improves ImageNet classification - https://arxiv.org/abs/1911.04252\n\n* EfficientNet-Lite\n\n* MixNet (Small, Medium, and Large)\n - MixConv: Mixed Depthwise Convolutional Kernels - https://arxiv.org/abs/1907.09595\n\n* MNasNet B1, A1 (SE), Small\n - MnasNet: Platform-Aware Neural Architecture Search for Mobile - https://arxiv.org/abs/1807.11626\n\n* FBNet-C\n - FBNet: Hardware-Aware Efficient ConvNet Design via Differentiable NAS - https://arxiv.org/abs/1812.03443\n\n* Single-Path NAS Pixel1\n - Single-Path NAS: Designing Hardware-Efficient ConvNets - https://arxiv.org/abs/1904.02877\n\n* And likely more...\n\nHacked together by / Copyright 2020 Ross Wightman\n\"\"\"\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom .config import layer_config_kwargs, is_scriptable\nfrom .conv2d_layers import select_conv2d\nfrom .helpers import load_pretrained\nfrom .efficientnet_builder import *\n\n__all__ = ['GenEfficientNet', 'mnasnet_050', 'mnasnet_075', 'mnasnet_100', 'mnasnet_b1', 'mnasnet_140',\n 'semnasnet_050', 'semnasnet_075', 'semnasnet_100', 'mnasnet_a1', 'semnasnet_140', 'mnasnet_small',\n 'mobilenetv2_100', 'mobilenetv2_140', 'mobilenetv2_110d', 'mobilenetv2_120d',\n 'fbnetc_100', 'spnasnet_100', 'efficientnet_b0', 'efficientnet_b1', 'efficientnet_b2', 'efficientnet_b3',\n 'efficientnet_b4', 'efficientnet_b5', 'efficientnet_b6', 'efficientnet_b7', 'efficientnet_b8',\n 'efficientnet_l2', 'efficientnet_es', 'efficientnet_em', 'efficientnet_el',\n 'efficientnet_cc_b0_4e', 'efficientnet_cc_b0_8e', 'efficientnet_cc_b1_8e',\n 'efficientnet_lite0', 'efficientnet_lite1', 'efficientnet_lite2', 'efficientnet_lite3', 'efficientnet_lite4',\n 'tf_efficientnet_b0', 'tf_efficientnet_b1', 'tf_efficientnet_b2', 'tf_efficientnet_b3',\n 'tf_efficientnet_b4', 'tf_efficientnet_b5', 'tf_efficientnet_b6', 'tf_efficientnet_b7', 'tf_efficientnet_b8',\n 'tf_efficientnet_b0_ap', 'tf_efficientnet_b1_ap', 'tf_efficientnet_b2_ap', 'tf_efficientnet_b3_ap',\n 'tf_efficientnet_b4_ap', 'tf_efficientnet_b5_ap', 'tf_efficientnet_b6_ap', 'tf_efficientnet_b7_ap',\n 'tf_efficientnet_b8_ap', 'tf_efficientnet_b0_ns', 'tf_efficientnet_b1_ns', 'tf_efficientnet_b2_ns',\n 'tf_efficientnet_b3_ns', 'tf_efficientnet_b4_ns', 'tf_efficientnet_b5_ns', 'tf_efficientnet_b6_ns',\n 'tf_efficientnet_b7_ns', 'tf_efficientnet_l2_ns', 'tf_efficientnet_l2_ns_475',\n 'tf_efficientnet_es', 'tf_efficientnet_em', 'tf_efficientnet_el',\n 'tf_efficientnet_cc_b0_4e', 'tf_efficientnet_cc_b0_8e', 'tf_efficientnet_cc_b1_8e',\n 'tf_efficientnet_lite0', 'tf_efficientnet_lite1', 'tf_efficientnet_lite2', 'tf_efficientnet_lite3',\n 'tf_efficientnet_lite4',\n 'mixnet_s', 'mixnet_m', 'mixnet_l', 'mixnet_xl', 'tf_mixnet_s', 'tf_mixnet_m', 'tf_mixnet_l']\n\n\nmodel_urls = {\n 'mnasnet_050': None,\n 'mnasnet_075': None,\n 'mnasnet_100':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_b1-74cb7081.pth',\n 'mnasnet_140': None,\n 'mnasnet_small': None,\n\n 'semnasnet_050': None,\n 
'semnasnet_075': None,\n 'semnasnet_100':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_a1-d9418771.pth',\n 'semnasnet_140': None,\n\n 'mobilenetv2_100':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_100_ra-b33bc2c4.pth',\n 'mobilenetv2_110d':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_110d_ra-77090ade.pth',\n 'mobilenetv2_120d':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_120d_ra-5987e2ed.pth',\n 'mobilenetv2_140':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_140_ra-21a4e913.pth',\n\n 'fbnetc_100':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetc_100-c345b898.pth',\n 'spnasnet_100':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/spnasnet_100-048bc3f4.pth',\n\n 'efficientnet_b0':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b0_ra-3dd342df.pth',\n 'efficientnet_b1':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b1-533bc792.pth',\n 'efficientnet_b2':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b2_ra-bcdf34b7.pth',\n 'efficientnet_b3':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b3_ra2-cf984f9c.pth',\n 'efficientnet_b4': None,\n 'efficientnet_b5': None,\n 'efficientnet_b6': None,\n 'efficientnet_b7': None,\n 'efficientnet_b8': None,\n 'efficientnet_l2': None,\n\n 'efficientnet_es':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_es_ra-f111e99c.pth',\n 'efficientnet_em': None,\n 'efficientnet_el': None,\n\n 'efficientnet_cc_b0_4e': None,\n 'efficientnet_cc_b0_8e': None,\n 'efficientnet_cc_b1_8e': None,\n\n 'efficientnet_lite0': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_lite0_ra-37913777.pth',\n 'efficientnet_lite1': None,\n 'efficientnet_lite2': None,\n 'efficientnet_lite3': None,\n 'efficientnet_lite4': None,\n\n 'tf_efficientnet_b0':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_aa-827b6e33.pth',\n 'tf_efficientnet_b1':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_aa-ea7a6ee0.pth',\n 'tf_efficientnet_b2':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_aa-60c94f97.pth',\n 'tf_efficientnet_b3':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_aa-84b4657e.pth',\n 'tf_efficientnet_b4':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_aa-818f208c.pth',\n 'tf_efficientnet_b5':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ra-9a3e5369.pth',\n 'tf_efficientnet_b6':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_aa-80ba17e4.pth',\n 'tf_efficientnet_b7':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ra-6c08e654.pth',\n 'tf_efficientnet_b8':\n 
'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ra-572d5dd9.pth',\n\n 'tf_efficientnet_b0_ap':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ap-f262efe1.pth',\n 'tf_efficientnet_b1_ap':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ap-44ef0a3d.pth',\n 'tf_efficientnet_b2_ap':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ap-2f8e7636.pth',\n 'tf_efficientnet_b3_ap':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ap-aad25bdd.pth',\n 'tf_efficientnet_b4_ap':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ap-dedb23e6.pth',\n 'tf_efficientnet_b5_ap':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ap-9e82fae8.pth',\n 'tf_efficientnet_b6_ap':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ap-4ffb161f.pth',\n 'tf_efficientnet_b7_ap':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ap-ddb28fec.pth',\n 'tf_efficientnet_b8_ap':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ap-00e169fa.pth',\n\n 'tf_efficientnet_b0_ns':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ns-c0e6a31c.pth',\n 'tf_efficientnet_b1_ns':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ns-99dd0c41.pth',\n 'tf_efficientnet_b2_ns':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ns-00306e48.pth',\n 'tf_efficientnet_b3_ns':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ns-9d44bf68.pth',\n 'tf_efficientnet_b4_ns':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ns-d6313a46.pth',\n 'tf_efficientnet_b5_ns':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ns-6f26d0cf.pth',\n 'tf_efficientnet_b6_ns':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ns-51548356.pth',\n 'tf_efficientnet_b7_ns':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ns-1dbc32de.pth',\n 'tf_efficientnet_l2_ns_475':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_l2_ns_475-bebbd00a.pth',\n 'tf_efficientnet_l2_ns':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_l2_ns-df73bb44.pth',\n\n 'tf_efficientnet_es':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_es-ca1afbfe.pth',\n 'tf_efficientnet_em':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_em-e78cfe58.pth',\n 'tf_efficientnet_el':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_el-5143854e.pth',\n\n 'tf_efficientnet_cc_b0_4e':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b0_4e-4362b6b2.pth',\n 
'tf_efficientnet_cc_b0_8e':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b0_8e-66184a25.pth',\n 'tf_efficientnet_cc_b1_8e':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b1_8e-f7c79ae1.pth',\n\n 'tf_efficientnet_lite0':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite0-0aa007d2.pth',\n 'tf_efficientnet_lite1':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite1-bde8b488.pth',\n 'tf_efficientnet_lite2':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite2-dcccb7df.pth',\n 'tf_efficientnet_lite3':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite3-b733e338.pth',\n 'tf_efficientnet_lite4':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite4-741542c3.pth',\n\n 'mixnet_s': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_s-a907afbc.pth',\n 'mixnet_m': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_m-4647fc68.pth',\n 'mixnet_l': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_l-5a9a2ed8.pth',\n 'mixnet_xl': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_xl_ra-aac3c00c.pth',\n\n 'tf_mixnet_s':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_s-89d3354b.pth',\n 'tf_mixnet_m':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_m-0f4d8805.pth',\n 'tf_mixnet_l':\n 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_l-6c92e0c8.pth',\n}\n\n\nclass GenEfficientNet(nn.Module):\n \"\"\" Generic EfficientNets\n\n An implementation of mobile optimized networks that covers:\n * EfficientNet (B0-B8, L2, CondConv, EdgeTPU)\n * MixNet (Small, Medium, and Large, XL)\n * MNASNet A1, B1, and small\n * FBNet C\n * Single-Path NAS Pixel1\n \"\"\"\n\n def __init__(self, block_args, num_classes=1000, in_chans=3, num_features=1280, stem_size=32, fix_stem=False,\n channel_multiplier=1.0, channel_divisor=8, channel_min=None,\n pad_type='', act_layer=nn.ReLU, drop_rate=0., drop_connect_rate=0.,\n se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None,\n weight_init='goog', antialiased=False):\n super(GenEfficientNet, self).__init__()\n self.drop_rate = drop_rate\n\n if not fix_stem:\n stem_size = round_channels(stem_size, channel_multiplier, channel_divisor, channel_min)\n self.conv_stem = select_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type)\n self.bn1 = norm_layer(stem_size, **norm_kwargs)\n self.act1 = act_layer(inplace=True)\n in_chs = stem_size\n\n builder = EfficientNetBuilder(\n channel_multiplier, channel_divisor, channel_min,\n pad_type, act_layer, se_kwargs, norm_layer, norm_kwargs, drop_connect_rate, antialiased)\n self.blocks = nn.Sequential(*builder(in_chs, block_args))\n in_chs = builder.in_chs\n\n self.conv_head = select_conv2d(in_chs, num_features, 1, padding=pad_type)\n self.bn2 = norm_layer(num_features, **norm_kwargs)\n self.act2 = act_layer(inplace=True)\n self.global_pool = nn.AdaptiveAvgPool2d(1)\n self.classifier = nn.Linear(num_features, num_classes)\n\n for n, m in self.named_modules():\n if weight_init == 
'goog':\n initialize_weight_goog(m, n)\n else:\n initialize_weight_default(m, n)\n\n def features(self, x):\n x = self.conv_stem(x)\n x = self.bn1(x)\n x = self.act1(x)\n x = self.blocks(x)\n x = self.conv_head(x)\n x = self.bn2(x)\n x = self.act2(x)\n return x\n\n def as_sequential(self):\n layers = [self.conv_stem, self.bn1, self.act1]\n layers.extend(self.blocks)\n layers.extend([\n self.conv_head, self.bn2, self.act2,\n self.global_pool, nn.Flatten(), nn.Dropout(self.drop_rate), self.classifier])\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.features(x)\n x = self.global_pool(x)\n x = x.flatten(1)\n if self.drop_rate > 0.:\n x = F.dropout(x, p=self.drop_rate, training=self.training)\n return self.classifier(x)\n\n\ndef _create_model(model_kwargs, variant, pretrained=False):\n as_sequential = model_kwargs.pop('as_sequential', False)\n model = GenEfficientNet(**model_kwargs)\n if pretrained:\n load_pretrained(model, model_urls[variant])\n if as_sequential:\n model = model.as_sequential()\n return model\n\n\ndef _gen_mnasnet_a1(variant, channel_multiplier=1.0, pretrained=False, **kwargs):\n \"\"\"Creates a mnasnet-a1 model.\n\n Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet\n Paper: https://arxiv.org/pdf/1807.11626.pdf.\n\n Args:\n channel_multiplier: multiplier to number of channels per layer.\n \"\"\"\n arch_def = [\n # stage 0, 112x112 in\n ['ds_r1_k3_s1_e1_c16_noskip'],\n # stage 1, 112x112 in\n ['ir_r2_k3_s2_e6_c24'],\n # stage 2, 56x56 in\n ['ir_r3_k5_s2_e3_c40_se0.25'],\n # stage 3, 28x28 in\n ['ir_r4_k3_s2_e6_c80'],\n # stage 4, 14x14in\n ['ir_r2_k3_s1_e6_c112_se0.25'],\n # stage 5, 14x14in\n ['ir_r3_k5_s2_e6_c160_se0.25'],\n # stage 6, 7x7 in\n ['ir_r1_k3_s1_e6_c320'],\n ]\n with layer_config_kwargs(kwargs):\n model_kwargs = dict(\n block_args=decode_arch_def(arch_def),\n stem_size=32,\n channel_multiplier=channel_multiplier,\n act_layer=resolve_act_layer(kwargs, 'relu'),\n norm_kwargs=resolve_bn_args(kwargs),\n **kwargs\n )\n model = _create_model(model_kwargs, variant, pretrained)\n return model\n\n\ndef _gen_mnasnet_b1(variant, channel_multiplier=1.0, pretrained=False, **kwargs):\n \"\"\"Creates a mnasnet-b1 model.\n\n Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet\n Paper: https://arxiv.org/pdf/1807.11626.pdf.\n\n Args:\n channel_multiplier: multiplier to number of channels per layer.\n \"\"\"\n arch_def = [\n # stage 0, 112x112 in\n ['ds_r1_k3_s1_c16_noskip'],\n # stage 1, 112x112 in\n ['ir_r3_k3_s2_e3_c24'],\n # stage 2, 56x56 in\n ['ir_r3_k5_s2_e3_c40'],\n # stage 3, 28x28 in\n ['ir_r3_k5_s2_e6_c80'],\n # stage 4, 14x14in\n ['ir_r2_k3_s1_e6_c96'],\n # stage 5, 14x14in\n ['ir_r4_k5_s2_e6_c192'],\n # stage 6, 7x7 in\n ['ir_r1_k3_s1_e6_c320_noskip']\n ]\n with layer_config_kwargs(kwargs):\n model_kwargs = dict(\n block_args=decode_arch_def(arch_def),\n stem_size=32,\n channel_multiplier=channel_multiplier,\n act_layer=resolve_act_layer(kwargs, 'relu'),\n norm_kwargs=resolve_bn_args(kwargs),\n **kwargs\n )\n model = _create_model(model_kwargs, variant, pretrained)\n return model\n\n\ndef _gen_mnasnet_small(variant, channel_multiplier=1.0, pretrained=False, **kwargs):\n \"\"\"Creates a mnasnet-b1 model.\n\n Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet\n Paper: https://arxiv.org/pdf/1807.11626.pdf.\n\n Args:\n channel_multiplier: multiplier to number of channels per layer.\n \"\"\"\n arch_def = [\n ['ds_r1_k3_s1_c8'],\n ['ir_r1_k3_s2_e3_c16'],\n 
['ir_r2_k3_s2_e6_c16'],\n ['ir_r4_k5_s2_e6_c32_se0.25'],\n ['ir_r3_k3_s1_e6_c32_se0.25'],\n ['ir_r3_k5_s2_e6_c88_se0.25'],\n ['ir_r1_k3_s1_e6_c144']\n ]\n with layer_config_kwargs(kwargs):\n model_kwargs = dict(\n block_args=decode_arch_def(arch_def),\n stem_size=8,\n channel_multiplier=channel_multiplier,\n act_layer=resolve_act_layer(kwargs, 'relu'),\n norm_kwargs=resolve_bn_args(kwargs),\n **kwargs\n )\n model = _create_model(model_kwargs, variant, pretrained)\n return model\n\n\ndef _gen_mobilenet_v2(\n variant, channel_multiplier=1.0, depth_multiplier=1.0, fix_stem_head=False, pretrained=False, **kwargs):\n \"\"\" Generate MobileNet-V2 network\n Ref impl: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet_v2.py\n Paper: https://arxiv.org/abs/1801.04381\n \"\"\"\n arch_def = [\n ['ds_r1_k3_s1_c16'],\n ['ir_r2_k3_s2_e6_c24'],\n ['ir_r3_k3_s2_e6_c32'],\n ['ir_r4_k3_s2_e6_c64'],\n ['ir_r3_k3_s1_e6_c96'],\n ['ir_r3_k3_s2_e6_c160'],\n ['ir_r1_k3_s1_e6_c320'],\n ]\n with layer_config_kwargs(kwargs):\n model_kwargs = dict(\n block_args=decode_arch_def(arch_def, depth_multiplier=depth_multiplier, fix_first_last=fix_stem_head),\n num_features=1280 if fix_stem_head else round_channels(1280, channel_multiplier, 8, None),\n stem_size=32,\n fix_stem=fix_stem_head,\n channel_multiplier=channel_multiplier,\n norm_kwargs=resolve_bn_args(kwargs),\n act_layer=nn.ReLU6,\n **kwargs\n )\n model = _create_model(model_kwargs, variant, pretrained)\n return model\n\n\ndef _gen_fbnetc(variant, channel_multiplier=1.0, pretrained=False, **kwargs):\n \"\"\" FBNet-C\n\n Paper: https://arxiv.org/abs/1812.03443\n Ref Impl: https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/modeling/backbone/fbnet_modeldef.py\n\n NOTE: the impl above does not relate to the 'C' variant here, that was derived from paper,\n it was used to confirm some building block details\n \"\"\"\n arch_def = [\n ['ir_r1_k3_s1_e1_c16'],\n ['ir_r1_k3_s2_e6_c24', 'ir_r2_k3_s1_e1_c24'],\n ['ir_r1_k5_s2_e6_c32', 'ir_r1_k5_s1_e3_c32', 'ir_r1_k5_s1_e6_c32', 'ir_r1_k3_s1_e6_c32'],\n ['ir_r1_k5_s2_e6_c64', 'ir_r1_k5_s1_e3_c64', 'ir_r2_k5_s1_e6_c64'],\n ['ir_r3_k5_s1_e6_c112', 'ir_r1_k5_s1_e3_c112'],\n ['ir_r4_k5_s2_e6_c184'],\n ['ir_r1_k3_s1_e6_c352'],\n ]\n with layer_config_kwargs(kwargs):\n model_kwargs = dict(\n block_args=decode_arch_def(arch_def),\n stem_size=16,\n num_features=1984, # paper suggests this, but is not 100% clear\n channel_multiplier=channel_multiplier,\n act_layer=resolve_act_layer(kwargs, 'relu'),\n norm_kwargs=resolve_bn_args(kwargs),\n **kwargs\n )\n model = _create_model(model_kwargs, variant, pretrained)\n return model\n\n\ndef _gen_spnasnet(variant, channel_multiplier=1.0, pretrained=False, **kwargs):\n \"\"\"Creates the Single-Path NAS model from search targeted for Pixel1 phone.\n\n Paper: https://arxiv.org/abs/1904.02877\n\n Args:\n channel_multiplier: multiplier to number of channels per layer.\n \"\"\"\n arch_def = [\n # stage 0, 112x112 in\n ['ds_r1_k3_s1_c16_noskip'],\n # stage 1, 112x112 in\n ['ir_r3_k3_s2_e3_c24'],\n # stage 2, 56x56 in\n ['ir_r1_k5_s2_e6_c40', 'ir_r3_k3_s1_e3_c40'],\n # stage 3, 28x28 in\n ['ir_r1_k5_s2_e6_c80', 'ir_r3_k3_s1_e3_c80'],\n # stage 4, 14x14in\n ['ir_r1_k5_s1_e6_c96', 'ir_r3_k5_s1_e3_c96'],\n # stage 5, 14x14in\n ['ir_r4_k5_s2_e6_c192'],\n # stage 6, 7x7 in\n ['ir_r1_k3_s1_e6_c320_noskip']\n ]\n with layer_config_kwargs(kwargs):\n model_kwargs = dict(\n block_args=decode_arch_def(arch_def),\n stem_size=32,\n 
channel_multiplier=channel_multiplier,\n act_layer=resolve_act_layer(kwargs, 'relu'),\n norm_kwargs=resolve_bn_args(kwargs),\n **kwargs\n )\n model = _create_model(model_kwargs, variant, pretrained)\n return model\n\n\ndef _gen_efficientnet(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs):\n \"\"\"Creates an EfficientNet model.\n\n Ref impl: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py\n Paper: https://arxiv.org/abs/1905.11946\n\n EfficientNet params\n name: (channel_multiplier, depth_multiplier, resolution, dropout_rate)\n 'efficientnet-b0': (1.0, 1.0, 224, 0.2),\n 'efficientnet-b1': (1.0, 1.1, 240, 0.2),\n 'efficientnet-b2': (1.1, 1.2, 260, 0.3),\n 'efficientnet-b3': (1.2, 1.4, 300, 0.3),\n 'efficientnet-b4': (1.4, 1.8, 380, 0.4),\n 'efficientnet-b5': (1.6, 2.2, 456, 0.4),\n 'efficientnet-b6': (1.8, 2.6, 528, 0.5),\n 'efficientnet-b7': (2.0, 3.1, 600, 0.5),\n 'efficientnet-b8': (2.2, 3.6, 672, 0.5),\n\n Args:\n channel_multiplier: multiplier to number of channels per layer\n depth_multiplier: multiplier to number of repeats per stage\n\n \"\"\"\n arch_def = [\n ['ds_r1_k3_s1_e1_c16_se0.25'],\n ['ir_r2_k3_s2_e6_c24_se0.25'],\n ['ir_r2_k5_s2_e6_c40_se0.25'],\n ['ir_r3_k3_s2_e6_c80_se0.25'],\n ['ir_r3_k5_s1_e6_c112_se0.25'],\n ['ir_r4_k5_s2_e6_c192_se0.25'],\n ['ir_r1_k3_s1_e6_c320_se0.25'],\n ]\n with layer_config_kwargs(kwargs):\n model_kwargs = dict(\n block_args=decode_arch_def(arch_def, depth_multiplier),\n num_features=round_channels(1280, channel_multiplier, 8, None),\n stem_size=32,\n channel_multiplier=channel_multiplier,\n act_layer=resolve_act_layer(kwargs, 'swish'),\n norm_kwargs=resolve_bn_args(kwargs),\n **kwargs,\n )\n model = _create_model(model_kwargs, variant, pretrained)\n return model\n\n\ndef _gen_efficientnet_edge(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs):\n arch_def = [\n # NOTE `fc` is present to override a mismatch between stem channels and in chs not\n # present in other models\n ['er_r1_k3_s1_e4_c24_fc24_noskip'],\n ['er_r2_k3_s2_e8_c32'],\n ['er_r4_k3_s2_e8_c48'],\n ['ir_r5_k5_s2_e8_c96'],\n ['ir_r4_k5_s1_e8_c144'],\n ['ir_r2_k5_s2_e8_c192'],\n ]\n with layer_config_kwargs(kwargs):\n model_kwargs = dict(\n block_args=decode_arch_def(arch_def, depth_multiplier),\n num_features=round_channels(1280, channel_multiplier, 8, None),\n stem_size=32,\n channel_multiplier=channel_multiplier,\n act_layer=resolve_act_layer(kwargs, 'relu'),\n norm_kwargs=resolve_bn_args(kwargs),\n **kwargs,\n )\n model = _create_model(model_kwargs, variant, pretrained)\n return model\n\n\ndef _gen_efficientnet_condconv(\n variant, channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=1, pretrained=False, **kwargs):\n \"\"\"Creates an efficientnet-condconv model.\"\"\"\n arch_def = [\n ['ds_r1_k3_s1_e1_c16_se0.25'],\n ['ir_r2_k3_s2_e6_c24_se0.25'],\n ['ir_r2_k5_s2_e6_c40_se0.25'],\n ['ir_r3_k3_s2_e6_c80_se0.25'],\n ['ir_r3_k5_s1_e6_c112_se0.25_cc4'],\n ['ir_r4_k5_s2_e6_c192_se0.25_cc4'],\n ['ir_r1_k3_s1_e6_c320_se0.25_cc4'],\n ]\n with layer_config_kwargs(kwargs):\n model_kwargs = dict(\n block_args=decode_arch_def(arch_def, depth_multiplier, experts_multiplier=experts_multiplier),\n num_features=round_channels(1280, channel_multiplier, 8, None),\n stem_size=32,\n channel_multiplier=channel_multiplier,\n act_layer=resolve_act_layer(kwargs, 'swish'),\n norm_kwargs=resolve_bn_args(kwargs),\n **kwargs,\n )\n model = _create_model(model_kwargs, variant, 
pretrained)\n return model\n\n\ndef _gen_efficientnet_lite(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs):\n \"\"\"Creates an EfficientNet-Lite model.\n\n Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/lite\n Paper: https://arxiv.org/abs/1905.11946\n\n EfficientNet params\n name: (channel_multiplier, depth_multiplier, resolution, dropout_rate)\n 'efficientnet-lite0': (1.0, 1.0, 224, 0.2),\n 'efficientnet-lite1': (1.0, 1.1, 240, 0.2),\n 'efficientnet-lite2': (1.1, 1.2, 260, 0.3),\n 'efficientnet-lite3': (1.2, 1.4, 280, 0.3),\n 'efficientnet-lite4': (1.4, 1.8, 300, 0.3),\n\n Args:\n channel_multiplier: multiplier to number of channels per layer\n depth_multiplier: multiplier to number of repeats per stage\n \"\"\"\n arch_def = [\n ['ds_r1_k3_s1_e1_c16'],\n ['ir_r2_k3_s2_e6_c24'],\n ['ir_r2_k5_s2_e6_c40'],\n ['ir_r3_k3_s2_e6_c80'],\n ['ir_r3_k5_s1_e6_c112'],\n ['ir_r4_k5_s2_e6_c192'],\n ['ir_r1_k3_s1_e6_c320'],\n ]\n with layer_config_kwargs(kwargs):\n model_kwargs = dict(\n block_args=decode_arch_def(arch_def, depth_multiplier, fix_first_last=True),\n num_features=1280,\n stem_size=32,\n fix_stem=True,\n channel_multiplier=channel_multiplier,\n act_layer=nn.ReLU6,\n norm_kwargs=resolve_bn_args(kwargs),\n **kwargs,\n )\n model = _create_model(model_kwargs, variant, pretrained)\n return model\n\n\ndef _gen_mixnet_s(variant, channel_multiplier=1.0, pretrained=False, **kwargs):\n \"\"\"Creates a MixNet Small model.\n\n Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet/mixnet\n Paper: https://arxiv.org/abs/1907.09595\n \"\"\"\n arch_def = [\n # stage 0, 112x112 in\n ['ds_r1_k3_s1_e1_c16'], # relu\n # stage 1, 112x112 in\n ['ir_r1_k3_a1.1_p1.1_s2_e6_c24', 'ir_r1_k3_a1.1_p1.1_s1_e3_c24'], # relu\n # stage 2, 56x56 in\n ['ir_r1_k3.5.7_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'], # swish\n # stage 3, 28x28 in\n ['ir_r1_k3.5.7_p1.1_s2_e6_c80_se0.25_nsw', 'ir_r2_k3.5_p1.1_s1_e6_c80_se0.25_nsw'], # swish\n # stage 4, 14x14in\n ['ir_r1_k3.5.7_a1.1_p1.1_s1_e6_c120_se0.5_nsw', 'ir_r2_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'], # swish\n # stage 5, 14x14in\n ['ir_r1_k3.5.7.9.11_s2_e6_c200_se0.5_nsw', 'ir_r2_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw'], # swish\n # 7x7\n ]\n with layer_config_kwargs(kwargs):\n model_kwargs = dict(\n block_args=decode_arch_def(arch_def),\n num_features=1536,\n stem_size=16,\n channel_multiplier=channel_multiplier,\n act_layer=resolve_act_layer(kwargs, 'relu'),\n norm_kwargs=resolve_bn_args(kwargs),\n **kwargs\n )\n model = _create_model(model_kwargs, variant, pretrained)\n return model\n\n\ndef _gen_mixnet_m(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs):\n \"\"\"Creates a MixNet Medium-Large model.\n\n Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet/mixnet\n Paper: https://arxiv.org/abs/1907.09595\n \"\"\"\n arch_def = [\n # stage 0, 112x112 in\n ['ds_r1_k3_s1_e1_c24'], # relu\n # stage 1, 112x112 in\n ['ir_r1_k3.5.7_a1.1_p1.1_s2_e6_c32', 'ir_r1_k3_a1.1_p1.1_s1_e3_c32'], # relu\n # stage 2, 56x56 in\n ['ir_r1_k3.5.7.9_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'], # swish\n # stage 3, 28x28 in\n ['ir_r1_k3.5.7_s2_e6_c80_se0.25_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e6_c80_se0.25_nsw'], # swish\n # stage 4, 14x14in\n ['ir_r1_k3_s1_e6_c120_se0.5_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'], # swish\n # stage 5, 14x14in\n ['ir_r1_k3.5.7.9_s2_e6_c200_se0.5_nsw', 
'ir_r3_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw'], # swish\n # 7x7\n ]\n with layer_config_kwargs(kwargs):\n model_kwargs = dict(\n block_args=decode_arch_def(arch_def, depth_multiplier, depth_trunc='round'),\n num_features=1536,\n stem_size=24,\n channel_multiplier=channel_multiplier,\n act_layer=resolve_act_layer(kwargs, 'relu'),\n norm_kwargs=resolve_bn_args(kwargs),\n **kwargs\n )\n model = _create_model(model_kwargs, variant, pretrained)\n return model\n\n\ndef mnasnet_050(pretrained=False, **kwargs):\n \"\"\" MNASNet B1, depth multiplier of 0.5. \"\"\"\n model = _gen_mnasnet_b1('mnasnet_050', 0.5, pretrained=pretrained, **kwargs)\n return model\n\n\ndef mnasnet_075(pretrained=False, **kwargs):\n \"\"\" MNASNet B1, depth multiplier of 0.75. \"\"\"\n model = _gen_mnasnet_b1('mnasnet_075', 0.75, pretrained=pretrained, **kwargs)\n return model\n\n\ndef mnasnet_100(pretrained=False, **kwargs):\n \"\"\" MNASNet B1, depth multiplier of 1.0. \"\"\"\n model = _gen_mnasnet_b1('mnasnet_100', 1.0, pretrained=pretrained, **kwargs)\n return model\n\n\ndef mnasnet_b1(pretrained=False, **kwargs):\n \"\"\" MNASNet B1, depth multiplier of 1.0. \"\"\"\n return mnasnet_100(pretrained, **kwargs)\n\n\ndef mnasnet_140(pretrained=False, **kwargs):\n \"\"\" MNASNet B1, depth multiplier of 1.4 \"\"\"\n model = _gen_mnasnet_b1('mnasnet_140', 1.4, pretrained=pretrained, **kwargs)\n return model\n\n\ndef semnasnet_050(pretrained=False, **kwargs):\n \"\"\" MNASNet A1 (w/ SE), depth multiplier of 0.5 \"\"\"\n model = _gen_mnasnet_a1('semnasnet_050', 0.5, pretrained=pretrained, **kwargs)\n return model\n\n\ndef semnasnet_075(pretrained=False, **kwargs):\n \"\"\" MNASNet A1 (w/ SE), depth multiplier of 0.75. \"\"\"\n model = _gen_mnasnet_a1('semnasnet_075', 0.75, pretrained=pretrained, **kwargs)\n return model\n\n\ndef semnasnet_100(pretrained=False, **kwargs):\n \"\"\" MNASNet A1 (w/ SE), depth multiplier of 1.0. \"\"\"\n model = _gen_mnasnet_a1('semnasnet_100', 1.0, pretrained=pretrained, **kwargs)\n return model\n\n\ndef mnasnet_a1(pretrained=False, **kwargs):\n \"\"\" MNASNet A1 (w/ SE), depth multiplier of 1.0. \"\"\"\n return semnasnet_100(pretrained, **kwargs)\n\n\ndef semnasnet_140(pretrained=False, **kwargs):\n \"\"\" MNASNet A1 (w/ SE), depth multiplier of 1.4. \"\"\"\n model = _gen_mnasnet_a1('semnasnet_140', 1.4, pretrained=pretrained, **kwargs)\n return model\n\n\ndef mnasnet_small(pretrained=False, **kwargs):\n \"\"\" MNASNet Small, depth multiplier of 1.0. 
\"\"\"\n model = _gen_mnasnet_small('mnasnet_small', 1.0, pretrained=pretrained, **kwargs)\n return model\n\n\ndef mobilenetv2_100(pretrained=False, **kwargs):\n \"\"\" MobileNet V2 w/ 1.0 channel multiplier \"\"\"\n model = _gen_mobilenet_v2('mobilenetv2_100', 1.0, pretrained=pretrained, **kwargs)\n return model\n\n\ndef mobilenetv2_140(pretrained=False, **kwargs):\n \"\"\" MobileNet V2 w/ 1.4 channel multiplier \"\"\"\n model = _gen_mobilenet_v2('mobilenetv2_140', 1.4, pretrained=pretrained, **kwargs)\n return model\n\n\ndef mobilenetv2_110d(pretrained=False, **kwargs):\n \"\"\" MobileNet V2 w/ 1.1 channel, 1.2 depth multipliers\"\"\"\n model = _gen_mobilenet_v2(\n 'mobilenetv2_110d', 1.1, depth_multiplier=1.2, fix_stem_head=True, pretrained=pretrained, **kwargs)\n return model\n\n\ndef mobilenetv2_120d(pretrained=False, **kwargs):\n \"\"\" MobileNet V2 w/ 1.2 channel, 1.4 depth multipliers \"\"\"\n model = _gen_mobilenet_v2(\n 'mobilenetv2_120d', 1.2, depth_multiplier=1.4, fix_stem_head=True, pretrained=pretrained, **kwargs)\n return model\n\n\ndef fbnetc_100(pretrained=False, **kwargs):\n \"\"\" FBNet-C \"\"\"\n if pretrained:\n # pretrained model trained with non-default BN epsilon\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n model = _gen_fbnetc('fbnetc_100', 1.0, pretrained=pretrained, **kwargs)\n return model\n\n\ndef spnasnet_100(pretrained=False, **kwargs):\n \"\"\" Single-Path NAS Pixel1\"\"\"\n model = _gen_spnasnet('spnasnet_100', 1.0, pretrained=pretrained, **kwargs)\n return model\n\n\ndef efficientnet_b0(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B0 \"\"\"\n # NOTE for train set drop_rate=0.2, drop_connect_rate=0.2\n model = _gen_efficientnet(\n 'efficientnet_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)\n return model\n\n\ndef efficientnet_b1(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B1 \"\"\"\n # NOTE for train set drop_rate=0.2, drop_connect_rate=0.2\n model = _gen_efficientnet(\n 'efficientnet_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)\n return model\n\n\ndef efficientnet_b2(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B2 \"\"\"\n # NOTE for train set drop_rate=0.3, drop_connect_rate=0.2\n model = _gen_efficientnet(\n 'efficientnet_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs)\n return model\n\n\ndef efficientnet_b3(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B3 \"\"\"\n # NOTE for train set drop_rate=0.3, drop_connect_rate=0.2\n model = _gen_efficientnet(\n 'efficientnet_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)\n return model\n\n\ndef efficientnet_b4(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B4 \"\"\"\n # NOTE for train set drop_rate=0.4, drop_connect_rate=0.2\n model = _gen_efficientnet(\n 'efficientnet_b4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs)\n return model\n\n\ndef efficientnet_b5(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B5 \"\"\"\n # NOTE for train set drop_rate=0.4, drop_connect_rate=0.2\n model = _gen_efficientnet(\n 'efficientnet_b5', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs)\n return model\n\n\ndef efficientnet_b6(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B6 \"\"\"\n # NOTE for train set drop_rate=0.5, drop_connect_rate=0.2\n model = _gen_efficientnet(\n 'efficientnet_b6', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs)\n return model\n\n\ndef 
efficientnet_b7(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B7 \"\"\"\n # NOTE for train set drop_rate=0.5, drop_connect_rate=0.2\n model = _gen_efficientnet(\n 'efficientnet_b7', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs)\n return model\n\n\ndef efficientnet_b8(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B8 \"\"\"\n # NOTE for train set drop_rate=0.5, drop_connect_rate=0.2\n model = _gen_efficientnet(\n 'efficientnet_b8', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs)\n return model\n\n\ndef efficientnet_l2(pretrained=False, **kwargs):\n \"\"\" EfficientNet-L2. \"\"\"\n # NOTE for train, drop_rate should be 0.5\n model = _gen_efficientnet(\n 'efficientnet_l2', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs)\n return model\n\n\ndef efficientnet_es(pretrained=False, **kwargs):\n \"\"\" EfficientNet-Edge Small. \"\"\"\n model = _gen_efficientnet_edge(\n 'efficientnet_es', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)\n return model\n\n\ndef efficientnet_em(pretrained=False, **kwargs):\n \"\"\" EfficientNet-Edge-Medium. \"\"\"\n model = _gen_efficientnet_edge(\n 'efficientnet_em', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)\n return model\n\n\ndef efficientnet_el(pretrained=False, **kwargs):\n \"\"\" EfficientNet-Edge-Large. \"\"\"\n model = _gen_efficientnet_edge(\n 'efficientnet_el', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)\n return model\n\n\ndef efficientnet_cc_b0_4e(pretrained=False, **kwargs):\n \"\"\" EfficientNet-CondConv-B0 w/ 8 Experts \"\"\"\n # NOTE for train set drop_rate=0.25, drop_connect_rate=0.2\n model = _gen_efficientnet_condconv(\n 'efficientnet_cc_b0_4e', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)\n return model\n\n\ndef efficientnet_cc_b0_8e(pretrained=False, **kwargs):\n \"\"\" EfficientNet-CondConv-B0 w/ 8 Experts \"\"\"\n # NOTE for train set drop_rate=0.25, drop_connect_rate=0.2\n model = _gen_efficientnet_condconv(\n 'efficientnet_cc_b0_8e', channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=2,\n pretrained=pretrained, **kwargs)\n return model\n\n\ndef efficientnet_cc_b1_8e(pretrained=False, **kwargs):\n \"\"\" EfficientNet-CondConv-B1 w/ 8 Experts \"\"\"\n # NOTE for train set drop_rate=0.25, drop_connect_rate=0.2\n model = _gen_efficientnet_condconv(\n 'efficientnet_cc_b1_8e', channel_multiplier=1.0, depth_multiplier=1.1, experts_multiplier=2,\n pretrained=pretrained, **kwargs)\n return model\n\n\ndef efficientnet_lite0(pretrained=False, **kwargs):\n \"\"\" EfficientNet-Lite0 \"\"\"\n model = _gen_efficientnet_lite(\n 'efficientnet_lite0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)\n return model\n\n\ndef efficientnet_lite1(pretrained=False, **kwargs):\n \"\"\" EfficientNet-Lite1 \"\"\"\n model = _gen_efficientnet_lite(\n 'efficientnet_lite1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)\n return model\n\n\ndef efficientnet_lite2(pretrained=False, **kwargs):\n \"\"\" EfficientNet-Lite2 \"\"\"\n model = _gen_efficientnet_lite(\n 'efficientnet_lite2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs)\n return model\n\n\ndef efficientnet_lite3(pretrained=False, **kwargs):\n \"\"\" EfficientNet-Lite3 \"\"\"\n model = _gen_efficientnet_lite(\n 'efficientnet_lite3', channel_multiplier=1.2, 
depth_multiplier=1.4, pretrained=pretrained, **kwargs)\n return model\n\n\ndef efficientnet_lite4(pretrained=False, **kwargs):\n \"\"\" EfficientNet-Lite4 \"\"\"\n model = _gen_efficientnet_lite(\n 'efficientnet_lite4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs)\n return model\n\n\ndef tf_efficientnet_b0(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B0 AutoAug. Tensorflow compatible variant \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)\n return model\n\n\ndef tf_efficientnet_b1(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B1 AutoAug. Tensorflow compatible variant \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)\n return model\n\n\ndef tf_efficientnet_b2(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B2 AutoAug. Tensorflow compatible variant \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs)\n return model\n\n\ndef tf_efficientnet_b3(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B3 AutoAug. Tensorflow compatible variant \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)\n return model\n\n\ndef tf_efficientnet_b4(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B4 AutoAug. Tensorflow compatible variant \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs)\n return model\n\n\ndef tf_efficientnet_b5(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B5 RandAug. Tensorflow compatible variant \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b5', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs)\n return model\n\n\ndef tf_efficientnet_b6(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B6 AutoAug. Tensorflow compatible variant \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b6', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs)\n return model\n\n\ndef tf_efficientnet_b7(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B7 RandAug. Tensorflow compatible variant \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b7', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs)\n return model\n\n\ndef tf_efficientnet_b8(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B8 RandAug. Tensorflow compatible variant \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b8', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs)\n return model\n\n\ndef tf_efficientnet_b0_ap(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B0 AdvProp. 
Tensorflow compatible variant\n Paper: Adversarial Examples Improve Image Recognition (https://arxiv.org/abs/1911.09665)\n \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b0_ap', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)\n return model\n\n\ndef tf_efficientnet_b1_ap(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B1 AdvProp. Tensorflow compatible variant\n Paper: Adversarial Examples Improve Image Recognition (https://arxiv.org/abs/1911.09665)\n \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b1_ap', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)\n return model\n\n\ndef tf_efficientnet_b2_ap(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B2 AdvProp. Tensorflow compatible variant\n Paper: Adversarial Examples Improve Image Recognition (https://arxiv.org/abs/1911.09665)\n \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b2_ap', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs)\n return model\n\n\ndef tf_efficientnet_b3_ap(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B3 AdvProp. Tensorflow compatible variant\n Paper: Adversarial Examples Improve Image Recognition (https://arxiv.org/abs/1911.09665)\n \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b3_ap', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)\n return model\n\n\ndef tf_efficientnet_b4_ap(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B4 AdvProp. Tensorflow compatible variant\n Paper: Adversarial Examples Improve Image Recognition (https://arxiv.org/abs/1911.09665)\n \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b4_ap', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs)\n return model\n\n\ndef tf_efficientnet_b5_ap(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B5 AdvProp. Tensorflow compatible variant\n Paper: Adversarial Examples Improve Image Recognition (https://arxiv.org/abs/1911.09665)\n \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b5_ap', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs)\n return model\n\n\ndef tf_efficientnet_b6_ap(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B6 AdvProp. Tensorflow compatible variant\n Paper: Adversarial Examples Improve Image Recognition (https://arxiv.org/abs/1911.09665)\n \"\"\"\n # NOTE for train, drop_rate should be 0.5\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b6_ap', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs)\n return model\n\n\ndef tf_efficientnet_b7_ap(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B7 AdvProp. 
Tensorflow compatible variant\n Paper: Adversarial Examples Improve Image Recognition (https://arxiv.org/abs/1911.09665)\n \"\"\"\n # NOTE for train, drop_rate should be 0.5\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b7_ap', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs)\n return model\n\n\ndef tf_efficientnet_b8_ap(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B8 AdvProp. Tensorflow compatible variant\n Paper: Adversarial Examples Improve Image Recognition (https://arxiv.org/abs/1911.09665)\n \"\"\"\n # NOTE for train, drop_rate should be 0.5\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b8_ap', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs)\n return model\n\n\ndef tf_efficientnet_b0_ns(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B0 NoisyStudent. Tensorflow compatible variant\n Paper: Self-training with Noisy Student improves ImageNet classification (https://arxiv.org/abs/1911.04252)\n \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b0_ns', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)\n return model\n\n\ndef tf_efficientnet_b1_ns(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B1 NoisyStudent. Tensorflow compatible variant\n Paper: Self-training with Noisy Student improves ImageNet classification (https://arxiv.org/abs/1911.04252)\n \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b1_ns', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)\n return model\n\n\ndef tf_efficientnet_b2_ns(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B2 NoisyStudent. Tensorflow compatible variant\n Paper: Self-training with Noisy Student improves ImageNet classification (https://arxiv.org/abs/1911.04252)\n \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b2_ns', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs)\n return model\n\n\ndef tf_efficientnet_b3_ns(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B3 NoisyStudent. Tensorflow compatible variant\n Paper: Self-training with Noisy Student improves ImageNet classification (https://arxiv.org/abs/1911.04252)\n \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b3_ns', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)\n return model\n\n\ndef tf_efficientnet_b4_ns(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B4 NoisyStudent. Tensorflow compatible variant\n Paper: Self-training with Noisy Student improves ImageNet classification (https://arxiv.org/abs/1911.04252)\n \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b4_ns', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs)\n return model\n\n\ndef tf_efficientnet_b5_ns(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B5 NoisyStudent. 
Tensorflow compatible variant\n Paper: Self-training with Noisy Student improves ImageNet classification (https://arxiv.org/abs/1911.04252)\n \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b5_ns', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs)\n return model\n\n\ndef tf_efficientnet_b6_ns(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B6 NoisyStudent. Tensorflow compatible variant\n Paper: Self-training with Noisy Student improves ImageNet classification (https://arxiv.org/abs/1911.04252)\n \"\"\"\n # NOTE for train, drop_rate should be 0.5\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b6_ns', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs)\n return model\n\n\ndef tf_efficientnet_b7_ns(pretrained=False, **kwargs):\n \"\"\" EfficientNet-B7 NoisyStudent. Tensorflow compatible variant\n Paper: Self-training with Noisy Student improves ImageNet classification (https://arxiv.org/abs/1911.04252)\n \"\"\"\n # NOTE for train, drop_rate should be 0.5\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_b7_ns', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs)\n return model\n\n\ndef tf_efficientnet_l2_ns_475(pretrained=False, **kwargs):\n \"\"\" EfficientNet-L2 NoisyStudent @ 475x475. Tensorflow compatible variant\n Paper: Self-training with Noisy Student improves ImageNet classification (https://arxiv.org/abs/1911.04252)\n \"\"\"\n # NOTE for train, drop_rate should be 0.5\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_l2_ns_475', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs)\n return model\n\n\ndef tf_efficientnet_l2_ns(pretrained=False, **kwargs):\n \"\"\" EfficientNet-L2 NoisyStudent. Tensorflow compatible variant\n Paper: Self-training with Noisy Student improves ImageNet classification (https://arxiv.org/abs/1911.04252)\n \"\"\"\n # NOTE for train, drop_rate should be 0.5\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet(\n 'tf_efficientnet_l2_ns', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs)\n return model\n\n\ndef tf_efficientnet_es(pretrained=False, **kwargs):\n \"\"\" EfficientNet-Edge Small. Tensorflow compatible variant \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet_edge(\n 'tf_efficientnet_es', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)\n return model\n\n\ndef tf_efficientnet_em(pretrained=False, **kwargs):\n \"\"\" EfficientNet-Edge-Medium. Tensorflow compatible variant \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet_edge(\n 'tf_efficientnet_em', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)\n return model\n\n\ndef tf_efficientnet_el(pretrained=False, **kwargs):\n \"\"\" EfficientNet-Edge-Large. 
Tensorflow compatible variant \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet_edge(\n 'tf_efficientnet_el', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)\n return model\n\n\ndef tf_efficientnet_cc_b0_4e(pretrained=False, **kwargs):\n \"\"\" EfficientNet-CondConv-B0 w/ 4 Experts \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet_condconv(\n 'tf_efficientnet_cc_b0_4e', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)\n return model\n\n\ndef tf_efficientnet_cc_b0_8e(pretrained=False, **kwargs):\n \"\"\" EfficientNet-CondConv-B0 w/ 8 Experts \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet_condconv(\n 'tf_efficientnet_cc_b0_8e', channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=2,\n pretrained=pretrained, **kwargs)\n return model\n\n\ndef tf_efficientnet_cc_b1_8e(pretrained=False, **kwargs):\n \"\"\" EfficientNet-CondConv-B1 w/ 8 Experts \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet_condconv(\n 'tf_efficientnet_cc_b1_8e', channel_multiplier=1.0, depth_multiplier=1.1, experts_multiplier=2,\n pretrained=pretrained, **kwargs)\n return model\n\n\ndef tf_efficientnet_lite0(pretrained=False, **kwargs):\n \"\"\" EfficientNet-Lite0. Tensorflow compatible variant \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet_lite(\n 'tf_efficientnet_lite0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)\n return model\n\n\ndef tf_efficientnet_lite1(pretrained=False, **kwargs):\n \"\"\" EfficientNet-Lite1. Tensorflow compatible variant \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet_lite(\n 'tf_efficientnet_lite1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)\n return model\n\n\ndef tf_efficientnet_lite2(pretrained=False, **kwargs):\n \"\"\" EfficientNet-Lite2. Tensorflow compatible variant \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet_lite(\n 'tf_efficientnet_lite2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs)\n return model\n\n\ndef tf_efficientnet_lite3(pretrained=False, **kwargs):\n \"\"\" EfficientNet-Lite3. Tensorflow compatible variant \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet_lite(\n 'tf_efficientnet_lite3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)\n return model\n\n\ndef tf_efficientnet_lite4(pretrained=False, **kwargs):\n \"\"\" EfficientNet-Lite4. 
Tensorflow compatible variant \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_efficientnet_lite(\n 'tf_efficientnet_lite4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs)\n return model\n\n\ndef mixnet_s(pretrained=False, **kwargs):\n \"\"\"Creates a MixNet Small model.\n \"\"\"\n # NOTE for train set drop_rate=0.2\n model = _gen_mixnet_s(\n 'mixnet_s', channel_multiplier=1.0, pretrained=pretrained, **kwargs)\n return model\n\n\ndef mixnet_m(pretrained=False, **kwargs):\n \"\"\"Creates a MixNet Medium model.\n \"\"\"\n # NOTE for train set drop_rate=0.25\n model = _gen_mixnet_m(\n 'mixnet_m', channel_multiplier=1.0, pretrained=pretrained, **kwargs)\n return model\n\n\ndef mixnet_l(pretrained=False, **kwargs):\n \"\"\"Creates a MixNet Large model.\n \"\"\"\n # NOTE for train set drop_rate=0.25\n model = _gen_mixnet_m(\n 'mixnet_l', channel_multiplier=1.3, pretrained=pretrained, **kwargs)\n return model\n\n\ndef mixnet_xl(pretrained=False, **kwargs):\n \"\"\"Creates a MixNet Extra-Large model.\n Not a paper spec, experimental def by RW w/ depth scaling.\n \"\"\"\n # NOTE for train set drop_rate=0.25, drop_connect_rate=0.2\n model = _gen_mixnet_m(\n 'mixnet_xl', channel_multiplier=1.6, depth_multiplier=1.2, pretrained=pretrained, **kwargs)\n return model\n\n\ndef mixnet_xxl(pretrained=False, **kwargs):\n \"\"\"Creates a MixNet Double Extra Large model.\n Not a paper spec, experimental def by RW w/ depth scaling.\n \"\"\"\n # NOTE for train set drop_rate=0.3, drop_connect_rate=0.2\n model = _gen_mixnet_m(\n 'mixnet_xxl', channel_multiplier=2.4, depth_multiplier=1.3, pretrained=pretrained, **kwargs)\n return model\n\n\ndef tf_mixnet_s(pretrained=False, **kwargs):\n \"\"\"Creates a MixNet Small model. Tensorflow compatible variant\n \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_mixnet_s(\n 'tf_mixnet_s', channel_multiplier=1.0, pretrained=pretrained, **kwargs)\n return model\n\n\ndef tf_mixnet_m(pretrained=False, **kwargs):\n \"\"\"Creates a MixNet Medium model. Tensorflow compatible variant\n \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_mixnet_m(\n 'tf_mixnet_m', channel_multiplier=1.0, pretrained=pretrained, **kwargs)\n return model\n\n\ndef tf_mixnet_l(pretrained=False, **kwargs):\n \"\"\"Creates a MixNet Large model. Tensorflow compatible variant\n \"\"\"\n kwargs['bn_eps'] = BN_EPS_TF_DEFAULT\n kwargs['pad_type'] = 'same'\n model = _gen_mixnet_m(\n 'tf_mixnet_l', channel_multiplier=1.3, pretrained=pretrained, **kwargs)\n return model\n" ]
[ [ "torch.nn.Sequential", "torch.nn.Dropout", "torch.nn.functional.dropout", "torch.nn.Flatten", "torch.nn.Linear", "torch.nn.AdaptiveAvgPool2d" ] ]
diagccmc/pyblock
[ "9195e4231ce5fdbae1046bec456867099a40adfa" ]
[ "pyblock/pd_utils.py" ]
[ "'''Pandas-based wrapper around :mod:`pyblock.blocking`.'''\n\n# copyright: (c) 2014 James Spencer\n# license: modified BSD license; see LICENSE for further details.\n\nimport numpy\nimport pandas as pd\nimport pyblock.blocking\n\ndef reblock(data, axis=0, weights=None):\n '''Blocking analysis of correlated data.\n\nParameters\n----------\ndata : :class:`pandas.Series` or :class:`pandas.DataFrame`\n Data to be blocked. See ``axis`` for order.\naxis : int\n If non-zero, variables in data are in rows with the columns\n corresponding to the observation values. Blocking is then performed along\n the rows. Otherwise each column is a variable, the observations are in the\n columns and blocking is performed down the columns. Only used if data is\n a :class:`pandas.DataFrame`.\nweights : :class:`pandas.Series` or :class:`pandas.DataFrame`\n A 1D weighting of the data to be reblocked. For multidimensional data an\n identical weighting is applied to the data for each variable.\n\nReturns\n-------\ndata_len : :class:`pandas.Series`\n Number of data points used in each reblocking iteration. Note some\n reblocking iterations discard a data point if there were an odd number of\n data points in the previous iteration.\nblock_info : :class:`pandas.DataFrame`\n Mean, standard error and estimated standard error for each variable at each\n reblock step.\ncovariance : :class:`pandas.DataFrame`\n Covariance matrix at each reblock step.\n\nSee also\n--------\n:func:`pyblock.blocking.reblock`:\n numpy-based implementation; see for documentation and notes on the\n reblocking procedure. :func:`pyblock.pd_utils.reblock` is a simple wrapper\n around this.\n'''\n\n try:\n columns = [data.name]\n if data.name is None:\n columns = ['data']\n axis = 0\n except AttributeError:\n # Have DataFrame rather than Series.\n if axis:\n columns = data.index.values\n else:\n columns = data.columns.values\n\n if weights is not None:\n if isinstance(weights, pd.DataFrame):\n if numpy.min(weights.shape) > 1:\n raise RuntimeError(\"cannot handle multidimensional weights\")\n weights = numpy.array(weights.unstack())\n else:\n weights = weights.values\n\n block_stats = pyblock.blocking.reblock(data.values,\n rowvar=axis,\n weights=weights)\n data_size = data.shape[axis]\n optimal_blocks = pyblock.blocking.find_optimal_block(data_size, block_stats)\n\n # Now nicely package it up into a dict of pandas/built-in objects.\n\n iblock = []\n data_len = []\n block_info = []\n covariance = []\n keys = ['mean', 'standard error', 'standard error error', 'optimal block']\n multi_keys = [(col,k) for col in columns for k in keys]\n multi_keys = pd.MultiIndex.from_tuples(multi_keys)\n null = numpy.zeros(len(columns))\n for stat in block_stats:\n # Contents of stat:\n # (iblock, data_len, mean, covariance, standard err,\n # esimate of error in standard error)\n iblock.append(stat.block)\n data_len.append(stat.ndata)\n\n pd_stat = [stat.mean, stat.std_err, stat.std_err_err, null]\n pd_stat = numpy.array(pd_stat).T.flatten()\n block_info.append(pd.Series(pd_stat, index=multi_keys))\n\n # Covariance is a 2D matrix (in general) so can't put it into\n # a DataFrame with everything else, so put it in its own.\n cov = numpy.array(stat.cov, ndmin=2)\n covariance.append(pd.DataFrame(cov, index=columns, columns=columns))\n\n data_len = pd.Series(data_len, index=iblock, name='data length')\n data_len.index.name = 'reblock'\n\n block_info = pd.concat(block_info, axis=1, keys=iblock).transpose()\n block_info.index.name = 'reblock'\n loc = 
block_info.columns.get_level_values(1) == 'optimal block'\n block_info.loc[:,loc] = ''\n\n covariance = pd.concat(covariance, keys=iblock)\n covariance.index.names = ['reblock', '']\n\n for (ivar, optimal) in enumerate(optimal_blocks):\n if optimal >= 0:\n block_info.loc[optimal,(columns[ivar], 'optimal block')] = '<--- '\n\n return (data_len, block_info, covariance)\n\ndef optimal_block(block_sub_info):\n '''Get the optimal block value from the reblocking data.\n\nParameters\n----------\nblock_sub_info: :class:`pandas.DataFrame` or :class:`pandas.Series`\n Reblocking data (i.e. the first item of the tuple returned by ``reblock``),\n or a subset thereof containing the statistics columns for one or more data\n items.\n\nReturns\n-------\nindex : int\n Reblocking index corresponding to the reblocking iteration at which serial\n correlation has been removed (as estimated by the procedure in\n ``pyblock.blocking.find_optimal_block``). If multiple data sets are passed\n in block_sub_info, this is the maximum index out of all data sets. Set to\n inf if an optimal block is not found for a data set.\n\nRaises\n------\nValueError\n block_sub_info contains no Series or column in DataFrame named 'optimal\n block'.\n'''\n\n # Handle the following cases:\n # * Series with optimal block in it.\n # * block_sub_info DataFrame for one variable (no hierarchical column names)\n # * block_sub_info DataFrame for multiple variables (hierarchical column names)\n # (each set of columns for one variable in block_sub_info contains the mean,\n # standard error and estimated error in the standard error for that\n # variable).\n try:\n if 'optimal block' in block_sub_info.name:\n iterator = [('optimal block', block_sub_info)]\n else:\n raise ValueError('No optimal block data')\n except AttributeError:\n # Have DataFrame.\n # 'optimal block' is in the innermost level.\n level = block_sub_info.columns.nlevels - 1\n opt_cols = [col == 'optimal block'\n for col in block_sub_info.columns.get_level_values(level)]\n if not any(opt_cols):\n raise ValueError('No optimal block data')\n iterator = block_sub_info.loc[:,opt_cols].iteritems()\n\n opt = -1\n for (name, col) in iterator:\n col_opt = col[col != ''].index\n if len(col_opt) == 0:\n opt = float('inf')\n elif len(col_opt) == 1:\n opt = max(col_opt[0], opt)\n else:\n raise ValueError('Multiple entries listed as optimal.')\n\n return opt\n\ndef reblock_summary(block_sub_info):\n '''Get the data corresponding to the optimal block from the reblocking data.\n\nParameters\n----------\nblock_sub_info : :class:`pandas.DataFrame` or :class:`pandas.Series`\n Reblocking data (i.e. the first item of the tuple returned by ``reblock``),\n or a subset thereof containing the statistics columns for one or more data\n items.\n\nReturns\n-------\nsummary : :class:`pandas.DataFrame`\n Mean, standard error and estimate of the error in the standard error\n corresponding to the optimal block size in the reblocking data (or largest\n optimal size if multiple data sets are given. The index is labelled with\n the data name, if known. 
An empty DataFrame is returned if no optimal block\n size was found.\n'''\n opt = optimal_block(block_sub_info)\n if opt < float('inf'):\n summary = block_sub_info.loc[opt]\n # Convert to DataFrame, with statistics in columns.\n if summary.index.nlevels == 1:\n # Sadly don't know the data name; leave to user.\n summary = pd.DataFrame(summary).T\n else:\n # Have hierarchical index; can pivot into a DataFrame.\n # Each row will be labelled by the data name.\n summary = summary.unstack()\n summary.drop('optimal block', axis=1, inplace=True)\n else:\n summary = pd.DataFrame()\n return summary\n" ]
[ [ "pandas.concat", "pandas.Series", "numpy.min", "pandas.MultiIndex.from_tuples", "pandas.DataFrame", "numpy.array" ] ]
rudibakaal/Predicting-Credit-Card-Defaults-with-Tensorflow
[ "c5dde5d06adecce0e5e85564af922f11e79f89ec" ]
[ "cc_default.py" ]
[ "import tensorflow as tf\nfrom tensorflow import keras\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import LabelEncoder\nfrom keras.utils.vis_utils import plot_model\nimport matplotlib.style as style\n\nds = pd.read_csv('default of credit card clients.csv',engine='python',skiprows=1)\nds = ds.reindex(np.random.permutation(ds.index))\nds = ds.dropna()\n\ntrain = ds\n\ns = StandardScaler()\nfor x in train.columns:\n if x != 'default payment next month':\n train[x] = s.fit_transform(train[x].values.reshape(-1, 1)).astype('float64')\n\n\ntrain_features = train.drop(['default payment next month','ID'],axis=1)\ntrain_label = train.pop('default payment next month')\n\n\ninput_dim = train_features.shape[1]\nmodel = keras.models.Sequential()\nmodel.add(keras.layers.Dense(32, input_dim = input_dim, activation=tf.keras.layers.LeakyReLU(),kernel_initializer='he_uniform'))\nmodel.add(keras.layers.Dense(16, activation=tf.keras.layers.LeakyReLU(),kernel_initializer='he_uniform'))\nmodel.add(keras.layers.Dense(16, activation=tf.keras.layers.LeakyReLU(),kernel_initializer='he_uniform'))\nmodel.add(keras.layers.Dense(1, activation='sigmoid',kernel_initializer='he_uniform'))\n\n\nmodel.compile(optimizer='adam', loss='binary_crossentropy',\n metrics = 'binary_accuracy')\n\n\nhistory = model.fit(train_features, train_label, epochs=55, validation_split=0.5)\n\nmetrics = np.mean(history.history['val_binary_accuracy'])\nresults = model.evaluate(train_features, train_label)\nprint('\\nLoss, Binary_accuracy: \\n',(results))\n\n\nstyle.use('dark_background')\npd.DataFrame(history.history).plot(figsize=(11, 7),linewidth=4)\nplt.title('Binary Cross-entropy',fontsize=14, fontweight='bold')\nplt.xlabel('Epochs',fontsize=13)\nplt.ylabel('Metrics',fontsize=13)\nplt.show()\n" ]
[ [ "pandas.read_csv", "tensorflow.keras.layers.LeakyReLU", "matplotlib.pyplot.title", "matplotlib.style.use", "tensorflow.keras.layers.Dense", "pandas.DataFrame", "numpy.random.permutation", "numpy.mean", "matplotlib.pyplot.xlabel", "sklearn.preprocessing.StandardScaler", "matplotlib.pyplot.show", "tensorflow.keras.models.Sequential", "matplotlib.pyplot.ylabel" ] ]
CityU-AIM-Group/D2Net
[ "c42f45addf9ca6c734a1335fd466abd38aa2968c" ]
[ "utils/lovasz_loss.py" ]
[ "\"\"\"\nLovasz-Softmax and Jaccard hinge loss in PyTorch\nMaxim Berman 2018 ESAT-PSI KU Leuven (MIT License)\n\"\"\"\n\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport numpy as np\nfrom itertools import filterfalse as ifilterfalse\n\n\ndef lovasz_grad(gt_sorted):\n \"\"\"\n Computes gradient of the Lovasz extension w.r.t sorted errors\n See Alg. 1 in paper\n \"\"\"\n p = len(gt_sorted)\n gts = gt_sorted.sum()\n intersection = gts - gt_sorted.float().cumsum(0)\n union = gts + (1 - gt_sorted).float().cumsum(0)\n jaccard = 1. - intersection / union\n if p > 1: # cover 1-pixel case\n jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]\n return jaccard\n\n\ndef iou_binary(preds, labels, EMPTY=1., ignore=None, per_image=True):\n \"\"\"\n IoU for foreground class\n binary: 1 foreground, 0 background\n \"\"\"\n if not per_image:\n preds, labels = (preds,), (labels,)\n ious = []\n for pred, label in zip(preds, labels):\n intersection = ((label == 1) & (pred == 1)).sum()\n union = ((label == 1) | ((pred == 1) & (label != ignore))).sum()\n if not union:\n iou = EMPTY\n else:\n iou = intersection.float() / union.float()\n ious.append(iou)\n iou = mean(ious) # mean accross images if per_image\n return 100 * iou\n\n\ndef iou(preds, labels, C, EMPTY=1., ignore=None, per_image=False):\n \"\"\"\n Array of IoU for each (non ignored) class\n \"\"\"\n if not per_image:\n preds, labels = (preds,), (labels,)\n ious = []\n for pred, label in zip(preds, labels):\n print(pred.shape, label.shape)\n iou = [] \n for i in range(C):\n if i != ignore: # The ignored label is sometimes among predicted classes (ENet - CityScapes)\n intersection = ((label == i) & (pred == i)).sum()\n union = ((label == i) | ((pred == i) & (label != ignore))).sum()\n if not union:\n iou.append(EMPTY)\n else:\n iou.append(intersection.float() / union.float())\n ious.append(iou)\n ious = list(map(mean, zip(*ious))) # mean accross images if per_image\n return 100 * np.array(ious)\n\n\n# --------------------------- BINARY LOSSES ---------------------------\n\n\ndef lovasz_hinge(logits, labels, per_image=True, ignore=None):\n \"\"\"\n Binary Lovasz hinge loss\n logits: [B, H, W] Variable, logits at each pixel (between -\\infty and +\\infty)\n labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)\n per_image: compute the loss per image instead of per batch\n ignore: void class id\n \"\"\"\n if per_image:\n loss = mean(lovasz_hinge_flat(*flatten_binary_scores(log.unsqueeze(0), lab.unsqueeze(0), ignore))\n for log, lab in zip(logits, labels))\n else:\n loss = lovasz_hinge_flat(*flatten_binary_scores(logits, labels, ignore))\n return loss\n\n\ndef lovasz_hinge_flat(logits, labels):\n \"\"\"\n Binary Lovasz hinge loss\n logits: [P] Variable, logits at each prediction (between -\\infty and +\\infty)\n labels: [P] Tensor, binary ground truth labels (0 or 1)\n ignore: label to ignore\n \"\"\"\n if len(labels) == 0:\n # only void pixels, the gradients should be 0\n return logits.sum() * 0.\n signs = 2. * labels.float() - 1.\n errors = (1. 
- logits * Variable(signs))\n errors_sorted, perm = torch.sort(errors, dim=0, descending=True)\n perm = perm.data\n gt_sorted = labels[perm]\n grad = lovasz_grad(gt_sorted)\n # loss = torch.dot(F.relu(errors_sorted), Variable(grad))\n loss = torch.dot(F.elu(errors_sorted), Variable(grad))\n return loss\n\n\ndef flatten_binary_scores(scores, labels, ignore=None):\n \"\"\"\n Flattens predictions in the batch (binary case)\n Remove labels equal to 'ignore'\n \"\"\"\n scores = scores.view(-1)\n labels = labels.view(-1)\n if ignore is None:\n return scores, labels\n valid = (labels != ignore)\n vscores = scores[valid]\n vlabels = labels[valid]\n return vscores, vlabels\n\n\nclass StableBCELoss(torch.nn.modules.Module):\n def __init__(self):\n super(StableBCELoss, self).__init__()\n def forward(self, input, target):\n neg_abs = - input.abs()\n loss = input.clamp(min=0) - input * target + (1 + neg_abs.exp()).log()\n return loss.mean()\n\n\ndef binary_xloss(logits, labels, ignore=None):\n \"\"\"\n Binary Cross entropy loss\n logits: [B, H, W] Variable, logits at each pixel (between -\\infty and +\\infty)\n labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)\n ignore: void class id\n \"\"\"\n logits, labels = flatten_binary_scores(logits, labels, ignore)\n loss = StableBCELoss()(logits, Variable(labels.float()))\n return loss\n\n\n# --------------------------- MULTICLASS LOSSES ---------------------------\n\n\ndef lovasz_softmax(probas, labels, only_present=False, per_image=False, ignore=None):\n \"\"\"\n Multi-class Lovasz-Softmax loss\n probas: [B, C, H, W] Variable, class probabilities at each prediction (between 0 and 1)\n labels: [B, H, W] Tensor, ground truth labels (between 0 and C - 1)\n only_present: average only on classes present in ground truth\n per_image: compute the loss per image instead of per batch\n ignore: void class labels\n \"\"\"\n probas = F.softmax(probas, dim=1)\n if per_image:\n loss = mean(lovasz_softmax_flat(*flatten_probas(prob.unsqueeze(0), lab.unsqueeze(0), ignore), only_present=only_present)\n for prob, lab in zip(probas, labels))\n else:\n loss = lovasz_softmax_flat(*flatten_probas(probas, labels, ignore), only_present=only_present)\n return loss\n\n\ndef lovasz_softmax_flat(probas, labels, only_present=False):\n \"\"\"\n Multi-class Lovasz-Softmax loss\n probas: [P, C] Variable, class probabilities at each prediction (between 0 and 1)\n labels: [P] Tensor, ground truth labels (between 0 and C - 1)\n only_present: average only on classes present in ground truth\n \"\"\"\n if probas.numel() == 0:\n # only void pixels, the gradients should be 0\n return probas * 0.\n C = probas.size(1)\n \n C = probas.size(1)\n losses = []\n for c in range(C):\n fg = (labels == c).float() # foreground for class c\n if only_present and fg.sum() == 0:\n continue\n errors = (Variable(fg) - probas[:, c]).abs()\n errors_sorted, perm = torch.sort(errors, 0, descending=True)\n perm = perm.data\n fg_sorted = fg[perm]\n losses.append(torch.dot(errors_sorted, Variable(lovasz_grad(fg_sorted))))\n return mean(losses)\n\n\ndef flatten_probas(probas, labels, ignore=None):\n \"\"\"\n Flattens predictions in the batch\n \"\"\"\n probas = probas.view(probas.size(0), probas.size(1), -1) # N,C,H,W => N,C,H*W\n probas = probas.transpose(1, 2) # N,C,H*W => N,H*W,C\n probas = probas.contiguous().view(-1, probas.size(2))\n\n labels = labels.view(-1)\n if ignore is None:\n return probas, labels\n valid = (labels != ignore)\n vprobas = probas[valid.nonzero().squeeze()]\n vlabels = labels[valid]\n 
return vprobas, vlabels\n\ndef xloss(logits, labels, ignore=None):\n \"\"\"\n Cross entropy loss\n \"\"\"\n return F.cross_entropy(logits, Variable(labels), ignore_index=255)\n\n\n# --------------------------- HELPER FUNCTIONS ---------------------------\ndef isnan(x):\n return x != x\n \n \ndef mean(l, ignore_nan=True, empty=0):\n \"\"\"\n nanmean compatible with generators.\n \"\"\"\n l = iter(l)\n if ignore_nan:\n l = ifilterfalse(isnan, l)\n try:\n n = 1\n acc = next(l)\n except StopIteration:\n if empty == 'raise':\n raise ValueError('Empty mean')\n return empty\n for n, v in enumerate(l, 2):\n acc += v\n if n == 1:\n return acc\n return acc / n" ]
[ [ "torch.nn.functional.softmax", "torch.sort", "torch.nn.functional.elu", "numpy.array", "torch.autograd.Variable" ] ]
liepeiming/captcha_trainer
[ "51459fc0d18324145a0dbdeb0ef6cc2ce47c71a5" ]
[ "utils/data.py" ]
[ "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Author: kerlomz <[email protected]>\nimport os\nimport hashlib\nimport utils\nimport random\nimport utils.sparse\nimport tensorflow as tf\nimport numpy as np\nfrom constants import RunMode, ModelField, DatasetType, LossFunction\nfrom config import ModelConfig, EXCEPT_FORMAT_MAP\nfrom encoder import Encoder\nfrom exception import exception\n\n\nclass DataIterator:\n \"\"\"数据集迭代类\"\"\"\n\n def __init__(self, model_conf: ModelConfig, mode: RunMode, ran_captcha=None):\n \"\"\"\n :param model_conf: 工程配置\n :param mode: 运行模式(区分:训练/验证)\n \"\"\"\n self.model_conf = model_conf\n self.mode = mode\n self.path_map = {\n RunMode.Trains: self.model_conf.trains_path[DatasetType.TFRecords],\n RunMode.Validation: self.model_conf.validation_path[DatasetType.TFRecords]\n }\n self.batch_map = {\n RunMode.Trains: self.model_conf.batch_size,\n RunMode.Validation: self.model_conf.validation_batch_size\n }\n self.data_dir = self.path_map[mode]\n self.next_element = None\n self.image_path = []\n self.label_list = []\n self._label_list = []\n self._size = 0\n self.encoder = Encoder(self.model_conf, self.mode)\n self.ran_captcha = ran_captcha\n\n @staticmethod\n def parse_example(serial_example):\n\n features = tf.io.parse_single_example(\n serial_example,\n features={\n 'label': tf.io.FixedLenFeature([], tf.string),\n 'input': tf.io.FixedLenFeature([], tf.string),\n }\n )\n _input = tf.cast(features['input'], tf.string)\n _label = tf.cast(features['label'], tf.string)\n\n return _input, _label\n\n @staticmethod\n def total_sample(file_name):\n sample_nums = 0\n for _ in tf.compat.v1.python_io.tf_record_iterator(file_name):\n sample_nums += 1\n return sample_nums\n\n def read_sample_from_tfrecords(self, path):\n \"\"\"\n 从TFRecords中读取样本\n :param path: TFRecords文件路径\n :return:\n \"\"\"\n if isinstance(path, list):\n for p in path:\n self._size += self.total_sample(p)\n else:\n self._size = self.total_sample(path)\n\n min_after_dequeue = 1000\n batch = self.batch_map[self.mode]\n if self.model_conf.da_random_captcha['Enable']:\n batch = random.randint(int(batch / 3 * 2), batch)\n\n dataset_train = tf.data.TFRecordDataset(\n filenames=path,\n num_parallel_reads=20\n ).map(self.parse_example)\n dataset_train = dataset_train.shuffle(\n min_after_dequeue,\n reshuffle_each_iteration=True\n ).prefetch(128).batch(batch, drop_remainder=True).repeat()\n iterator = tf.compat.v1.data.make_one_shot_iterator(dataset_train)\n self.next_element = iterator.get_next()\n\n @property\n def size(self):\n \"\"\"样本数\"\"\"\n return self._size\n\n @property\n def labels(self):\n \"\"\"标签\"\"\"\n return self.label_list\n\n @staticmethod\n def to_sparse(input_batch, label_batch):\n \"\"\"密集输入转稀疏\"\"\"\n batch_inputs = input_batch\n batch_labels = utils.sparse.sparse_tuple_from_sequences(label_batch)\n return batch_inputs, batch_labels\n\n def generate_captcha(self, num) -> (list, list):\n _images = []\n _labels = []\n for i in range(num):\n try:\n image, labels, font_type = self.ran_captcha.create()\n _images.append(image)\n _labels.append(''.join(labels).encode())\n except Exception as e:\n print(e)\n pass\n return _images, _labels\n\n def generate_batch_by_tfrecords(self, session):\n \"\"\"根据TFRecords生成当前批次,输入为当前TensorFlow会话,输出为稀疏型X和Y\"\"\"\n # print(session.graph)\n batch = self.batch_map[self.mode]\n\n _input, _label = session.run(self.next_element)\n if self.model_conf.da_random_captcha['Enable']:\n remain_batch = batch - len(_label)\n extra_input, extra_label = 
self.generate_captcha(remain_batch)\n _input = np.concatenate((_input, extra_input), axis=0)\n _label = np.concatenate((_label, extra_label), axis=0)\n\n input_batch = []\n label_batch = []\n for index, (i1, i2) in enumerate(zip(_input, _label)):\n try:\n label_array = self.encoder.text(i2)\n if self.model_conf.model_field == ModelField.Image:\n input_array = self.encoder.image(i1)\n else:\n input_array = self.encoder.text(i1)\n\n if input_array is None:\n # tf.compat.v1.logging.warn(\n # \"{}, Cannot identify image file labeled: {}, ignored.\".format(input_array, label_array))\n continue\n\n if isinstance(input_array, str):\n # tf.compat.v1.logging.warn(\"{}, \\nInput errors labeled: {} [{}], ignored.\".format(input_array, i1, label_array))\n continue\n if isinstance(label_array, dict):\n # tf.logging.warn(\"The sample label {} contains invalid charset: {}.\".format(\n # label_array['label'], label_array['char']\n # ))\n continue\n\n if input_array.shape[-1] != self.model_conf.image_channel:\n # pass\n tf.compat.v1.logging.warn(\"{}, \\nInput shape: {}, ignored.\".format(\n self.model_conf.image_channel, input_array.shape[-1])\n )\n continue\n\n label_len_correct = len(label_array) != self.model_conf.max_label_num\n using_cross_entropy = self.model_conf.loss_func == LossFunction.CrossEntropy\n if label_len_correct and using_cross_entropy and not self.model_conf.auto_padding:\n tf.compat.v1.logging.warn(\"The number of labels must be fixed when using cross entropy, label: {}, \"\n \"the number of tags is incorrect, ignored.\".format(i2))\n continue\n\n if len(label_array) > self.model_conf.max_label_num and using_cross_entropy:\n tf.compat.v1.logging.warn(\n \"The number of label[{}] exceeds the maximum number of labels, ignored.{}\".format(i2,\n label_array))\n continue\n\n input_batch.append(input_array)\n label_batch.append(label_array)\n except OSError:\n random_suffix = hashlib.md5(i1).hexdigest()\n file_format = EXCEPT_FORMAT_MAP[self.model_conf.model_field]\n with open(file=\"oserror_{}.{}\".format(random_suffix, file_format), mode=\"wb\") as f:\n f.write(i1)\n tf.compat.v1.logging.warn(\"OSError [{}]\".format(i2))\n continue\n\n # 如果图片尺寸不固定则padding当前批次,使用最大的宽度作为序列最大长度\n if self.model_conf.model_field == ModelField.Image and self.model_conf.resize[0] == -1:\n input_batch = tf.keras.preprocessing.sequence.pad_sequences(\n sequences=input_batch,\n maxlen=None,\n dtype='float32',\n padding='post',\n truncating='post',\n value=0\n )\n\n self.label_list = label_batch\n return self.to_sparse(input_batch, self.label_list)\n" ]
[ [ "tensorflow.compat.v1.data.make_one_shot_iterator", "tensorflow.data.TFRecordDataset", "tensorflow.cast", "numpy.concatenate", "tensorflow.io.FixedLenFeature", "tensorflow.compat.v1.python_io.tf_record_iterator", "tensorflow.keras.preprocessing.sequence.pad_sequences" ] ]
yongzhuo/Macadam
[ "794a29c760ce25264388c3a85a6b118733afb023" ]
[ "macadam/tc/t00_predict.py" ]
[ "# !/usr/bin/python\n# -*- coding: utf-8 -*-\n# @time : 2020/5/8 21:38\n# @author : Mo\n# @function: class of model predict\n\n\n# 适配linux\nimport sys\nimport os\npath_root = os.path.abspath(os.path.join(os.path.dirname(__file__), \"../..\"))\nsys.path.append(path_root)\n## cpu-gpu与tf.keras\n# os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\nos.environ[\"TF_KERAS\"] = \"1\"\n\nfrom macadam.base.utils import padding_sequences, metrics_report\nfrom macadam.base.layers import custom_objects_macadam\nfrom macadam.base.embedding import embedding_map\nfrom macadam.base.utils import load_json\nfrom macadam import keras, K, L, M, O\nfrom collections import OrderedDict\nfrom typing import List, Dict\nfrom tqdm import tqdm\nimport numpy as np\n\n\nclass ModelPredict():\n def __init__(self, path_dir):\n \"\"\"\n init\n \"\"\"\n self.path_model_info = os.path.join(path_dir, \"macadam.info\")\n self.path_model_h5 = os.path.join(path_dir, \"macadam.h5\")\n self.path_dir = path_dir\n # os.environ[\"MACADAM_LEVEL\"] = \"PREDICT\"\n self.load_tokenizer()\n self.load_model()\n\n def load_model(self):\n \"\"\"\n load model of keras of h5 which include graph-node and custom_objects \n \"\"\"\n self.model = M.load_model(self.path_model_h5, compile=False)\n\n def load_tokenizer(self):\n \"\"\"\n load model_info of model, hyper_parameters/label2index/index2label/vocab_dict\n \"\"\"\n self.model_info = load_json(self.path_model_info)\n self.l2i = self.model_info.get(\"label\", {}).get(\"l2i\", {})\n self.i2l = self.model_info.get(\"label\", {}).get(\"i2l\", {})\n hyper_parameters = self.model_info.get(\"hyper_parameters\", {})\n embed_type = hyper_parameters.get(\"sharing\", {}).get(\"embed_type\", \"bert\").upper()\n token2idx = self.model_info.get(\"vocab\", {}).get(\"token2idx\", {})\n Embedding = embedding_map.get(embed_type)\n self.embedd = Embedding(hyper_parameters)\n\n self.embedd.build_tokenizer_from_dict(token2idx)\n self.length_max = hyper_parameters.get(\"sharing\", {}).get(\"length_max\", 512)\n self.batch_size = hyper_parameters.get(\"sharing\", {}).get(\"batch_size\", 32)\n\n def preprocess_x(self, line_json, limit_lengths: List=None,\n use_seconds: bool = True,\n is_multi: bool = True):\n \"\"\"\n data preprocess of encode\n Args:\n line_json: Dict, input, eg. {\"text\": \"macadam是什么\", \"texts2\": [\"macadam是一个python工具包]} \n limit_lengths: List, max length of each enum in texts2, eg.[128]\n use_seconds: bool, either use [SEP] separate texts2 or not, eg.True\n is_multi: bool, either sign texts2 with [0-1; 0] or not, eg.True\n Returns:\n res: List[Dict]\n \"\"\"\n text = line_json.get(\"text\")\n texts2 = line_json.get(\"texts2\", None)\n idxs = self.embedd.sent2idx(text=text, second_text=texts2, limit_lengths=limit_lengths,\n use_seconds=use_seconds, is_multi=is_multi)\n # sequence接受的是List[List], WORD/RANDOM嵌入时候需要加List\n # idxs = padding_sequences(sequences=[idxs] if type(idxs[0])==int else idxs,\n # length_max=self.length_max, padding=0)\n return idxs\n\n def predict(self, texts: List[Dict],\n use_sort: bool = True) -> List[Dict]:\n \"\"\"\n model predict\n Args:\n texts: input of List<dict>, eg. 
[{\"text\": \"macadam是什么\", \"texts2\": [\"macadam是一个python工具包]}] \n Returns:\n res: List[Dict]\n \"\"\"\n # embedding编码, bert encode\n xs = []\n for text_i in texts:\n text_i_x = self.preprocess_x(text_i)\n xs.append(text_i_x)\n # numpy处理, numpy.array\n xs_array = []\n idxs_np = np.array(xs)\n for i in range(len(idxs_np[0])):\n idxs_array = np.array([inxi[i] for inxi in idxs_np])\n xs_array.append(idxs_array)\n # 模型预测, model predict\n xs_prob = self.model.predict(xs_array)\n # 后处理, post preprocess\n res = []\n for x_prob in xs_prob:\n x_dict = {}\n for i in range(len(self.i2l)):\n x_dict[self.i2l[str(i)]] = x_prob[i]\n res.append(x_dict)\n\n if use_sort:\n res_sort = [sorted(p.items(), key=lambda x: x[1], reverse=True) for p in res]\n res_sort_order = [OrderedDict(rs) for rs in res_sort]\n res = [{k: v for k, v in x.items()} for x in res_sort_order]\n return res\n\n def evaluate(self, texts: List[Dict]):\n \"\"\"\n evaluate of corpus, 数据集验证/打印报告\n Args:\n texts: input of List<dict>, eg. [{\"text\": \"macadam是什么\", \"texts2\": [\"macadam是一个python工具包]}] \n Returns:\n res: List[Dict]\n \"\"\"\n labels_true = []\n labels_pred = []\n texts_batch = []\n # tqdm显示进度\n for i in tqdm(range(len(texts))):\n line = texts[i]\n texts_batch.append(line)\n if len(texts_batch)==self.batch_size:\n # true_y\n labels_true_batch = [tsb.get(\"y\", []) for tsb in texts_batch]\n # pred_y\n texts_batch_x = [tsb.get(\"x\", {}) for tsb in texts_batch]\n labels_predict_batch = self.predict(texts_batch_x)\n # 处理y_true大于length_max的情况\n for i in range(len(labels_predict_batch)):\n labels_pred += [list(labels_predict_batch[i].keys())[0]]\n labels_true += [labels_true_batch[i]]\n texts_batch = []\n # storage, Less than batch_size, 剩余不足批处理尺寸的\n if texts_batch:\n # true_y\n labels_true_batch = [tsb.get(\"y\", []) for tsb in texts_batch]\n # pred_y\n texts_batch_x = [tsb.get(\"x\", {}) for tsb in texts_batch]\n labels_predict_batch = self.predict(texts_batch_x)\n # 处理y_true大于length_max的情况\n for i in range(len(labels_predict_batch)):\n labels_pred += [list(labels_predict_batch[i].keys())[0]]\n labels_true += [labels_true_batch[i]]\n # 获取评估指标/报告打印\n mertics, report = metrics_report(y_true=labels_true, y_pred=labels_pred)\n return mertics, report\n\n\nif __name__ == '__main__':\n from macadam.conf.path_config import path_root, path_tc_baidu_qa_2019\n from macadam.base.utils import txt_write, txt_read\n import json\n\n # 模型目录与加载\n path_dir = os.path.join(path_root, \"data\", \"model\", \"TextCNN_2020\")\n mp = ModelPredict(path_dir)\n texts = [{\"text\": \"五彩斑斓的黑\",\n \"texts2\": []}]\n # 预测\n res = mp.predict(texts)\n print(res)\n # path_train = os.path.join(path_tc_thucnews, \"train.json\")\n # path_dev = os.path.join(path_tc_thucnews, \"dev.json\")\n path_train = os.path.join(path_tc_baidu_qa_2019, \"train.json\")\n path_dev = os.path.join(path_tc_baidu_qa_2019, \"dev.json\")\n datas_dev = txt_read(path_dev)\n datas_dev = [json.loads(dd.strip()) for dd in datas_dev]\n\n # evaluate\n metrics, report = mp.evaluate(datas_dev)\n print(json.dumps(metrics, ensure_ascii=False, indent=4))\n print(report)\n\n # ccks-2020-task-1\n # path_root_kg_2020 = \"D:/soft_install/dataset/game/CCKS/ccks_kg_2020/ccks_7_1_competition_data\"\n # path_train = \"验证集\"\n # path_all = os.path.join(path_root_kg_2020, path_train, \"entity_validation.txt\")\n # questions = txt_read(path_all)\n #\n # res_last = []\n # for ques in questions:\n # text = ques.strip()\n # texts = {\"text\": text,\n # \"texts2\": []}\n # res = mp.predict([texts])\n # # 
print(res)\n # # res_0 = res[0]\n # res_sort = [sorted(p.items(), key=lambda x: x[1], reverse=True) for p in res]\n # label = res_sort[0][0][0]\n # line = ques + \"\\t\" + label + \"\\n\"\n # res_last.append(line)\n # txt_write(res_last, \"entity_validation_14.txt\")\n\n mm = 0\n\n\n\n\n\n\n" ]
[ [ "numpy.array" ] ]
G-Wang/Text2Speech-Pytorch
[ "7bacdd0880825c3eeb08b6899b5c499416e53d0c" ]
[ "tts/preprocess/audio.py" ]
[ "import librosa\nimport librosa.filters\nimport math\nimport numpy as np\nfrom scipy import signal\nfrom hparams import hparams\nfrom scipy.io import wavfile\n\nimport lws\n\n\ndef load_wav(path):\n return librosa.core.load(path, sr=hparams.sample_rate)[0]\n\n\ndef save_wav(wav, path):\n wav = wav * 32767 / max(0.01, np.max(np.abs(wav)))\n wavfile.write(path, hparams.sample_rate, wav.astype(np.int16))\n\n\ndef preemphasis(x):\n from nnmnkwii.preprocessing import preemphasis\n return preemphasis(x, hparams.preemphasis)\n\n\ndef inv_preemphasis(x):\n from nnmnkwii.preprocessing import inv_preemphasis\n return inv_preemphasis(x, hparams.preemphasis)\n\n\ndef spectrogram(y):\n D = _lws_processor().stft(preemphasis(y)).T\n S = _amp_to_db(np.abs(D)) - hparams.ref_level_db\n return _normalize(S)\n\n\ndef inv_spectrogram(spectrogram):\n '''Converts spectrogram to waveform using librosa'''\n S = _db_to_amp(_denormalize(spectrogram) + hparams.ref_level_db) # Convert back to linear\n processor = _lws_processor()\n D = processor.run_lws(S.astype(np.float64).T ** hparams.power)\n y = processor.istft(D).astype(np.float32)\n return inv_preemphasis(y)\n\n\ndef melspectrogram(y):\n D = _lws_processor().stft(preemphasis(y)).T\n S = _amp_to_db(_linear_to_mel(np.abs(D))) - hparams.ref_level_db\n if not hparams.allow_clipping_in_normalization:\n assert S.max() <= 0 and S.min() - hparams.min_level_db >= 0\n return _normalize(S)\n\n\ndef _lws_processor():\n return lws.lws(hparams.fft_size, hparams.hop_size, mode=\"speech\")\n\n\ndef inv_mel_spectrogram(mel_spectrogram):\n D = _denormalize(mel_spectrogram)\n S = _mel_to_linear(_db_to_amp(D + hparams.ref_level_db)) # Convert back to linear\n processor = _lws_processor()\n D = processor.run_lws(S.astype(np.float64).T ** hparams.power)\n y = processor.istft(D).astype(np.float32)\n return inv_preemphasis(y)\n\n\n_mel_basis = None\n\n_inv_mel_basis = None\n\ndef _mel_to_linear(mel_spectrogram):\n\tglobal _inv_mel_basis\n\tif _inv_mel_basis is None:\n\t\t_inv_mel_basis = np.linalg.pinv(_build_mel_basis())\n\treturn np.maximum(1e-10, np.dot(_inv_mel_basis, mel_spectrogram))\n\n\ndef _linear_to_mel(spectrogram):\n global _mel_basis\n if _mel_basis is None:\n _mel_basis = _build_mel_basis()\n return np.dot(_mel_basis, spectrogram)\n\n\ndef _build_mel_basis():\n if hparams.fmax is not None:\n assert hparams.fmax <= hparams.sample_rate // 2\n return librosa.filters.mel(hparams.sample_rate, hparams.fft_size,\n fmin=hparams.fmin, fmax=hparams.fmax,\n n_mels=hparams.num_mels)\n\n\ndef _amp_to_db(x):\n min_level = np.exp(hparams.min_level_db / 20 * np.log(10))\n return 20 * np.log10(np.maximum(min_level, x))\n\n\ndef _db_to_amp(x):\n return np.power(10.0, x * 0.05)\n\n\ndef _normalize(S):\n return np.clip((S - hparams.min_level_db) / -hparams.min_level_db, 0, 1)\n\n\ndef _denormalize(S):\n return (np.clip(S, 0, 1) * -hparams.min_level_db) + hparams.min_level_db" ]
[ [ "numpy.dot", "numpy.log", "numpy.maximum", "numpy.abs", "numpy.clip", "numpy.power" ] ]
taruma/hidrokit
[ "c8b949aa6a81981684a24e5dd1e498ec82cbe0ca" ]
[ "hidrokit/contrib/taruma/hk99.py" ]
[ "\"\"\"manual:\nhttps://gist.github.com/taruma/8dd920bee9fa95cf6eba39cc9d694953\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\n\ndef thiessen_weight(area):\n area_val = list(area.values())\n area_percent = area_val / np.sum(area_val)\n key = list(area.keys())\n return dict(zip(key, area_percent))\n\n\ndef apply_thiessen(dataset, area, columns=None, as_df=True):\n weight = thiessen_weight(area)\n\n columns = columns if columns is not None else dataset.columns\n\n val = []\n for col in columns:\n val.append(dataset[col].values * weight[col])\n\n np_val = np.stack(val, axis=1)\n\n if as_df:\n return pd.DataFrame(\n data=np_val.sum(axis=1), index=dataset.index, columns=['thiessen']\n )\n else:\n return np_val.sum(axis=1)\n" ]
[ [ "numpy.sum", "numpy.stack" ] ]
NetEase-FuXi/EET
[ "f827cef4bfcf8b18e2d4169469052440fe2b216f" ]
[ "example/python/gpt2_transformers_example.py" ]
[ "import torch\r\nimport numpy as np\r\nfrom eet.transformers.modeling_gpt2 import EETGPT2Model\r\nusing_half = False\r\nseq_len = 128\r\nbatch = 5\r\n\r\ndef main():\r\n input = np.random.randint(1000,9000,seq_len * batch,dtype=\"int64\")\r\n inputs = np.random.randint(1000,9000,1 * batch,dtype=\"int64\")\r\n # prompt context\r\n input_full_decoder = torch.from_numpy(input).long().reshape(batch, seq_len).cuda()\r\n\r\n # prediction \r\n input_inc_decoder = torch.from_numpy(inputs).long().reshape(batch, 1).cuda()\r\n\r\n data_type = torch.float32\r\n if using_half:\r\n data_type = torch.float16\r\n\r\n # load pytorch model\r\n eet_model = EETGPT2Model.from_pretrained('gpt2',max_batch = batch, full_seq_len = seq_len,data_type = data_type)\r\n input_ids = input_full_decoder\r\n\r\n first_pass = True\r\n for i in range(100):\r\n print('i--:',i)\r\n res_eet = eet_model(input_ids,first_pass= first_pass)\r\n if first_pass:\r\n first_pass = False\r\n input_ids = input_inc_decoder\r\n\r\nif __name__ == '__main__':\r\n main()\r\n" ]
[ [ "torch.from_numpy", "numpy.random.randint" ] ]
kevinkovalchik/my_prosit
[ "befc201df38bbba78e467649d5c27a1d8577bfad" ]
[ "prysit/converters/generic.py" ]
[ "import pandas as pd\nimport numpy as np\nimport multiprocessing as mp\nimport pyteomics.mass\n\nfrom ..constants import MAX_ION, ION_TYPES, MAX_FRAG_CHARGE\nfrom .. import utils\n\n\naa_comp = dict(pyteomics.mass.std_aa_comp)\naa_comp[\"o\"] = pyteomics.mass.Composition({\"O\": 1})\ntranslate2spectronaut = {\"C\": \"C[Carbamidomethyl (C)]\", \"M(ox)\": \"M[Oxidation (M)]\"}\nshape = [MAX_ION, len(ION_TYPES), MAX_FRAG_CHARGE]\nFragmentNumber = np.zeros(shape, dtype=int)\nFragmentType = np.zeros(shape, dtype=\"object\")\nFragmentCharge = np.zeros(shape, dtype=int)\n\nfor z in range(MAX_FRAG_CHARGE):\n for j in range(MAX_ION):\n for tyi, ty in enumerate(ION_TYPES):\n FragmentNumber[j, tyi, z] = j + 1\n FragmentType[j, tyi, z] = ty\n FragmentCharge[j, tyi, z] = z + 1\n\nFragmentNumber = FragmentNumber.flatten()\nFragmentType = FragmentType.flatten()\nFragmentCharge = FragmentCharge.flatten()\n\n\ndef convert_multiple_spectra(data):\n # modified from \"convert_spectrum\" by Kevin Kovalchik to simplify getting a dataframe from the prediction.\n out_df = pd.DataFrame()\n for i in range(data['intensities_pred'].shape[0]):\n df = pd.DataFrame(\n {\n \"RelativeIntensity\": data[\"intensities_pred\"][i, :],\n \"FragmentMz\": data[\"masses_pred\"][i, :],\n \"idx\": list(range(174)),\n }\n )\n spectrum = df[df.RelativeIntensity > 0].reset_index(drop=True)\n idx = list(spectrum.idx)\n sequence = utils.get_sequence(data[\"sequence_integer\"][i])\n charge = int(data[\"precursor_charge_onehot\"][i].argmax() + 1)\n irt = float(data[\"iRT\"][i])\n precursor_mz = pyteomics.mass.calculate_mass(\n sequence=sequence.replace(\"M(ox)\", \"oM\"), charge=charge, aa_comp=aa_comp\n )\n\n spectrum[\"ModifiedPeptide\"] = sequence\n spectrum[\"LabeledPeptide\"] = sequence\n spectrum[\"StrippedPeptide\"] = spectrum.LabeledPeptide.map(\n lambda p: p.replace(\"M(ox)\", \"M\")\n )\n spectrum[\"PrecursorCharge\"] = charge\n spectrum[\"PrecursorMz\"] = precursor_mz\n spectrum[\"iRT\"] = irt\n spectrum[\"FragmentNumber\"] = FragmentNumber[idx]\n spectrum[\"FragmentType\"] = FragmentType[idx]\n spectrum[\"FragmentCharge\"] = FragmentCharge[idx]\n spectrum[\"FragmentLossType\"] = \"noloss\"\n for source, target in translate2spectronaut.items():\n spectrum[\"ModifiedPeptide\"] = spectrum.ModifiedPeptide.map(\n lambda s: s.replace(source, target)\n )\n spectrum[\"ModifiedPeptide\"] = spectrum.ModifiedPeptide.map(lambda s: \"_\" + s + \"_\")\n del spectrum[\"idx\"]\n out_df = pd.concat([out_df, spectrum], ignore_index=True)\n out_df.reset_index(inplace=True, drop=True)\n return out_df\n\n\ndef convert_spectrum(data):\n\n df = pd.DataFrame(\n {\n \"RelativeIntensity\": data[\"intensities_pred\"],\n \"FragmentMz\": data[\"masses_pred\"],\n \"idx\": list(range(174)),\n }\n )\n spectrum = df[df.RelativeIntensity > 0].reset_index(drop=True)\n idx = list(spectrum.idx)\n sequence = utils.get_sequence(data[\"sequence_integer\"])\n charge = int(data[\"precursor_charge_onehot\"].argmax() + 1)\n irt = float(data[\"iRT\"])\n precursor_mz = pyteomics.mass.calculate_mass(\n sequence=sequence.replace(\"M(ox)\", \"oM\"), charge=charge, aa_comp=aa_comp\n )\n\n spectrum[\"ModifiedPeptide\"] = sequence\n spectrum[\"LabeledPeptide\"] = sequence\n spectrum[\"StrippedPeptide\"] = spectrum.LabeledPeptide.map(\n lambda p: p.replace(\"M(ox)\", \"M\")\n )\n spectrum[\"PrecursorCharge\"] = charge\n spectrum[\"PrecursorMz\"] = precursor_mz\n spectrum[\"iRT\"] = irt\n spectrum[\"FragmentNumber\"] = FragmentNumber[idx]\n spectrum[\"FragmentType\"] = 
FragmentType[idx]\n spectrum[\"FragmentCharge\"] = FragmentCharge[idx]\n spectrum[\"FragmentLossType\"] = \"noloss\"\n for source, target in translate2spectronaut.items():\n spectrum[\"ModifiedPeptide\"] = spectrum.ModifiedPeptide.map(\n lambda s: s.replace(source, target)\n )\n spectrum[\"ModifiedPeptide\"] = spectrum.ModifiedPeptide.map(lambda s: \"_\" + s + \"_\")\n del spectrum[\"idx\"]\n return spectrum\n\n\nclass Converter:\n def __init__(self, data, out_path=None, maxsize=256, batch_size=32): # Kevin Kovalchik changed out_put to default of None\n self.data = data\n self.out_path = out_path\n self.queue = mp.Manager().Queue(maxsize)\n self.batch_size = batch_size\n self.cores = mp.cpu_count()\n\n def batch(self, iterable):\n l = len(iterable)\n for ndx in range(0, l, self.batch_size):\n yield iterable[ndx : min(ndx + self.batch_size, l)]\n\n def slice_data(self, i):\n return {k: d[i] for k, d in self.data.items()}\n\n def fill_queue(self, pool):\n n = self.data[\"sequence_integer\"].shape[0]\n indeces = list(range(n))\n\n for b in self.batch(indeces):\n spectra = pool.map(convert_spectrum, [self.slice_data(i) for i in b])\n for s in spectra:\n self.queue.put(s)\n\n # Stop writing process\n self.queue.put(None)\n\n def get_converted(self):\n while True:\n x = self.queue.get()\n if x is None:\n break\n else:\n yield x\n\n def to_csv(self):\n # keeps file open\n with open(self.out_path, \"w\") as _file:\n converted = self.get_converted()\n spectrum = next(converted)\n spectrum.to_csv(_file, index=False)\n for spectrum in converted:\n spectrum.to_csv(_file, header=False, index=False)\n\n def convert(self):\n io_process = mp.Process(target=self.to_csv)\n io_process.daemon = True\n io_process.start()\n with mp.Pool(processes=self.cores * 2) as pool:\n self.fill_queue(pool)\n io_process.join()\n\n\nif __name__ == \"__main__\":\n\n #data = pwyll.tensorize.read(HDF5_PATH)\n conv = ConverterSP(data, to_spectronaut, OUT_PATH)\n io_process = mp.Process(target=conv.to_csv)\n io_process.daemon = True\n io_process.start()\n with mp.Pool(processes=N_CORES * 2) as pool:\n conv.fill_queue(pool)\n io_process.join()\n" ]
[ [ "pandas.concat", "numpy.zeros", "pandas.DataFrame" ] ]
cirno1w/transport
[ "0eb972c78f9154c0a3f780f197ef9af406b2bb71" ]
[ "src/transbigdata/tests/test_bikedata.py" ]
[ "import transbigdata as tbd\nimport pandas as pd\n\n\nclass TestBikedata:\n def setup_method(self):\n self.data =pd.DataFrame([['713ED7A4B5EA3233E0533C0BA8C09291', '2018-08-27 8:41:46', 0,\n 121.432966, 31.130154],\n ['713ED7A4B5EA3233E0533C0BA8C09291', '2018-08-27 8:58:46', 1,\n 121.435436, 31.135094],\n ['713ED7A4B5EA3233E0533C0BA8C09291', '2018-08-27 17:27:11', 0,\n 121.442523, 31.128701],\n ['713ED7A4B5EA3233E0533C0BA8C09291', '2018-08-27 17:55:01', 0,\n 121.441443, 31.128867],\n ['713ED7A4B5EA3233E0533C0BA8C09291', '2018-08-27 17:48:11', 1,\n 121.445683, 31.135021],\n ['713ED7A4B5EA3233E0533C0BA8C09291', '2018-08-27 17:59:45', 1,\n 121.445293, 31.136567],\n ['713ED7A4B5EA3233E0533C0BA8C09291', '2018-08-27 20:02:16', 0,\n 121.406615, 31.13313],\n ['713ED7A4B5EA3233E0533C0BA8C09291', '2018-08-27 20:08:02', 1,\n 121.413019, 31.128553000000004],\n ['713ED790D6AE3233E0533C0BA8C09291', '2018-08-27 6:55:14', 0,\n 121.355498346732, 31.233843474138],\n ['713ED790D6AE3233E0533C0BA8C09291', '2018-08-27 7:01:51', 1,\n 121.362343645231, 31.2392523563195]], columns=['BIKE_ID', 'DATA_TIME', 'LOCK_STATUS', 'LONGITUDE', 'LATITUDE'])\n\n def test_bikedata(self):\n assert len(tbd.bikedata_to_od(self.data,startend = ['2018-08-27 00:00:00','2018-08-28 00:00:00'])[0])==5\n assert len(tbd.bikedata_to_od(self.data)[1])==3\n\n" ]
[ [ "pandas.DataFrame" ] ]
mjdroz/StatisticsCalculator
[ "6be77b650b16e1c3e8ed6160905d99e58449e9b4" ]
[ "RandomGenerator/randomListSelectionSeed.py" ]
[ "from numpy import random\nfrom RandomGenerator.randomListSelection import randomListSelection\n\ndef randomListSelectionSeed(list, seed):\n state = random.get_state()\n random.seed(seed)\n try:\n seeded_selection = randomListSelection(list)\n return seeded_selection\n finally:\n random.set_state(state)" ]
[ [ "numpy.random.get_state", "numpy.random.set_state", "numpy.random.seed" ] ]
carbonplan/cmip6-downscaling
[ "41401d99d3beef7e80485cc54161cbc8653f583e" ]
[ "cmip6_downscaling/methods/regions.py" ]
[ "from typing import Any, Dict, Tuple, Union\n\nimport numpy as np\nimport regionmask\nimport xarray as xr\n\n\ndef generate_subdomains(\n ex_output_grid: Union[xr.Dataset, xr.DataArray],\n buffer_size: Union[float, int],\n region_def: str = 'ar6',\n) -> Tuple[Dict[Union[int, float], Any], xr.DataArray]:\n \"\"\"\n Given an example output grid, determine all subdomains that need to be process in order to generate the final output.\n Outputs the list of bounding boxes for each subdomain considering the buffer size, as well as a mask in the resolution of the example output grid specifying\n which subdomain's value to use for each grid cell.\n\n Parameters\n ----------\n ex_output_grid : xarray.DataArray or xarray.Dataset\n Example output grid definition. both the bounding box and resolution in lat/lon directions will be used.\n buffer_size : int or float\n Buffer size in unit of degree. for each subdomain, how much buffer area to run\n region_def : str\n Subregion definition name. Options are `'ar6'` or `'srex'`. See the docs https://regionmask.readthedocs.io/en/stable/defined_scientific.html for more details.\n\n Returns\n -------\n subdomains : dict\n Dictionary mapping subdomain code to bounding boxes ([min_lon, min_lat, max_lon, max_lat]) for each subdomain\n mask : xarray.DataArray\n Mask of which subdomain code to use for each grid cell\n \"\"\"\n if region_def == 'ar6':\n regions = regionmask.defined_regions.ar6.land\n elif region_def == 'srex':\n regions = regionmask.defined_regions.srex\n else:\n raise NotImplementedError('region_def must be eitehr ar6 or srex')\n\n mask = regions.mask(ex_output_grid)\n region_codes = np.unique(mask.values)\n region_codes = region_codes[np.isfinite(region_codes)]\n\n subdomains = {}\n for n, bound in zip(regions.numbers, regions.bounds):\n if n in region_codes:\n # max(low, min(high, value))\n min_lon = max(min(bound[0] - buffer_size, 180), -180)\n min_lat = max(min(bound[1] - buffer_size, 90), -90)\n max_lon = max(min(bound[2] + buffer_size, 180), -180)\n max_lat = max(min(bound[3] + buffer_size, 90), -90)\n # there is a small region of eastern siberia that is part of region 28 of AR6\n # but since it's difficult to get a subdomain crossing the -180 longitude line, add this region into region 1 instead\n if n == 1 and region_def == 'ar6':\n min_lon = -180\n elif n == 28 and region_def == 'ar6':\n min_lon = 40.0 - buffer_size\n subdomains[n] = (min_lon, min_lat, max_lon, max_lat)\n\n return subdomains, mask\n\n\ndef combine_outputs(\n ds_dict: Dict[Union[float, int], xr.Dataset],\n mask: xr.DataArray,\n) -> xr.Dataset:\n \"\"\"\n Combines values in ds_dict according to mask. Mask should be a 2D dataarray with lat/lon as the dimensions. 
The values in mask should\n correspond to the keys in ds_dict.\n\n Parameters\n ----------\n ds_dict : dict\n Dictionary mapping subdomain code to output (subdomain code : xr.Dataset)\n mask : xarray.DataArray\n Mask of which subdomain code to use for each grid cell (2D, lat/lon)\n\n Returns\n --------\n ds : xarray.Dataset\n The combined output where values come from the respective ds in ds_dict according to mask\n \"\"\"\n # compare the region codes in ds_dict and in mask to make sure that they match with each other\n region_codes_available = list(ds_dict.keys())\n region_codes = np.unique(mask.values)\n region_codes = region_codes[np.isfinite(region_codes)]\n for code in region_codes:\n assert code in region_codes_available\n\n # construct the output\n out = xr.Dataset()\n template = ds_dict[region_codes_available[0]]\n for v in template.data_vars:\n out[v] = xr.DataArray(\n np.nan,\n dims=template.dims,\n coords={'time': template.time, 'lat': mask.lat, 'lon': mask.lon},\n )\n for code in region_codes:\n # this is the values in ds_dict, which contains the information for each region separately\n single_region_output = ds_dict[code][v]\n single_region_output = single_region_output.reindex({'lat': mask.lat, 'lon': mask.lon})\n out[v] = xr.where(mask == code, single_region_output, out[v])\n\n return out\n" ]
[ [ "numpy.isfinite", "numpy.unique" ] ]
ajits-github/yolov4
[ "31d833983286d2da942226b589140fc69bc79ba2" ]
[ "tool/tv_reference/coco_utils.py" ]
[ "import copy\nimport os\nfrom PIL import Image\n\nimport torch\nimport torch.utils.data\nimport torchvision\n\nfrom pycocotools import mask as coco_mask\nfrom pycocotools.coco import COCO\n\nfrom . import transforms as T\n\n\nclass FilterAndRemapCocoCategories(object):\n def __init__(self, categories, remap=True):\n self.categories = categories\n self.remap = remap\n\n def __call__(self, image, target):\n anno = target[\"annotations\"]\n anno = [obj for obj in anno if obj[\"category_id\"] in self.categories]\n if not self.remap:\n target[\"annotations\"] = anno\n return image, target\n anno = copy.deepcopy(anno)\n for obj in anno:\n obj[\"category_id\"] = self.categories.index(obj[\"category_id\"])\n target[\"annotations\"] = anno\n return image, target\n\n\ndef convert_coco_poly_to_mask(segmentations, height, width):\n masks = []\n for polygons in segmentations:\n rles = coco_mask.frPyObjects(polygons, height, width)\n mask = coco_mask.decode(rles)\n if len(mask.shape) < 3:\n mask = mask[..., None]\n mask = torch.as_tensor(mask, dtype=torch.uint8)\n mask = mask.any(dim=2)\n masks.append(mask)\n if masks:\n masks = torch.stack(masks, dim=0)\n else:\n masks = torch.zeros((0, height, width), dtype=torch.uint8)\n return masks\n\n\nclass ConvertCocoPolysToMask(object):\n def __call__(self, image, target):\n w, h = image.size\n\n image_id = target[\"image_id\"]\n image_id = torch.tensor([image_id])\n\n anno = target[\"annotations\"]\n\n anno = [obj for obj in anno if obj['iscrowd'] == 0]\n\n boxes = [obj[\"bbox\"] for obj in anno]\n # guard against no boxes via resizing\n boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4)\n boxes[:, 2:] += boxes[:, :2]\n boxes[:, 0::2].clamp_(min=0, max=w)\n boxes[:, 1::2].clamp_(min=0, max=h)\n\n classes = [obj[\"category_id\"] for obj in anno]\n classes = torch.tensor(classes, dtype=torch.int64)\n\n segmentations = [obj[\"segmentation\"] for obj in anno]\n masks = convert_coco_poly_to_mask(segmentations, h, w)\n\n keypoints = None\n if anno and \"keypoints\" in anno[0]:\n keypoints = [obj[\"keypoints\"] for obj in anno]\n keypoints = torch.as_tensor(keypoints, dtype=torch.float32)\n num_keypoints = keypoints.shape[0]\n if num_keypoints:\n keypoints = keypoints.view(num_keypoints, -1, 3)\n\n keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])\n boxes = boxes[keep]\n classes = classes[keep]\n masks = masks[keep]\n if keypoints is not None:\n keypoints = keypoints[keep]\n\n target = {}\n target[\"boxes\"] = boxes\n target[\"labels\"] = classes\n target[\"masks\"] = masks\n target[\"image_id\"] = image_id\n if keypoints is not None:\n target[\"keypoints\"] = keypoints\n\n # for conversion to coco api\n area = torch.tensor([obj[\"area\"] for obj in anno])\n iscrowd = torch.tensor([obj[\"iscrowd\"] for obj in anno])\n target[\"area\"] = area\n target[\"iscrowd\"] = iscrowd\n\n return image, target\n\n\ndef _coco_remove_images_without_annotations(dataset, cat_list=None):\n def _has_only_empty_bbox(anno):\n return all(any(o <= 1 for o in obj[\"bbox\"][2:]) for obj in anno)\n\n def _count_visible_keypoints(anno):\n return sum(sum(1 for v in ann[\"keypoints\"][2::3] if v > 0) for ann in anno)\n\n min_keypoints_per_image = 10\n\n def _has_valid_annotation(anno):\n # if it's empty, there is no annotation\n if len(anno) == 0:\n return False\n # if all boxes have close to zero area, there is no annotation\n if _has_only_empty_bbox(anno):\n return False\n # keypoints task have a slight different critera for considering\n # if an annotation is 
valid\n if \"keypoints\" not in anno[0]:\n return True\n # for keypoint detection tasks, only consider valid images those\n # containing at least min_keypoints_per_image\n if _count_visible_keypoints(anno) >= min_keypoints_per_image:\n return True\n return False\n\n assert isinstance(dataset, torchvision.datasets.CocoDetection)\n ids = []\n for ds_idx, img_id in enumerate(dataset.ids):\n ann_ids = dataset.coco.getAnnIds(imgIds=img_id, iscrowd=None)\n anno = dataset.coco.loadAnns(ann_ids)\n if cat_list:\n anno = [obj for obj in anno if obj[\"category_id\"] in cat_list]\n if _has_valid_annotation(anno):\n ids.append(ds_idx)\n\n dataset = torch.utils.data.Subset(dataset, ids)\n return dataset\n\n\ndef convert_to_coco_api(ds, bbox_fmt='voc'):\n \"\"\"\n \"\"\"\n print(\"in function convert_to_coco_api...\")\n coco_ds = COCO()\n # annotation IDs need to start at 1, not 0, see torchvision issue #1530\n ann_id = 1\n dataset = {'images': [], 'categories': [], 'annotations': []}\n categories = set()\n for img_idx in range(len(ds)):\n # find better way to get target\n # targets = ds.get_annotations(img_idx)\n img, targets = ds[img_idx]\n image_id = targets[\"image_id\"].item()\n img_dict = {}\n img_dict['id'] = image_id\n img_dict['height'] = img.shape[-2]\n img_dict['width'] = img.shape[-1]\n dataset['images'].append(img_dict)\n bboxes = targets[\"boxes\"]\n # to coco format: xmin, ymin, w, h\n if bbox_fmt.lower() == \"voc\": # xmin, ymin, xmax, ymax\n bboxes[:, 2:] -= bboxes[:, :2]\n elif bbox_fmt.lower() == \"yolo\": # xcen, ycen, w, h\n bboxes[:, :2] = bboxes[:, :2] - bboxes[:, 2:]/2\n elif bbox_fmt.lower() == \"coco\":\n pass\n else:\n raise ValueError(f\"bounding box format {bbox_fmt} not supported!\")\n bboxes = bboxes.tolist()\n labels = targets['labels'].tolist()\n areas = targets['area'].tolist()\n iscrowd = targets['iscrowd'].tolist()\n if 'masks' in targets:\n masks = targets['masks']\n # make masks Fortran contiguous for coco_mask\n masks = masks.permute(0, 2, 1).contiguous().permute(0, 2, 1)\n if 'keypoints' in targets:\n keypoints = targets['keypoints']\n keypoints = keypoints.reshape(keypoints.shape[0], -1).tolist()\n num_objs = len(bboxes)\n for i in range(num_objs):\n ann = {}\n ann['image_id'] = image_id\n ann['bbox'] = bboxes[i]\n ann['category_id'] = labels[i]\n categories.add(labels[i])\n ann['area'] = areas[i]\n ann['iscrowd'] = iscrowd[i]\n ann['id'] = ann_id\n if 'masks' in targets:\n ann[\"segmentation\"] = coco_mask.encode(masks[i].numpy())\n if 'keypoints' in targets:\n ann['keypoints'] = keypoints[i]\n ann['num_keypoints'] = sum(k != 0 for k in keypoints[i][2::3])\n dataset['annotations'].append(ann)\n ann_id += 1\n dataset['categories'] = [{'id': i} for i in sorted(categories)]\n coco_ds.dataset = dataset\n coco_ds.createIndex()\n # print(\"coco_ds============================\",coco_ds.dataset)\n return coco_ds\n\n\ndef get_coco_api_from_dataset(dataset):\n for _ in range(10):\n if isinstance(dataset, torchvision.datasets.CocoDetection):\n break\n if isinstance(dataset, torch.utils.data.Subset):\n dataset = dataset.dataset\n if isinstance(dataset, torchvision.datasets.CocoDetection):\n return dataset.coco\n return convert_to_coco_api(dataset)\n\n\nclass CocoDetection(torchvision.datasets.CocoDetection):\n def __init__(self, img_folder, ann_file, transforms):\n super(CocoDetection, self).__init__(img_folder, ann_file)\n self._transforms = transforms\n\n def __getitem__(self, idx):\n img, target = super(CocoDetection, self).__getitem__(idx)\n image_id = self.ids[idx]\n 
target = dict(image_id=image_id, annotations=target)\n if self._transforms is not None:\n img, target = self._transforms(img, target)\n return img, target\n\n\ndef get_coco(root, image_set, transforms, mode='instances'):\n anno_file_template = \"{}_{}2017.json\"\n PATHS = {\n \"train\": (\"train2017\", os.path.join(\"annotations\", anno_file_template.format(mode, \"train\"))),\n \"val\": (\"val2017\", os.path.join(\"annotations\", anno_file_template.format(mode, \"val\"))),\n # \"train\": (\"val2017\", os.path.join(\"annotations\", anno_file_template.format(mode, \"val\")))\n }\n\n t = [ConvertCocoPolysToMask()]\n\n if transforms is not None:\n t.append(transforms)\n transforms = T.Compose(t)\n\n img_folder, ann_file = PATHS[image_set]\n img_folder = os.path.join(root, img_folder)\n ann_file = os.path.join(root, ann_file)\n\n dataset = CocoDetection(img_folder, ann_file, transforms=transforms)\n\n if image_set == \"train\":\n dataset = _coco_remove_images_without_annotations(dataset)\n\n # dataset = torch.utils.data.Subset(dataset, [i for i in range(500)])\n\n return dataset\n\n\ndef get_coco_kp(root, image_set, transforms):\n return get_coco(root, image_set, transforms, mode=\"person_keypoints\")\n" ]
[ [ "torch.zeros", "torch.tensor", "torch.as_tensor", "torch.utils.data.Subset", "torch.stack" ] ]
eisenhauer/ADIOS2-Examples
[ "15505deab8f61f395d530ae9b66e24d65f6d97ca" ]
[ "source/cpp/gray-scott/plot/gsplot.py" ]
[ "#!/usr/bin/env python3\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport adios2\nimport argparse\nfrom mpi4py import MPI\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport decomp\nimport time\nimport os\n\n\ndef SetupArgs():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--instream\", \"-i\", help=\"Name of the input stream\", required=True)\n parser.add_argument(\"--outfile\", \"-o\", help=\"Name of the output file\", default=\"screen\")\n parser.add_argument(\"--varname\", \"-v\", help=\"Name of variable read\", default=\"U\")\n parser.add_argument(\"--nompi\", \"-nompi\", help=\"ADIOS was installed without MPI\", action=\"store_true\")\n parser.add_argument(\"--displaysec\", \"-dsec\", help=\"Float representing gap between plot window refresh\", default=0.1)\n parser.add_argument(\"--nx\", \"-nx\", help=\"Integer representing process decomposition in the x direction\",default=1)\n parser.add_argument(\"--ny\", \"-ny\", help=\"Integer representing process decomposition in the y direction\",default=1)\n parser.add_argument(\"--nz\", \"-nz\", help=\"Integer representing process decomposition in the z direction\",default=1)\n parser.add_argument(\"--plane\", \"-plane\", help=\"The 2D plane to be displayed/stored xy/yz/xz/all\", default='yz')\n args = parser.parse_args()\n\n args.displaysec = float(args.displaysec)\n args.nx = int(args.nx)\n args.ny = int(args.ny)\n args.nz = int(args.nz)\n\n if args.plane not in ('xz', 'yz', 'xy', 'all'):\n raise \"Input argument --plane must be one of xz/yz/xy/all\"\n\n return args\n\n\ndef Plot2D(plane_direction, data, args, fullshape, step, fontsize):\n # Plotting part\n displaysec = args.displaysec\n gs = gridspec.GridSpec(1, 1)\n fig = plt.figure(1, figsize=(8,8))\n ax = fig.add_subplot(gs[0, 0])\n colorax = ax.imshow(data, origin='lower', interpolation='quadric',extent=[0, fullshape[1], 0, fullshape[0]], cmap=plt.get_cmap('gist_ncar'))\n cbar = fig.colorbar(colorax, orientation='horizontal')\n cbar.ax.tick_params(labelsize=fontsize-4)\n\n for i in range(args.ny):\n y = fullshape[0] / args.ny * i\n ax.plot([0, fullshape[1]], [y, y], color='black')\n\n for i in range(args.nx):\n x = fullshape[1] / args.nx * i\n ax.plot([x, x], [0, fullshape[0]], color='black')\n\n ax.set_title(\"{0}, {1} plane, step {2}\".format(args.varname, plane_direction, step), fontsize=fontsize)\n ax.set_xlabel(plane_direction[0], fontsize=fontsize)\n ax.set_ylabel(plane_direction[1], fontsize=fontsize)\n plt.tick_params(labelsize = fontsize-8)\n plt.ion()\n if (args.outfile == \"screen\"):\n plt.show()\n plt.pause(displaysec)\n elif args.outfile.endswith(\".bp\"):\n if step == 0:\n global adios\n global ioWriter\n global var\n global writer\n adios = adios2.ADIOS(mpi.comm_app)\n ioWriter = adios.DeclareIO(\"VizOutput\")\n var = ioWriter.DefineVariable(args.varname, data.shape, [0,0], data.shape, adios2.ConstantDims, data)\n writer = ioWriter.Open(args.outfile, adios2.Mode.Write)\n\n writer.BeginStep()\n writer.Put(var, data, adios2.Mode.Sync)\n writer.EndStep()\n else:\n imgfile = args.outfile+\"{0:0>5}\".format(step)+\"_\" + plane_direction + \".png\"\n fig.savefig(imgfile)\n\n plt.clf()\n\n\ndef read_data(args, fr, start_coord, size_dims):\n\n var1 = args.varname\n data= fr.read(var1, start_coord, size_dims )\n data = np.squeeze(data)\n return data\n\n\nif __name__ == \"__main__\":\n # fontsize on plot\n fontsize = 24\n\n args = SetupArgs()\n# print(args)\n\n # Setup up 
2D communicators if MPI is installed\n mpi = decomp.MPISetup(args, 3)\n myrank = mpi.rank['app']\n\n # Read the data from this object\n fr = adios2.open(args.instream, \"r\", mpi.comm_app,\"adios2.xml\", \"SimulationOutput\")\n# vars_info = fr.availablevariables()\n\n\n # Get the ADIOS selections -- equally partition the data if parallelization is requested\n\n\n # Read through the steps, one at a time\n plot_step = 0\n for fr_step in fr:\n# if fr_step.current_step()\n start, size, fullshape = mpi.Partition_3D_3D(fr, args)\n cur_step= fr_step.current_step()\n vars_info = fr.available_variables()\n# print (vars_info)\n shape3_str = vars_info[args.varname][\"Shape\"].split(',')\n shape3 = list(map(int,shape3_str))\n sim_step = fr_step.read(\"step\")\n\n if myrank == 0:\n print(\"GS Plot step {0} processing simulation output step {1} or computation step {2}\".format(plot_step,cur_step, sim_step), flush=True)\n# if cur_step == 0:\n# print(\"Variable\" + pdfvar + \" shape is {\" + vars_info[pdfvar][\"Shape\"]+\"}\")\n\n if args.plane in ('xy', 'all'):\n data = read_data (args, fr_step, [0,0,int(shape3[2]/2)], [shape3[0],shape3[1],1])\n Plot2D ('xy', data, args, fullshape, sim_step, fontsize)\n\n if args.plane in ('xz', 'all'):\n data = read_data (args, fr_step, [0,int(shape3[1]/2),0], [shape3[0],1,shape3[2]])\n Plot2D ('xz', data, args, fullshape, sim_step, fontsize)\n\n if args.plane in ('yz', 'all'):\n data = read_data (args, fr_step, [int(shape3[0]/2),0,0], [1,shape3[1],shape3[2]])\n Plot2D ('yz', data, args, fullshape, sim_step, fontsize)\n plot_step = plot_step + 1;\n\n fr.close()\n\n" ]
[ [ "numpy.squeeze", "matplotlib.pyplot.get_cmap", "matplotlib.pyplot.clf", "matplotlib.gridspec.GridSpec", "matplotlib.pyplot.ion", "matplotlib.pyplot.tick_params", "matplotlib.pyplot.pause", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
whutddk/AlphaZero_Quoridor
[ "52744b64f643ed6c01adad86adea29d3d2b14177" ]
[ "quoridor.py" ]
[ "import numpy as np\nfrom queue import Queue\nimport time\n\nclass Quoridor(object):\n\tHORIZONTAL = 1\n\tVERTICAL = -1\n\n\tdef __init__(self, safe=False):\n\t\tself.safe = safe\n\n\t\t#self.action_space = 140 # 140 64+64 + 4 + 8possible actions in total \n\t\t\n\t\tself.action_space = 84 # 84 36+36 + 4 + 8 possible actions in total \n\t\tself.n_players = 2\n\t\tself.players = [1, 2] # 两个玩家\n\t\tself.reset()\n\n\t# 待改\n\tdef load(self, p1, p2):\n\t\tself.player1 = p1\n\t\tself.player2 = p2\n\n\t# 获取当前玩家\n\tdef get_current_player(self):\n\t\treturn self.current_player\n\n\tdef reset(self):\n\t\tself.current_player = 1\n\t\tself.last_player = -1\n\n\t\t# Initialize Tiles\n\t\t# self.tiles = np.zeros(81) #9*9 #瓷砖(棋盘板格)\n\t\tself.tiles = np.zeros(49) #7*7\n\n\t\t# Initialize Player Locations\n\t\tself._positions = {\t\t\t\t\t#起始点\n\t\t\t1: 3, # 这里要改\n\t\t\t2: 45 # 这里要改\n\t\t}\n\n\t\tself._DIRECTIONS = {\n\t\t\t'N': 0, 'S': 1, 'E': 2, 'W': 3,\n\t\t\t'NN': 4, 'SS': 5, 'EE': 6, 'WW': 7,\n\t\t\t'NE': 8, 'NW': 9, 'SE': 10, 'SW': 11\n\t\t}\n\t\t# self.N_DIRECTIONS = 12\n\t\t# self.N_TILES = 81 \n\t\t# self.N_ROWS = 9 \n\t\t# self.N_INTERSECTIONS = 64 \n\t\tself.N_DIRECTIONS = 12\t\t\t# 可以行动的选择总数\n\t\tself.N_TILES = 49 \t# 瓷砖格子总数 7*7\n\t\tself.N_ROWS = 7 # 行数\n\t\tself.N_INTERSECTIONS = 36 # 可以插入的挡板数组 6*6 十字路口\n\n\t\t# There are 36 possible intersection\n\t\t# Horizontal Walls - 1\n\t\t# No Wall - 0\n\t\t# Vertical Wall - -1\n\t\tself._intersections = np.zeros(36)\t\t# 初始化挡板数组(四个棋子格为一个点),水平置1 ,垂直为-1\n\n\t\tself._player1_walls_remaining = 10 # 这里要改\t到底有几块挡板?\n\t\tself._player2_walls_remaining = 10 # 这里要改\n\n\tdef state(self):\n\t\t\"\"\"Returns a set of 7x7 planes that represent the game state.\n\t\t1. The current player position 当前玩家位置\n\t\t2. The opponent position 对手位置\n\t\t3. Vertical Walls 垂直挡板\n\t\t4. Horizontal Walls 水平挡板\n\t\t5 - 14. Number of walls remaining for current player 当前玩家剩余挡板数\n\t\t15 - 24. Number of walls remaining for opponent 对手剩余挡板数\n\t\t25. 
Whose turn it is (0 for player 1, 1 for player 2) 当前谁下\n\t\t\"\"\"\n\t\tplayer1_position_plane = self.tiles.copy()\n\t\tplayer1_position_plane[self._positions[1]] = 1\n\t\tplayer1_position_plane = player1_position_plane.reshape([7, 7]) \n\n\t\tplayer2_position_plane = self.tiles.copy()\n\t\tplayer2_position_plane[self._positions[2]] = 1\n\t\tplayer2_position_plane = player2_position_plane.reshape([7, 7]) \n\n\t\tplayer1_walls_plane = np.zeros([10, 7, 7]) \n\t\tplayer2_walls_plane = np.zeros([10, 7, 7]) \n\n\t\tplayer1_walls_plane[self._player1_walls_remaining - 1, :, :] = 1\n\t\tplayer2_walls_plane[self._player2_walls_remaining - 1, :, :] = 1\n\n\t\t# Set the wall planes\n\t\tvertical_walls = np.pad(\n\t\t\tnp.int8(self._intersections == -1).reshape([6, 6]), \n\t\t\t(0, 1),\n\t\t\tmode='constant',\n\t\t\tconstant_values=0\n\t\t)\n\n\t\thorizontal_walls = np.pad(\n\t\t\tnp.int8(self._intersections == 1).reshape([6, 6]), \n\t\t\t(0, 1),\n\t\t\tmode='constant',\n\t\t\tconstant_values=0\n\t\t)\n\n\t\tno_walls = np.pad(\n\t\t\tnp.int8(self._intersections == 0).reshape([6, 6]), \n\t\t\t(0, 1),\n\t\t\tmode='constant',\n\t\t\tconstant_values=0\n\t\t)\n\n\t\t# 不同玩家调整平面顺序\n\t\tif self.current_player == 1:\n\t\t\tstate = np.stack([\n\t\t\t\tno_walls,\n\t\t\t\tvertical_walls,\n\t\t\t\thorizontal_walls,\n\t\t\t\tplayer1_position_plane,\n\t\t\t\tplayer2_position_plane,\n\t\t\t])\n\n\t\t\t# print('Shape is {shape}'.format(shape=state.shape))\n\n\t\t\tcurrent_player_plane = np.zeros([1, 7, 7]) \n\t\t\tstate = np.vstack([state, player1_walls_plane, player2_walls_plane, current_player_plane])\n\n\t\tif self.current_player == 2:\n\t\t\tstate = np.stack([\n\t\t\t\tno_walls,\n\t\t\t\tvertical_walls,\n\t\t\t\thorizontal_walls,\n\t\t\t\tplayer2_position_plane,\n\t\t\t\tplayer1_position_plane,\n\t\t\t])\n\n\t\t\tcurrent_player_plane = np.ones([1, 7, 7]) \n\t\t\tstate = np.vstack([state, player2_walls_plane, player1_walls_plane, current_player_plane])\n\t\t\t# print(state.shape)\n\t\treturn state\n\n\tdef load_state(self, state):\n\t\t\"\"\"Mutates the Quoridor object to match a given state\"\"\"\n\t\tcurrent_player = state[-1] == np.zeros([7, 7]) \n\t\t# TODO: Implement the rest of this\n\n\tdef actions(self):\n\t\tplayer = self.current_player\n\t\tlocation = self._positions[player]\n\n\t\topponent = 1 if player == 2 else 2\n\t\topponent_loc = self._positions[opponent]\n\t\twalls = self._intersections # 长36一维数组\n\t\t# 获得合法棋子动作空间\n\t\tpawn_actions = self._valid_pawn_actions(location=location,\n\t\t\t\t\t\t\t\t\t\t\t\topponent_loc=opponent_loc, walls=walls, player=player)\n\t\t# 如果当前为玩家1并且还有挡板,或者玩家为2,并且还有挡板,则获取挡板的合法动作空间\n\t\tif ((self.current_player == 1 and self._player1_walls_remaining > 0)\n\t\t\tor (self.current_player == 2 and self._player2_walls_remaining > 0)):\n\t\t\twall_actions = self._valid_wall_actions() # 获得合法挡板动作空间\n\n\t\t\t# 调整+12 因为前12个是棋子动作 4+8\n\t\t\twall_actions = [action + 12 for action in wall_actions]\n\t\telse:\n\t\t\twall_actions = []\n\t\treturn pawn_actions + wall_actions\n\n\tdef step(self, action):\n\t\t\"\"\"Take a step in the environment given the current action\"\"\"\n\t\t# self._logger.info(\"Player {player} chooses action {action}\".format(player=self.current_player, action=action))\n\t\tplayer = self.current_player\n\t\tdone = False\n\t\t# 添加\n\t\tself.valid_actions = self.actions()\n\n\t\tif self.safe:\n\t\t\tif not action in self.valid_actions:\n\t\t\t\traise ValueError(\"Invalid Action: {action}\".format(action=action))\n\n\t\tif action < 
12:\n\t\t\tself._handle_pawn_action(action, player)\n\t\telse:\n\t\t\tself._handle_wall_action(action - 12)\n\n\t\tgame_over, winner = self.has_a_winner()\n\t\tif game_over:\n\t\t\tprint(\"game over !winner is player\" + str(winner))\n\t\t\tdone = True\n\t\telse:\n\t\t\tself.rotate_players() # 切换玩家\n\t\t\t# observation = self.get_state # get_state未实现\n\t\t\t# observation = self.state()\n\t\t\t# print(\"game over !winner is player\" + str(winner))\n\n\t\treturn done ,winner\n\n\t# 判断游戏是否结束\n\tdef game_end(self):\n\t\tpass\n\n\t# 判断是否有胜者\n\tdef has_a_winner(self):\n\t\tgame_over = False\n\t\twinner = None\n\t\tif self._positions[2] < 7: \n\t\t\twinner = 2\n\t\t\tgame_over = True\n\t\telif self._positions[1] > 41: \n\t\t\twinner = 1\n\t\t\tgame_over = True\n\t\treturn game_over, winner\n\n\t# 获取奖励\n\tdef _get_rewards(self):\n\t\tdone = True\n\t\tif self._positions[2] < 7: \n\t\t\trewards, done = (1, -1)\n\t\telif self._positions[1] > 41: \n\t\t\trewards = (-1, 1)\n\t\telse:\n\t\t\trewards = (0, 0)\n\t\t\tdone = False\n\t\treturn rewards, done\n\n\t# 处理棋子动作\n\tdef _handle_pawn_action(self, action, player):\n\t\tif action == self._DIRECTIONS['N']:\n\t\t\tself._positions[player] += 7 \n\t\telif action == self._DIRECTIONS['S']:\n\t\t\tself._positions[player] -= 7 \n\t\telif action == self._DIRECTIONS['E']:\n\t\t\tself._positions[player] += 1 \n\t\telif action == self._DIRECTIONS['W']:\n\t\t\tself._positions[player] -= 1 \n\t\telif action == self._DIRECTIONS['NN']:\n\t\t\tself._positions[player] += 14 \n\t\telif action == self._DIRECTIONS['SS']:\n\t\t\tself._positions[player] -= 14 \n\t\telif action == self._DIRECTIONS['EE']:\n\t\t\tself._positions[player] += 2 \n\t\telif action == self._DIRECTIONS['WW']:\n\t\t\tself._positions[player] -= 2 \n\t\telif action == self._DIRECTIONS['NW']:\n\t\t\tself._positions[player] += 6 \n\t\telif action == self._DIRECTIONS['NE']:\n\t\t\tself._positions[player] += 8 \n\t\telif action == self._DIRECTIONS['SW']:\n\t\t\tself._positions[player] -= 8 \n\t\telif action == self._DIRECTIONS['SE']:\n\t\t\tself._positions[player] -= 6 \n\t\telse:\n\t\t\traise ValueError(\"Invalid Pawn Action: {action}\".format(action=action))\n\n\t# 处理挡板动作\n\tdef _handle_wall_action(self, action):\n\t\t# Action values less than 36 are horizontal walls\n\t\tif action < 36: \n\t\t\tself._intersections[action] = 1\n\t\t# Action values above 36 are vertical walls\n\t\telse: \n\t\t\tself._intersections[action - 36] = -1\n\n\t\tif self.current_player == 1:\n\t\t\tself._player1_walls_remaining -= 1\n\t\telse:\n\t\t\tself._player2_walls_remaining -= 1\n\t\t# self._logger.info(self._intersections)\n\n\tdef rotate_players(self):\n\t\t\"\"\"Switch the player turn\"\"\"\n\t\t# self._logger.debug(\"Rotating Player\")\n\t\tif self.current_player == 1:\n\t\t\tself.current_player = 2\n\t\t\tself.last_player = 1\n\n\t\telse:\n\t\t\tself.current_player = 1\n\t\t\tself.last_player = 2\n\n\t# walls :长36一维数组 location:int 0-48\n\tdef _valid_pawn_actions(self, walls, location, opponent_loc, player=1):\n\t\tHORIZONTAL = 1\n\t\tVERTICAL = -1\n\n\t\tvalid = []\n\t\t# 判断对面棋子是否相邻 \n\t\t# opponent_loc为对手位置 location 为自己位置 \n\t\topponent_north = location == opponent_loc - 7\n\t\topponent_south = location == opponent_loc + 7\n\t\topponent_east = location == opponent_loc - 1\n\t\topponent_west = location == opponent_loc + 1\n\n\t\tcurrent_row = location // self.N_ROWS\n\n\t\tintersections = self._get_intersections(walls, location)\n\t\t# 判断北面没有水平挡板和对面棋子\n\t\tn = intersections['NW'] != HORIZONTAL and intersections['NE'] != 
HORIZONTAL and not opponent_north\n\t\t# 判断南面没有水平挡板和对面棋子\n\t\ts = intersections['SW'] != HORIZONTAL and intersections['SE'] != HORIZONTAL and not opponent_south\n\t\t# 判断东面没有竖直挡板和对面棋子\n\t\te = intersections['NE'] != VERTICAL and intersections['SE'] != VERTICAL and not opponent_east\n\t\t# 判断西面没有竖直挡板和对面棋子\n\t\tw = intersections['NW'] != VERTICAL and intersections['SW'] != VERTICAL and not opponent_west\n\t\t# 向北走,两种情况:1,按照上面的判断可走 2,虽到边界但是再走可以获胜\n\t\tif n: valid.append(self._DIRECTIONS['N'])\n\t\t# 同理\n\t\tif s: valid.append(self._DIRECTIONS['S'])\n\t\tif e: valid.append(self._DIRECTIONS['E'])\n\t\tif w: valid.append(self._DIRECTIONS['W'])\n\t\t# 如果北面有对手棋子并且北面没有水平挡板\n\t\tif opponent_north and intersections['NE'] != HORIZONTAL and intersections['NW'] != HORIZONTAL:\n\t\t\t# 获取对手周围的挡板信息\n\t\t\tn_intersections = self._get_intersections(walls, opponent_loc)\n\t\t\t# 如果对手北面没有水平挡板,或者 玩家1在第八行,也就是倒数第二行\n\t\t\tif n_intersections['NW'] != HORIZONTAL and n_intersections['NE'] != HORIZONTAL:\n\t\t\t\t# 可以走向北两步,也就是NN\n\t\t\t\tvalid.append(self._DIRECTIONS['NN'])\n\t\t\t# 如果对手东-北面没有竖直挡板,并且自己东-北面没有竖直挡板,可以走两步NE\n\t\t\tif n_intersections['NE'] != VERTICAL and intersections['NE'] != VERTICAL:\n\t\t\t\tvalid.append(self._DIRECTIONS['NE'])\n\n\t\t\tif n_intersections['NW'] != VERTICAL and intersections['NW'] != VERTICAL:\n\t\t\t\tvalid.append(self._DIRECTIONS['NW'])\n\n\n\t\telif opponent_south and intersections['SE'] != HORIZONTAL and intersections['SW'] != HORIZONTAL:\n\t\t\ts_intersections = self._get_intersections(walls, opponent_loc)\n\t\t\tif s_intersections['SW'] != HORIZONTAL and s_intersections['SE'] != HORIZONTAL:\n\t\t\t\tvalid.append(self._DIRECTIONS['SS'])\n\n\t\t\tif s_intersections['SE'] != VERTICAL and intersections['SE'] != VERTICAL:\n\t\t\t\tvalid.append(self._DIRECTIONS['SE'])\n\n\t\t\tif s_intersections['SW'] != VERTICAL and intersections['SW'] != VERTICAL:\n\t\t\t\tvalid.append(self._DIRECTIONS['SW'])\n\n\n\t\telif opponent_east and intersections['SE'] != VERTICAL and intersections['NE'] != VERTICAL:\n\t\t\te_intersections = self._get_intersections(walls, opponent_loc)\n\t\t\tif e_intersections['SE'] != VERTICAL and e_intersections['NE'] != VERTICAL:\n\t\t\t\tvalid.append(self._DIRECTIONS['EE'])\n\n\t\t\tif e_intersections['NE'] != HORIZONTAL:\n\t\t\t\tvalid.append(self._DIRECTIONS['NE'])\n\n\t\t\tif e_intersections['SE'] != HORIZONTAL:\n\t\t\t\tvalid.append(self._DIRECTIONS['SE'])\n\n\n\t\telif opponent_west and intersections['SW'] != VERTICAL and intersections['NW'] != VERTICAL:\n\t\t\tw_intersections = self._get_intersections(walls, opponent_loc)\n\t\t\tif w_intersections['NW'] != VERTICAL and w_intersections['SW'] != VERTICAL:\n\t\t\t\tvalid.append(self._DIRECTIONS['WW'])\n\n\t\t\tif w_intersections['NW'] != HORIZONTAL:\n\t\t\t\tvalid.append(self._DIRECTIONS['NW'])\n\n\t\t\tif w_intersections['SW'] != HORIZONTAL:\n\t\t\t\tvalid.append(self._DIRECTIONS['SW'])\n\n\t\treturn valid\n\n\t# intersections: 一维数组 长36 current_tile:当前位置,int 0-48\n\t# 给定棋子位置,判断其东北 西北 东南 西南有没有挡板,边界也算(0,-1,1)\n\tdef _get_intersections(self, intersections, current_tile):\n\t\t\"\"\"Gets the four intersections for a given tile.\"\"\"\n\t\tlocation_row = current_tile // self.N_ROWS\n\t\t# 判断棋子是否在四周边界\n\t\tn_border = current_tile > 41 # 这里要改\n\t\te_border = current_tile % 7 == 6\n\t\ts_border = current_tile < 7 # 这里要改\n\t\tw_border = current_tile % 7 == 0\n\n\t\tif n_border:\t\t#在北棋盘边界上\n\t\t\tne_intersection = 1\t\t\t# 天然边界\n\t\t\tif w_border:\n\t\t\t\tnw_intersection = -1\n\t\t\t\tsw_intersection = 
-1\n\t\t\t\tse_intersection = intersections[(current_tile - 7) - (location_row - 1)]\n\t\t\telif e_border:\n\t\t\t\tnw_intersection = 1\n\t\t\t\tse_intersection = -1\n\t\t\t\tsw_intersection = intersections[(current_tile - 7) - (location_row - 1) - 1]\n\t\t\telse:\n\t\t\t\tnw_intersection = 1\n\t\t\t\tsw_intersection = intersections[(current_tile - 7) - (location_row - 1) - 1]\n\t\t\t\tse_intersection = intersections[(current_tile - 7) - (location_row - 1)]\n\t\telif s_border:\n\t\t\tsw_intersection = 1\n\t\t\tif w_border:\n\t\t\t\tnw_intersection = -1\n\t\t\t\tse_intersection = 1\n\t\t\t\tne_intersection = intersections[current_tile - location_row]\n\t\t\telif e_border:\n\t\t\t\tse_intersection = -1\n\t\t\t\tne_intersection = -1\n\t\t\t\tnw_intersection = intersections[current_tile - location_row - 1]\n\t\t\telse:\n\t\t\t\tse_intersection = 1\n\t\t\t\tne_intersection = intersections[current_tile - location_row]\n\t\t\t\tnw_intersection = intersections[current_tile - location_row - 1]\n\n\n\t\t# West but not north or south\n\t\telif w_border:\n\t\t\tnw_intersection = -1\n\t\t\tsw_intersection = -1\n\t\t\tne_intersection = intersections[current_tile - location_row]\n\t\t\tse_intersection = intersections[(current_tile - 7) - (location_row - 1)]\n\n\t\telif e_border:\n\t\t\tne_intersection = -1\n\t\t\tse_intersection = -1\n\t\t\tnw_intersection = intersections[current_tile - location_row - 1]\n\t\t\tsw_intersection = intersections[(current_tile - 7) - (location_row - 1) - 1]\n\n\t\t# No borders\n\t\telse:\n\t\t\tne_intersection = intersections[current_tile - location_row]\n\t\t\tnw_intersection = intersections[current_tile - location_row - 1]\n\t\t\tsw_intersection = intersections[(current_tile - 7) - (location_row - 1) - 1]\n\t\t\tse_intersection = intersections[(current_tile - 7) - (location_row - 1)]\n\n\t\treturn {'NW': nw_intersection,\n\t\t\t\t'NE': ne_intersection,\n\t\t\t\t'SE': se_intersection,\n\t\t\t\t'SW': sw_intersection}\n\n\tdef _valid_wall_actions(self):\n\t\tvalid = []\n\t\t# If\n\t\tfor ix in range(self._intersections.size):\n\t\t\tif self._validate_horizontal(ix):\n\t\t\t\tvalid.append(ix)\n\n\t\t\tif self._validate_vertical(ix):\n\t\t\t\tvalid.append(ix + 36)\n\n\t\treturn valid\n\n\tdef _validate_horizontal(self, ix):\t\t#根据旁边的挡板判断当前十字路口能否放水平挡板\n\t\tcolumn = ix % 6\n\n\t\tif self._intersections[ix] != 0:\n\t\t\treturn False\n\n\t\tif column != 0:\n\t\t\tif self._intersections[ix - 1] == 1:\n\t\t\t\treturn False\n\n\t\tif column != 5:\n\t\t\tif self._intersections[ix + 1] == 1:\n\t\t\t\treturn False\n\n\t\treturn not self._blocks_path(ix, self.HORIZONTAL)\n\n\tdef _validate_vertical(self, ix):\n\t\trow = ix // 6\n\t\tif self._intersections[ix] != 0:\n\t\t\treturn False\n\n\t\tif row != 0:\n\t\t\tif self._intersections[ix - 6] == -1:\n\t\t\t\treturn False\n\n\t\tif row != 5:\n\t\t\tif self._intersections[ix + 6] == -1:\n\t\t\t\treturn False\n\n\t\treturn not self._blocks_path(ix, self.VERTICAL)\t# 困死路径\n\n\tdef _blocks_path(self, wall_location, orientation):\n\t\tplayer1_target = 6\n\t\tplayer2_target = 0\n\n\t\tplayer1_position = self._positions[1]\n\t\tplayer2_position = self._positions[2]\n\n\t\tintersections = self._intersections.copy()\n\t\tintersections[wall_location] = orientation\n\n\t\t# BFS to target row\n\t\tplayer1_valid = self._bfs_to_goal(intersections, player1_target, player1_position, player2_position, player=1)\n\t\tplayer2_valid = self._bfs_to_goal(intersections, player2_target, player2_position, player1_position, player=2)\n\n\t\treturn not (player1_valid 
and player2_valid)\n\n\tdef _bfs_to_goal(self, intersections, target_row, player_position, opponent_position, player=1):\n\t\tvisited = []\n\t\tinvalid_rows = [7, -1]\n\t\tvisit_queue = Queue()\n\t\tvisit_queue.put(player_position)\n\t\ttarget_visited = False\n\n\t\twhile not target_visited and not visit_queue.empty():\n\t\t\tcurrent_position = visit_queue.get()\n\t\t\tvalid_directions = self._valid_pawn_actions(intersections,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tlocation=current_position,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\topponent_loc=opponent_position,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tplayer=player)\n\t\t\tfor direction in valid_directions:\n\t\t\t\tif direction == self._DIRECTIONS['N']:\n\t\t\t\t\tnew_position = current_position + 7\n\t\t\t\telif direction == self._DIRECTIONS['S']:\n\t\t\t\t\tnew_position = current_position - 7\n\t\t\t\telif direction == self._DIRECTIONS['E']:\n\t\t\t\t\tnew_position = current_position + 1\n\t\t\t\telif direction == self._DIRECTIONS['W']:\n\t\t\t\t\tnew_position = current_position - 1\n\t\t\t\telif direction == self._DIRECTIONS['NN']:\n\t\t\t\t\tnew_position = current_position + 14\n\t\t\t\telif direction == self._DIRECTIONS['SS']:\n\t\t\t\t\tnew_position = current_position - 14\n\t\t\t\telif direction == self._DIRECTIONS['EE']:\n\t\t\t\t\tnew_position = current_position + 2\n\t\t\t\telif direction == self._DIRECTIONS['WW']:\n\t\t\t\t\tnew_position = current_position - 2\n\t\t\t\telif direction == self._DIRECTIONS['NE']:\n\t\t\t\t\tnew_position = current_position + 8\n\t\t\t\telif direction == self._DIRECTIONS['NW']:\n\t\t\t\t\tnew_position = current_position + 6\n\t\t\t\telif direction == self._DIRECTIONS['SW']:\n\t\t\t\t\tnew_position = current_position - 8\n\t\t\t\telif direction == self._DIRECTIONS['SE']:\n\t\t\t\t\tnew_position = current_position - 6\n\t\t\t\telse:\n\t\t\t\t\traise ValueError('Invalid direction - should never happen')\n\n\t\t\t\tnew_row = new_position // self.N_ROWS\n\t\t\t\tif new_row == target_row:\n\t\t\t\t\ttarget_visited = True\n\t\t\t\telif new_position not in visited:\n\t\t\t\t\tvisited.append(new_position)\n\t\t\t\t\tif new_row not in invalid_rows:\n\t\t\t\t\t\tvisit_queue.put(new_position)\n\n\t\treturn target_visited\n\n\tdef add_wall(self, wall, orientation):\n\t\tself._intersections[wall] = orientation\n\n\tdef print_board(self):\n\t\tplayer1_row = self._positions[1] // 7\n\t\tplayer1_col = self._positions[1] % 7\n\t\tplayer2_row = self._positions[2] // 7\n\t\tplayer2_col = self._positions[2] % 7\n\n\t\tx = 'X'\n\t\to = 'O'\n\n\t\tv = 'v'\n\t\th = 'h'\n\t\tdash = '-'\n\t\tnone = ''\n\n\t\tgrid = [['{dash:4}'.format(dash=dash) for i in range(7)] for i in range(7)]\n\t\ti_reshaped = self._intersections.reshape([6, 6])\n\n\t\tgrid[player1_row][player1_col] = '{x:4}'.format(x=x)\n\t\tgrid[player2_row][player2_col] = '{o:4}'.format(o=o)\n\n\t\tintersection_row = 5\n\t\tfor i in range(6, -1, -1):\n\t\t\tfor j in range(7):\n\t\t\t\tprint(grid[i][j], end='')\n\t\t\tprint()\n\t\t\tif intersection_row >= 0:\n\t\t\t\tprint('{none:2}'.format(none=none), end='')\n\t\t\t\tfor j in i_reshaped[intersection_row, :]:\n\t\t\t\t\tif j == 1:\n\t\t\t\t\t\tprint('{h:4}'.format(h=h), end='')\n\t\t\t\t\telif j == -1:\n\t\t\t\t\t\tprint('{v:4}'.format(v=v), end='')\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint('{none:4}'.format(none=none), end='')\n\t\t\t\tintersection_row -= 1\n\t\t\t\tprint()\n\n\tdef clone(self):\n\t\treturn Quoridor()\n\t# 自博弈一次\n\tdef start_self_play(self, player, is_shown=0, temp=1e-3):\n\t\t\"\"\"\n\t\t 开始自博弈,也就是蒙特卡洛树搜索和蒙特卡洛树搜索之间的对抗。\n\t\t 
主要是为了产生数据集,训练神经网络,保存的数据形式:(state, mcts_probs, z)\n\t\t\"\"\"\n\t\tself.reset() # 初始化棋盘\n\t\tp1, p2 = self.players\n\t\tstates, mcts_probs, current_players = [], [], [] # 初始化需要保存的信息,胜负情况要在模拟结束时保存\n\n\t\twhile(1): # 循环进行自博弈\n\t\t\t# 待修改\n\t\t\ttic = time.time()\n\t\t\tmove, move_probs = player.choose_action(self, temp=temp, return_prob=1) # 获取落子以及概率\n\t\t\ttoc = time.time()\n\t\t\t# print(\"player %s chosed move : %s ,prob: %.3f spend: %.2f seconds\" % (self.current_player, move, move_probs[move], (toc-tic)))\n\t\t\t# 保存数据\n\t\t\tstates.append(self.state())\n\t\t\tmcts_probs.append(move_probs)\n\t\t\tcurrent_players.append(self.current_player)\n\t\t\t# 进行落子\n\t\t\tself.step(move)\n\t\t\t# if is_shown:\n\t\t\t# self.graphic(self.board, p1, p2)\n\t\t\tend, winner = self.has_a_winner()\n\t\t\tif end:\n\t\t\t\t# 判断游戏是否结束 ,始终以当前玩家视角保存数据\n\t\t\t\twinners_z = np.zeros(len(current_players))\n\t\t\t\tif winner != -1:\n\t\t\t\t\twinners_z[np.array(current_players) == winner] = 1.0 # 当前玩家的所有落子的z都设为1\n\t\t\t\t\twinners_z[np.array(current_players) != winner] = -1.0 # 对手玩家的所有落子的z都设为-1\n\t\t\t\t# 重置MCTS节点\n\t\t\t\tplayer.reset_player()\n\t\t\t\tif is_shown:\n\t\t\t\t\tif winner != -1:\n\t\t\t\t\t\tprint(\"Game end. Winner is player:\", winner)\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(\"Game end. Tie\")\n\t\t\t\treturn winner, zip(states, mcts_probs, winners_z)\n" ]
[ [ "numpy.int8", "numpy.stack", "numpy.ones", "numpy.array", "numpy.zeros", "numpy.vstack" ] ]
cjdans5545/khaiii
[ "328d5a8af456a5941130383354c07d1cd0e47cf5" ]
[ "src/main/python/khaiii/train/trainer.py" ]
[ "# -*- coding: utf-8 -*-\n\n\n\"\"\"\ntraining related library\n__author__ = 'Jamie ([email protected])'\n__copyright__ = 'Copyright (C) 2019-, Kakao Corp. All rights reserved.'\n\"\"\"\n\n\n###########\n# imports #\n###########\nfrom argparse import Namespace\nimport copy\nfrom datetime import datetime, timedelta\nimport json\nimport logging\nimport os\nimport pathlib\nimport pprint\nfrom typing import List, Tuple\n\nfrom tensorboardX import SummaryWriter\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom tqdm import tqdm\n\nfrom khaiii.train.dataset import PosDataset, PosSentTensor\nfrom khaiii.train.evaluator import Evaluator\nfrom khaiii.train.models import Model\nfrom khaiii.resource.resource import Resource\n\n\n#############\n# functions #\n#############\nclass Trainer:\n \"\"\"\n trainer class\n \"\"\"\n def __init__(self, cfg: Namespace):\n \"\"\"\n Args:\n cfg: config\n \"\"\"\n self.cfg = cfg\n setattr(cfg, 'model_id', self.model_id(cfg))\n setattr(cfg, 'out_dir', '{}/{}'.format(cfg.logdir, cfg.model_id))\n setattr(cfg, 'context_len', 2 * cfg.window + 1)\n self.rsc = Resource(cfg)\n self.model = Model(cfg, self.rsc)\n if torch.cuda.is_available() and cfg.gpu_num >= 0:\n self.model.cuda(device=cfg.gpu_num)\n self.optimizer = torch.optim.Adam(self.model.parameters(), cfg.learning_rate)\n self.criterion = nn.CrossEntropyLoss()\n self.evaler = Evaluator()\n self._load_dataset()\n if 'epoch' not in cfg.__dict__:\n setattr(cfg, 'epoch', 0)\n setattr(cfg, 'best_epoch', 0)\n self.log_file = None # tab separated log file\n self.sum_wrt = None # tensorboard summary writer\n self.loss_trains = []\n self.loss_devs = []\n self.acc_chars = []\n self.acc_words = []\n self.f_scores = []\n self.learning_rates = []\n\n @classmethod\n def model_id(cls, cfg: Namespace) -> str:\n \"\"\"\n get model ID\n Args:\n cfg: config\n Returns:\n model ID\n \"\"\"\n model_cfgs = [\n os.path.basename(cfg.in_pfx),\n 'cut{}'.format(cfg.cutoff),\n 'win{}'.format(cfg.window),\n 'sdo{}'.format(cfg.spc_dropout),\n 'emb{}'.format(cfg.embed_dim),\n 'lr{}'.format(cfg.learning_rate),\n 'lrd{}'.format(cfg.lr_decay),\n 'bs{}'.format(cfg.batch_size),\n ]\n return '.'.join(model_cfgs)\n\n def _load_dataset(self):\n \"\"\"\n load training dataset\n \"\"\"\n dataset_dev_path = '{}.dev'.format(self.cfg.in_pfx)\n self.dataset_dev = PosDataset(self.cfg, self.rsc.restore_dic,\n open(dataset_dev_path, 'r', encoding='UTF-8'))\n dataset_test_path = '{}.test'.format(self.cfg.in_pfx)\n self.dataset_test = PosDataset(self.cfg, self.rsc.restore_dic,\n open(dataset_test_path, 'r', encoding='UTF-8'))\n dataset_train_path = '{}.train'.format(self.cfg.in_pfx)\n self.dataset_train = PosDataset(self.cfg, self.rsc.restore_dic,\n open(dataset_train_path, 'r', encoding='UTF-8'))\n\n @classmethod\n def _dt_str(cls, dt_obj: datetime) -> str:\n \"\"\"\n string formatting for datetime object\n Args:\n dt_obj: datetime object\n Returns:\n string\n \"\"\"\n return dt_obj.strftime('%m/%d %H:%M:%S')\n\n @classmethod\n def _elapsed(cls, td_obj: timedelta) -> str:\n \"\"\"\n string formatting for timedelta object\n Args:\n td_obj: timedelta object\n Returns:\n string\n \"\"\"\n seconds = td_obj.seconds\n if td_obj.days > 0:\n seconds += td_obj.days * 24 * 3600\n hours = seconds // 3600\n seconds -= hours * 3600\n minutes = seconds // 60\n seconds -= minutes * 60\n return '{}:{:02d}:{:02d}'.format(hours, minutes, seconds)\n\n def _restore_prev_train(self):\n \"\"\"\n 기존에 학습하다 중지한 경우 그 이후부터 계속해서 학습할 수 있도록 이전 상태를 복원한다.\n \"\"\"\n 
out_path = pathlib.Path(self.cfg.out_dir)\n cfg_path = pathlib.Path('{}/config.json'.format(self.cfg.out_dir))\n if not out_path.is_dir() or not cfg_path.is_file():\n return\n logging.info('==== continue training: %s ====', self.cfg.model_id)\n cfg = json.load(open(cfg_path, 'r', encoding='UTF-8'))\n gpu_num = self.cfg.gpu_num\n for key, val in cfg.items():\n setattr(self.cfg, key, val)\n setattr(self.cfg, 'gpu_num', gpu_num)\n self._revert_to_best(False)\n\n f_score_best = 0.0\n best_idx = -1\n for idx, line in enumerate(open('{}/log.tsv'.format(self.cfg.out_dir))):\n line = line.rstrip('\\r\\n')\n if not line:\n continue\n (epoch, loss_train, loss_dev, acc_char, acc_word, f_score, learning_rate) \\\n = line.split('\\t')\n self.cfg.epoch = int(epoch) + 1\n self.cfg.best_epoch = self.cfg.epoch\n self.loss_trains.append(float(loss_train))\n self.loss_devs.append(float(loss_dev))\n self.acc_chars.append(float(acc_char))\n self.acc_words.append(float(acc_word))\n self.f_scores.append(float(f_score))\n self.learning_rates.append(float(learning_rate))\n if float(f_score) > f_score_best:\n f_score_best = float(f_score)\n best_idx = idx\n logging.info('---- [%d] los(trn/dev): %.4f / %.4f, acc(chr/wrd): %.4f / %.4f, ' \\\n 'f-score: %.4f, lr: %.8f ----', self.cfg.epoch,\n self.loss_trains[best_idx], self.loss_devs[best_idx], self.acc_chars[best_idx],\n self.acc_words[best_idx], self.f_scores[best_idx], self.learning_rates[-1])\n\n def train(self):\n \"\"\"\n train model with dataset\n \"\"\"\n self._restore_prev_train()\n logging.info('config: %s', pprint.pformat(self.cfg.__dict__))\n\n train_begin = datetime.now()\n logging.info('{{{{ training begin: %s {{{{', self._dt_str(train_begin))\n pathlib.Path(self.cfg.out_dir).mkdir(parents=True, exist_ok=True)\n self.log_file = open('{}/log.tsv'.format(self.cfg.out_dir), 'at')\n self.sum_wrt = SummaryWriter(self.cfg.out_dir)\n patience = self.cfg.patience\n for _ in range(100000):\n is_best = self._train_epoch()\n if is_best:\n patience = self.cfg.patience\n continue\n if patience <= 0:\n break\n self._revert_to_best(True)\n patience -= 1\n logging.info('==== revert to EPOCH[%d], f-score: %.4f, patience: %d ====',\n self.cfg.best_epoch, max(self.f_scores), patience)\n\n train_end = datetime.now()\n train_elapsed = self._elapsed(train_end - train_begin)\n logging.info('}}}} training end: %s, elapsed: %s, epoch: %s }}}}',\n self._dt_str(train_end), train_elapsed, self.cfg.epoch)\n\n avg_loss, acc_char, acc_word, f_score = self.evaluate(False)\n logging.info('==== test loss: %.4f, char acc: %.4f, word acc: %.4f, f-score: %.4f ====',\n avg_loss, acc_char, acc_word, f_score)\n\n def _revert_to_best(self, is_decay_lr: bool):\n \"\"\"\n 이전 best 모델로 되돌린다.\n Args:\n is_decay_lr: whether multiply decay factor or not\n \"\"\"\n self.model.load('{}/model.state'.format(self.cfg.out_dir))\n if is_decay_lr:\n self.cfg.learning_rate *= self.cfg.lr_decay\n self._load_optim(self.cfg.learning_rate)\n\n def _train_epoch(self) -> bool:\n \"\"\"\n 한 epoch을 학습한다. 
배치 단위는 글자 단위\n Returns:\n 현재 epoch이 best 성능을 나타냈는 지 여부\n \"\"\"\n batch_contexts = []\n batch_left_spc_masks = []\n batch_right_spc_masks = []\n batch_labels = []\n batch_spaces = []\n loss_trains = []\n for train_sent in tqdm(self.dataset_train, 'EPOCH[{}]'.format(self.cfg.epoch),\n len(self.dataset_train), mininterval=1, ncols=100):\n # 배치 크기만큼 찰 때까지 문장을 추가\n batch_contexts.extend(train_sent.get_contexts(self.cfg, self.rsc))\n left_spc_masks, right_spc_masks = train_sent.get_spc_masks(self.cfg, self.rsc, True)\n batch_left_spc_masks.extend(left_spc_masks)\n batch_right_spc_masks.extend(right_spc_masks)\n batch_labels.extend(train_sent.get_labels(self.rsc))\n batch_spaces.extend(train_sent.get_spaces())\n if len(batch_labels) < self.cfg.batch_size:\n continue\n\n # 형태소 태깅 모델 학습\n self.model.train()\n batch_outputs_pos, batch_outputs_spc = \\\n self.model(PosSentTensor.to_tensor(batch_contexts, self.cfg.gpu_num),\n PosSentTensor.to_tensor(batch_left_spc_masks, self.cfg.gpu_num),\n PosSentTensor.to_tensor(batch_right_spc_masks, self.cfg.gpu_num))\n batch_outputs_pos.requires_grad_()\n batch_outputs_spc.requires_grad_()\n loss_train_pos = self.criterion(batch_outputs_pos,\n PosSentTensor.to_tensor(batch_labels, self.cfg.gpu_num))\n loss_train_spc = self.criterion(batch_outputs_spc,\n PosSentTensor.to_tensor(batch_spaces, self.cfg.gpu_num))\n loss_train = loss_train_pos + loss_train_spc\n loss_trains.append(loss_train.item())\n loss_train.backward()\n self.optimizer.step()\n self.optimizer.zero_grad()\n\n # 배치 데이터 초기화\n batch_contexts = []\n batch_left_spc_masks = []\n batch_right_spc_masks = []\n batch_labels = []\n batch_spaces = []\n\n avg_loss_dev, acc_char, acc_word, f_score = self.evaluate(True)\n is_best = self._check_epoch(loss_trains, avg_loss_dev, acc_char, acc_word, f_score)\n self.cfg.epoch += 1\n return is_best\n\n def _check_epoch(self, loss_trains: List[float], avg_loss_dev: float,\n acc_char: float, acc_word: float, f_score: float) -> bool:\n \"\"\"\n 매 epoch마다 수행하는 체크\n Args:\n loss_trains: train 코퍼스에서 각 배치별 loss 리스트\n avg_loss_dev: dev 코퍼스 문장 별 평균 loss\n acc_char: 음절 정확도\n acc_word: 어절 정확도\n f_score: f-score\n Returns:\n 현재 epoch이 best 성능을 나타냈는 지 여부\n \"\"\"\n avg_loss_train = sum(loss_trains) / len(loss_trains)\n loss_trains.clear()\n self.loss_trains.append(avg_loss_train)\n self.loss_devs.append(avg_loss_dev)\n self.acc_chars.append(acc_char)\n self.acc_words.append(acc_word)\n self.f_scores.append(f_score)\n self.learning_rates.append(self.cfg.learning_rate)\n is_best = self._is_best()\n is_best_str = 'BEST' if is_best else '< {:.4f}'.format(max(self.f_scores))\n logging.info('[Los trn] [Los dev] [Acc chr] [Acc wrd] [F-score]' \\\n ' [LR]')\n logging.info('{:9.4f} {:9.4f} {:9.4f} {:9.4f} {:9.4f} {:8} {:.8f}' \\\n .format(avg_loss_train, avg_loss_dev, acc_char, acc_word, f_score, is_best_str,\n self.cfg.learning_rate))\n print('{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}'.format(self.cfg.epoch, avg_loss_train, avg_loss_dev,\n acc_char, acc_word, f_score,\n self.cfg.learning_rate),\n file=self.log_file)\n self.log_file.flush()\n self.sum_wrt.add_scalar('loss-train', avg_loss_train, self.cfg.epoch)\n self.sum_wrt.add_scalar('loss-dev', avg_loss_dev, self.cfg.epoch)\n self.sum_wrt.add_scalar('acc-char', acc_char, self.cfg.epoch)\n self.sum_wrt.add_scalar('acc-word', acc_word, self.cfg.epoch)\n self.sum_wrt.add_scalar('f-score', f_score, self.cfg.epoch)\n self.sum_wrt.add_scalar('learning-rate', self.cfg.learning_rate, self.cfg.epoch)\n return is_best\n\n def _is_best(self) -> bool:\n 
\"\"\"\n 이번 epoch에 가장 좋은 성능을 냈는 지 확인하고 그럴 경우 현재 상태를 저장한다.\n Returns:\n 마지막 f-score의 best 여부\n \"\"\"\n if len(self.f_scores) > 1 and max(self.f_scores[:-1]) >= self.f_scores[-1]:\n return False\n # this epoch hits new max value\n self.cfg.best_epoch = self.cfg.epoch\n self.model.save('{}/model.state'.format(self.cfg.out_dir))\n torch.save(self.optimizer.state_dict(), '{}/optimizer.state'.format(self.cfg.out_dir))\n with open('{}/config.json'.format(self.cfg.out_dir), 'w', encoding='UTF-8') as fout:\n json.dump(vars(self.cfg), fout, indent=2, sort_keys=True)\n return True\n\n def _load_optim(self, learning_rate: float):\n \"\"\"\n load optimizer parameters\n Args:\n learning_rate: learning rate\n \"\"\"\n path = '{}/optimizer.state'.format(self.cfg.out_dir)\n state_dict = torch.load(path, map_location=lambda storage, loc: storage)\n self.optimizer = torch.optim.Adam(self.model.parameters(), learning_rate)\n self.optimizer.load_state_dict(state_dict)\n self.optimizer.param_groups[0]['lr'] = learning_rate\n\n def evaluate(self, is_dev: bool) -> Tuple[float, float, float, float]:\n \"\"\"\n evaluate f-score\n Args:\n is_dev: whether evaluate on dev set or not\n Returns:\n average dev loss\n character accuracy\n word accuracy\n f-score\n \"\"\"\n dataset = self.dataset_dev if is_dev else self.dataset_test\n self.model.eval()\n losses = []\n for sent in dataset:\n contexts = sent.get_contexts(self.cfg, self.rsc)\n # 만약 spc_dropout이 1.0 이상이면 공백을 전혀 쓰지 않는 것이므로 평가 시에도 적용한다.\n left_spc_masks, right_spc_masks = sent.get_spc_masks(self.cfg, self.rsc,\n self.cfg.spc_dropout >= 1.0)\n gpu_num = self.cfg.gpu_num\n outputs_pos, outputs_spc = self.model(PosSentTensor.to_tensor(contexts, gpu_num),\n PosSentTensor.to_tensor(left_spc_masks, gpu_num),\n PosSentTensor.to_tensor(right_spc_masks, gpu_num))\n labels = PosSentTensor.to_tensor(sent.get_labels(self.rsc), self.cfg.gpu_num)\n spaces = PosSentTensor.to_tensor(sent.get_spaces(), self.cfg.gpu_num)\n loss_pos = self.criterion(outputs_pos, labels)\n loss_spc = self.criterion(outputs_spc, spaces)\n loss = loss_pos + loss_spc\n losses.append(loss.item())\n _, predicts = F.softmax(outputs_pos, dim=1).max(1)\n pred_tags = [self.rsc.vocab_out[t.item()] for t in predicts]\n pred_sent = copy.deepcopy(sent)\n pred_sent.set_pos_result(pred_tags, self.rsc.restore_dic)\n self.evaler.count(sent, pred_sent)\n avg_loss = sum(losses) / len(losses)\n char_acc, word_acc, f_score = self.evaler.evaluate()\n return avg_loss, char_acc, word_acc, f_score\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.nn.functional.softmax", "torch.cuda.is_available", "torch.load" ] ]
princeton-vl/oasis
[ "5835d24c331d78e91becba29f7e4a53ccd3e376e" ]
[ "experiments/depth/train2.py" ]
[ "import argparse\nimport os\n\nimport cv2\nimport math\nimport torch\nimport torch.nn.parallel\nimport numpy as np\n\nimport valid2\n\nimport config\nimport TBLogger\n\nfrom utils import makedir_if_not_exist, StoreDictKeyPair, save_obj\nfrom torch import optim\nfrom torch.utils import data\nfrom torch.autograd import Variable\n\n\nfrom HourglassNetwork import HourglassNetwork\nfrom ReDWebNetReluMin import ReDWebNetReluMin, ReDWebNetReluMin_raw\nfrom LocalBackprojLoss2 import LocalBackprojLoss2\nfrom OASISDataset2 import OASISDataset, OASISDatasetVal, OASIS_collate_fn\n\n\ndef save_model(optimizer, model, iter, prev_iter, prefix=''):\n\tmakedir_if_not_exist(config.JOBS_MODEL_DIR)\n\ttorch.save(model.state_dict(), os.path.join(config.JOBS_MODEL_DIR, '%smodel_iter_%d.bin' % (prefix, iter + prev_iter) ))\n\ttorch.save(optimizer.state_dict(), os.path.join(config.JOBS_MODEL_DIR, '%sopt_state_iter_%d.bin' % (prefix, iter + prev_iter) ))\n\ndef get_prev_iter(pretrained_file):\t\n\ttemp = pretrained_file.replace('.bin', '')\n\tprev_iter = int(temp.split('_')[-1])\n\t \n\treturn prev_iter\n\n\ndef vis_depth_by_surface(depths, surface_id):\n\tprint(np.unique(depths))\n\tout = depths.copy()\n\n\tfor id in np.unique(surface_id):\n\t\tif id == 0:\n\t\t\tcontinue\n\t\tmask = surface_id == id\n\t\tout[mask] = out[mask] - np.min(out[mask])\n\t\tout[mask] = out[mask] / np.max(out[mask]) * 255.0\n\t\n\treturn out.astype(np.uint8)\n\ndef vis_depth(depths, mask):\n\tprint(np.unique(depths))\n\tout = depths.copy()\n\n\tout[mask] = out[mask] - np.min(out[mask])\n\tout[mask] = out[mask] / np.max(out[mask]) * 255.0\n\t\n\treturn out.astype(np.uint8)\n\ndef vis_depth_full(depths, mask = None):\n\t# print(np.unique(depths))\n\tout = depths.copy()\n\tif mask is None:\n\t\tmask = out > 0\n\tout = out - np.min(out[mask])\n\tout = out / np.max(out[mask]) * 255.0\n\tout[out>255.0] = 255.0\n\tout[out<0.0] = 0.0\n\treturn out.astype(np.uint8)\t\n\n\ndef train(dataset_name, model_name, loss_name, \n\t\t n_GPUs, b_oppi, b_data_aug, b_sort, \\\n\t\t train_file, valid_file,\\\n\t\t learning_rate, num_iters, num_epoches,\\\n\t\t batch_size, num_loader_workers, pretrained_file,\\\n\t\t model_save_interval, model_eval_interval, exp_name):\n\n\tNetworkType = {\n\t\t\t\t \t\"NIPS\":HourglassNetwork, \n\t\t\t\t \t\"ReDWebNetReluMin\": ReDWebNetReluMin,\n\t\t\t\t \t\"ReDWebNetReluMin_raw\": ReDWebNetReluMin_raw,\n\t\t\t\t }\n\tLossType = {\n\t\t\t\t\t\t\"LocalBackprojLoss2\": LocalBackprojLoss2,\n\t\t\t\t\t}\n\tDatasetsType = {\n\t\t\t\t\t\t\"OASISDataset\":{'train_dataset':OASISDataset, 'val_dataset':OASISDatasetVal, 't_val_dataset':OASISDataset},\n\t\t\t\t\t}\n\n\tdevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\tprint(\"Using CUDA:\", torch.cuda.is_available())\n\t# create (and load) model. 
Should wrap with torch.nn.parallel.DistributedDataParallel before loading pretraiend model (https://github.com/pytorch/examples/blob/master/imagenet/main.py)\n\tmodel = NetworkType[model_name]().to(device)\n\t\n\tb_resnet_prep = model_name != 'NIPS'\n\n\tif n_GPUs > 1:\n\t\tprint( \"######################################################\")\n\t\tprint( \"Using %d GPUs, batch_size is %d\" % (n_GPUs, batch_size))\n\t\tprint( \"######################################################\")\n\t\tmodel = torch.nn.parallel.DataParallel(model)\n\n\tprint ('num_loader_workers:', num_loader_workers)\n\n\t# resume from a checkpoint model\n\tprev_iter = 0\n\tif pretrained_file:\n\t\tmodel.load_state_dict(torch.load( pretrained_file ))\n\t\tprev_iter = get_prev_iter(pretrained_file)\n\tprint (\"Prev_iter: {}\".format(prev_iter))\n\n\t# set up criterion and optimizer\n\tcriterion = LossType[loss_name]()\n\t\n\toptimizer = optim.RMSprop(model.parameters(), lr=learning_rate)\t\n\n\ttry:\n\t\tif pretrained_file:\n\t\t\tprint (pretrained_file)\n\t\t\toptimizer.load_state_dict(torch.load( pretrained_file.replace('model_', 'opt_state_') ))\n\texcept:\n\t\tprint(\"Exception happens when trying to load optimizer state, possibly due to different learning rate strategy.\")\n\n\t\n\t# create dataset\t\n\tt_dataset = DatasetsType[dataset_name]['train_dataset']( csv_filename= train_file, b_data_aug = b_data_aug, b_resnet_prep = b_resnet_prep, b_oppi = b_oppi )\n\tv_dataset = DatasetsType[dataset_name]['val_dataset']( csv_filename= valid_file, b_resnet_prep = b_resnet_prep )\t\n\ttv_dataset = DatasetsType[dataset_name]['t_val_dataset']( csv_filename= train_file, b_resnet_prep = b_resnet_prep )\n\tt_data_loader = data.DataLoader(t_dataset, batch_size=batch_size, num_workers=num_loader_workers, shuffle=True, collate_fn = OASIS_collate_fn)\n\ttv_data_loader = data.DataLoader(tv_dataset, batch_size=1, num_workers=0, shuffle=False, collate_fn = OASIS_collate_fn)\n\tv_data_loader = data.DataLoader(v_dataset, batch_size=1, num_workers=0, shuffle=False, collate_fn = OASIS_collate_fn)\n\t\n\n\t# create tensorboard logger\n\tlogger = TBLogger.TBLogger(makedir_if_not_exist(config.JOBS_LOG_DIR))\n\n\tcv2.setNumThreads(0)\n\t\n\titer = 1\n\tbest_v_WKDR = float('inf')\n\tbest_siv = float('inf')\n\tfor epoch in range(num_epoches):\n\t\tprint (\"==============epoch = \", epoch)\n\t\tfor step, (inputs, metric_depth, surface_ids, target, _, focals, names) in enumerate(t_data_loader):\n\n\t\t\tif iter >= num_iters:\n\t\t\t\tbreak\n\t\t\t\n\t\t\t###### zero gradient\n\t\t\toptimizer.zero_grad()\n\n\t\t\t###### read in training data\n\t\t\tinput_var = inputs.to(device)\n\t\t\tmetric_depth_var = [a.to(device) for a in metric_depth]\n\t\t\tsurface_ids_var = surface_ids.to(device)\n\t\t\tfocals_gt_var = focals.to(device)\t# TODO\n\t\t\ttarget_var = [a.to(device) for a in target]\n\n\t\t\t###### forwarding\n\t\t\toutput_var, focal_pred_var = model(input_var)\n\t\t\t\n\t\t\t# TODO: remove\n\t\t\tif iter % 3000 == 0 and dataset_name != 'DIWDataset' :\t\n\t\t\t\ttry:\t\t\t\n\t\t\t\t\t# pred_depth = np.exp(output_var.cpu().detach().numpy())\t# when the network is predicting log depth.\n\t\t\t\t\tpred_depth = output_var.cpu().detach().numpy()\t# when the network is predicting absolute depth\n\t\t\t\t\t\n\t\t\t\t\tc = surface_ids.cpu().detach().numpy()\n\t\t\t\t\t_p_img = vis_depth_by_surface(pred_depth[0,0,:,:], c[0,0,:,:])\n\t\t\t\t\t_p_full_img = vis_depth(pred_depth[0,0,:,:], c[0,0,:,:] > 
0)\n\t\t\t\t\t\n\t\t\t\t\tlogger.add_image('train/pred_depth_surface', torch.from_numpy(_p_img), (iter + prev_iter), dataformats=\"HW\")\n\t\t\t\t\tlogger.add_image('train/pred_depth', torch.from_numpy(_p_full_img), (iter + prev_iter), dataformats=\"HW\")\n\t\t\t\t\tif b_resnet_prep:\n\t\t\t\t\t\tprint(\"ResNet Prep\")\n\t\t\t\t\t\tout_color = inputs[0].cpu().detach().numpy()\n\t\t\t\t\t\tout_color[0,:,:] = (out_color[0,:,:] * 0.229 + 0.485 ) *255.0 \n\t\t\t\t\t\tout_color[1,:,:] = (out_color[1,:,:] * 0.224 + 0.456 ) *255.0 \n\t\t\t\t\t\tout_color[2,:,:] = (out_color[2,:,:] * 0.225 + 0.406 ) *255.0 \n\t\t\t\t\t\tout_color = out_color.astype(np.uint8)\n\t\t\t\t\t\tlogger.add_image('train/img', torch.from_numpy(out_color), (iter + prev_iter), dataformats=\"CHW\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tlogger.add_image('train/img', inputs[0], (iter + prev_iter), dataformats=\"CHW\")\n\t\t\t\t\ttry:\n\t\t\t\t\t\tb = metric_depth[0].cpu().detach().numpy()\t\n\t\t\t\t\t\t_gt_img = vis_depth_full(b, c[0,0,:,:] > 0)\n\t\t\t\t\t\tlogger.add_image('train/gt_depth', torch.from_numpy(_gt_img), (iter + prev_iter), dataformats=\"HW\")\n\t\t\t\t\texcept:\n\t\t\t\t\t\tb = np.zeros((240,320), dtype= np.uint8)\n\t\t\t\t\t\tlogger.add_image('train/gt_depth', torch.from_numpy(b), (iter + prev_iter), dataformats=\"HW\")\n\t\t\t\t\t\tprint(\"No data for gt depth.\")\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tprint(str(e))\n\n\t\t\t###### get loss\n\t\t\tif loss_name in [\"LocalBackprojLoss\", \"LocalBackprojLoss2\", \"BackprojLoss\", \"BackprojLoss2\" ]:\n\t\t\t\tloss = criterion(preds = output_var, \t\t\t\t\t\t\t \n\t\t\t\t\t\t\t\tgts = metric_depth_var, \n\t\t\t\t\t\t\t\tsurface_ids = surface_ids_var,\n\t\t\t\t\t\t\t\tfocal_gts = focals_gt_var,\n\t\t\t\t\t\t\t\tfocal_preds = focal_pred_var)\t\t\t\n\t\t\t\n\t\t\t\n\t\t\tprint(iter + prev_iter, \"Total_loss: %g\" % loss.item())\n\t\t\tif math.isnan(loss.item()):\n\t\t\t\timport sys\n\t\t\t\tsys.exit()\n\t\t\tif loss.item() > 1e+8:\n\t\t\t\tprint(names)\n\n\t\t\t\n\t\t\t###### save to log\n\t\t\tlogger.add_value('train/Loss', loss.item(), step=(iter + prev_iter) )\t\t\t\n\t\t\t\n\n\t\t\t###### back propagate\t\t\t\n\t\t\tloss.backward()\n\t\t\toptimizer.step()\n\n\t\t\t\n\t\t\tif (iter + prev_iter) % model_save_interval == 0:\n\t\t\t\tsave_model(optimizer, model, iter, prev_iter)\n\n\t\t\tif (iter + prev_iter) % model_eval_interval == 0:\t\t\t\t\n\t\t\t\tprint (\"Evaluating at iter %d\" % iter)\n\t\t\t\tmodel.eval()\n\t\t\t\tif n_GPUs > 1:\t\t\n\t\t\t\t\tprint (\"========================================validation set\")\n\t\t\t\t\tv_rel_error, _, _, v_LSIVRMSE = valid2.valid(model.module, v_data_loader, criterion, in_thresh=0.0)\n\t\t\t\t\tprint (\"========================================training set\")\n\t\t\t\t\tt_rel_error, _, _, t_LSIVRMSE = valid2.valid(model.module, tv_data_loader, criterion, in_thresh=0.0, max_iter=500)\n\t\t\t\telse:\n\t\t\t\t\tprint (\"========================================validation set\")\n\t\t\t\t\tv_rel_error, _, _, v_LSIVRMSE = valid2.valid(model, v_data_loader, criterion, in_thresh=0.0, max_iter=500)\n\t\t\t\t\tprint (\"========================================training set\")\n\t\t\t\t\tt_rel_error, _, _, t_LSIVRMSE = valid2.valid(model, tv_data_loader, criterion, in_thresh=0.0, max_iter=500)\n\t\t\t\t\n\t\t\t\tlogger.add_value('train/WKDR', t_rel_error['WKDR_neq'], step=(iter + prev_iter))\n\t\t\t\tlogger.add_value('train/LSIV_RMSE', t_LSIVRMSE[\"LSIV\"], step=(iter + prev_iter))\n\t\t\t\tlogger.add_value('val/WKDR', 
v_rel_error['WKDR_neq'], step=(iter + prev_iter) )\n\t\t\t\tlogger.add_value('val/LSIV_RMSE', v_LSIVRMSE[\"LSIV\"], step=(iter + prev_iter))\n\t\t\t\tmodel.train()\n\t\t\t\t\n\t\t\t\tif best_v_WKDR > v_rel_error['WKDR_neq']:\n\t\t\t\t\tbest_v_WKDR = v_rel_error['WKDR_neq']\n\t\t\t\t\tsave_model(optimizer, model, iter, prev_iter, prefix = 'best_rel')\n\t\t\t\tif best_siv > v_LSIVRMSE[\"LSIV\"]:\n\t\t\t\t\tbest_siv = v_LSIVRMSE[\"LSIV\"]\n\t\t\t\t\tsave_model(optimizer, model, iter, prev_iter, prefix = 'best_siv')\n\t\t\t\t\n\t\t\t\tsave_model(optimizer, model, iter, prev_iter)\n\t\t\t\t\t\n\t\t\titer += 1\n\n\t\t\tinputs = None\n\t\t\ttarget = None\t\t\t\n\n\t\tif iter >= num_iters:\n\t\t\tbreak\n\n\t\n\n\tsave_model(optimizer, model, iter, prev_iter)\n\n\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser()\n\t# parser.add_argument('--network_name', '-nn', default=config.DEFAULT_NETWORK_NAME)\n\tparser.add_argument('--train_file', '-t', default='')\t# should be absolute path\n\tparser.add_argument('--valid_file', '-v', default='')\n\tparser.add_argument('--dataset_name', '-dn', default='OASISDataset') \n\tparser.add_argument('--model_name', '-mn', default='NIPS') #\n\tparser.add_argument('--loss_name', default='BackprojLoss') # \n\t# parser.add_argument('--optim_name', '-on', default=config.DEFAULT_OPTIM_NAME)\n\tparser.add_argument('--num_iters', '-iter', default=100000, type=int)\n\tparser.add_argument('--num_epoches', '-ne', default=100000, type=int)\n\tparser.add_argument('--batch_size', '-bs', default=4, type=int)\n\tparser.add_argument('--model_save_interval', '-mt', default=2000, type=int)\n\tparser.add_argument('--model_eval_interval', '-et', default=3000, type=int)\n\tparser.add_argument('--learning_rate', '-lr', default=0.001, type=float)\n\tparser.add_argument('--n_GPUs', '-ngpu', default=1, type=int)\t\n\tparser.add_argument('--num_loader_workers', '-nlw', type=int, default=2)\n\tparser.add_argument('--pretrained_file', '-pf', default=None)\n\tparser.add_argument('--b_oppi', '-b_oppi', action='store_true', default=False)\n\tparser.add_argument('--b_sort', '-b_sort', action='store_true', default=False)\n\tparser.add_argument('--b_data_aug', '-b_data_aug', action='store_true', default=False)\n\t# parser.add_argument('--debug', '-d', action='store_true')\n\tparser.add_argument('--exp_name', default='debug') #\n\n\targs = parser.parse_args()\n\n\targs_dict = vars(args)\n\n\tconfig.JOBS_MODEL_DIR = \"./exp/%s/models\" % args.exp_name\n\tconfig.JOBS_LOG_DIR = \"./exp/%s/log\" % args.exp_name\n\tconfig.JOBS_DIR = './exp/%s' % args.exp_name\n\n\tfolder = makedir_if_not_exist(config.JOBS_DIR)\n\tsave_obj(args_dict, os.path.join(config.JOBS_DIR, 'args.pkl'))\n\t\n\ttrain(**args_dict)\n\n\tprint (\"End of train.py\")\n\n\n\n\n\n\n" ]
[ [ "torch.nn.parallel.DataParallel", "numpy.min", "numpy.unique", "torch.load", "torch.utils.data.DataLoader", "torch.from_numpy", "numpy.max", "torch.cuda.is_available", "numpy.zeros" ] ]
ondrejdyck/pyTEMlib
[ "b8ed2000f1bb44c7add966cef444a02e456258cb" ]
[ "pyTEMlib/interactive_eels.py" ]
[ "\"\"\" Interactive routines for EELS analysis\n\nthis file provides additional dialogs for EELS quantification\n\nAuthor: Gerd Duscher\n\"\"\"\n\nimport numpy as np\n\nfrom PyQt5 import QtWidgets, QtCore, QtGui\nimport sidpy\nimport matplotlib.patches as patches\nfrom matplotlib.widgets import RectangleSelector, SpanSelector\n\nimport h5py # TODO: needs to go\nimport matplotlib.pyplot as plt\n\nfrom IPython.display import display\nimport ipywidgets as widgets\n\nfrom pyTEMlib import eels_tools as eels\nfrom pyTEMlib import eels_dialog\n\nfrom pyTEMlib import info_dialog\nfrom pyTEMlib import peak_dialog\n\nmajor_edges = ['K1', 'L3', 'M5', 'N5']\nall_edges = ['K1', 'L1', 'L2', 'L3', 'M1', 'M2', 'M3', 'M4', 'M5', 'N1', 'N2', 'N3', 'N4', 'N5', 'N6', 'N7', 'O1', 'O2',\n 'O3', 'O4', 'O5', 'O6', 'O7', 'P1', 'P2', 'P3']\nfirst_close_edges = ['K1', 'L3', 'M5', 'M3', 'N5', 'N3']\n\nCompositionDialog = eels_dialog.EELSDialog\nCurveVisualizer = eels_dialog.CurveVisualizer\nInfoDialog = info_dialog.InfoDialog\nPeakFitDialog = peak_dialog.PeakFitDialog\n\n\nclass PeriodicTableDialog(QtWidgets.QDialog):\n \"\"\" Modal dialog to get a selection of elements.\n\n Elements that are not having a valid cross sections are disabled.\n\n Parameters\n ----------\n initial_elements: list of str\n the elements that are already selected\n energy_scale: list or numpy array\n energy-scale of spectrum/spectra to determine likely edges\n\n Returns\n -------\n list of strings: elements.\n\n Example\n -------\n >> PT_dialog = periodic_table_dialog(None, ['Mn', 'O'])\n >> if PT_dialog.exec_() == periodic_table_dialog.Accepted:\n >> selected_elements = PT_dialog.get_output()\n >> print(selected_elements)\n \"\"\"\n\n signal_selected = QtCore.pyqtSignal(list)\n\n def __init__(self, initial_elements=None, energy_scale=None, parent=None):\n super(PeriodicTableDialog, self).__init__(None, QtCore.Qt.WindowStaysOnTopHint)\n\n if initial_elements is None:\n initial_elements = [' ']\n if energy_scale is None:\n energy_scale = [100., 150., 200.]\n self.parent = parent\n self._output = []\n self.elements_selected = initial_elements\n self.energy_scale = np.array(energy_scale)\n\n self.setWindowTitle(\"Periodic Table\")\n likely_edges = get_likely_edges(self.energy_scale)\n self.likely_edges = likely_edges\n\n # GD:font = wx.Font(10, wx.MODERN, wx.NORMAL, wx.BOLD)\n self.buttons1 = []\n self.button = []\n self.pt_info = get_periodic_table_info()\n self.init_ui()\n\n for button in self.button:\n if button.text() in initial_elements:\n button.toggle()\n pass\n\n def on_close(self):\n self.get_output()\n self.signal_selected[list].emit(self._output)\n self.accept()\n\n def get_output(self):\n self._output = []\n for btn in self.button:\n if btn.isChecked():\n self._output.append(btn.text())\n\n def exec_(self):\n super(PeriodicTableDialog, self).exec_()\n return self._output\n\n def init_ui(self):\n\n v_sizer = QtWidgets.QVBoxLayout()\n g_sizer = QtWidgets.QGridLayout()\n\n main_group = QtWidgets.QWidget()\n\n color1 = \"background-color: lightblue;\\n\"\n color1l = \"background-color: dodgerblue;\\n\"\n color2 = \"background-color: coral;\\n\"\n\n for symbol, parameter in self.pt_info.items():\n self.button.append(QtWidgets.QPushButton(symbol))\n if parameter['PT_row'] > 7:\n self.button[-1].setStyleSheet(color2)\n elif '*' in symbol:\n self.button[-1].setStyleSheet(color2)\n else:\n if symbol in self.likely_edges:\n self.button[-1].setStyleSheet(color1l)\n else:\n self.button[-1].setStyleSheet(color1)\n if parameter['Z'] == 0:\n 
self.button[-1].setEnabled(False)\n self.button[-1].setFixedWidth(50)\n self.button[-1].setCheckable(True)\n g_sizer.addWidget(self.button[-1], parameter['PT_row'], parameter['PT_col'])\n main_group.setLayout(g_sizer)\n\n v_sizer.addWidget(main_group)\n self.setLayout(v_sizer)\n\n ok_button = QtWidgets.QPushButton('OK')\n ok_button.clicked.connect(self.on_close)\n\n v_sizer.addWidget(ok_button)\n self.setLayout(v_sizer)\n\n\nclass EnergySelector(QtWidgets.QDialog):\n \"\"\"Dialog and cursor to set energy scale\"\"\"\n\n signal_selected = QtCore.pyqtSignal(bool)\n\n def __init__(self, dset=None):\n super(EnergySelector, self).__init__(None, QtCore.Qt.WindowStaysOnTopHint)\n\n if not isinstance(dset, sidpy.Dataset):\n return\n if dset is None:\n return\n if dset.view is None:\n return\n self.dataset = dset\n\n if hasattr(dset.view, 'axis'):\n self.axis = dset.view.axis\n # self.setWindowTitle('p')\n elif hasattr(dset.view, 'axes'):\n self.axis = dset.view.axes[1]\n else:\n return\n\n self.spec_dim = -1\n for dim, axis in self.dataset._axes.items():\n if axis.dimension_type == sidpy.DimensionType.SPECTRAL:\n self.spec_dim = dim\n if self.spec_dim < 0:\n raise TypeError('We need at least one SPECTRAL dimension')\n\n self.energy_scale = self.dataset._axes[self.spec_dim].values\n self.dispersion = self.energy_scale[1] - self.energy_scale[0]\n self.offset = self.energy_scale[0]\n self.spectrum = np.zeros(2)\n\n self.change = 0\n\n self.x_min = self.energy_scale[int(len(self.energy_scale)/4)]\n self.x_max = self.energy_scale[int(len(self.energy_scale) / 4*3)]\n self.setWindowTitle(\"Select Energy\")\n\n valid_float = QtGui.QDoubleValidator()\n\n layout = QtWidgets.QGridLayout()\n layout.setVerticalSpacing(2)\n self.label1 = QtWidgets.QLabel('Start:')\n self.edit1 = QtWidgets.QLineEdit('0')\n self.edit1.setValidator(valid_float)\n self.unit1 = QtWidgets.QLabel('eV')\n\n self.label2 = QtWidgets.QLabel('End:')\n self.edit2 = QtWidgets.QLineEdit('0')\n self.edit2.setValidator(valid_float)\n self.unit2 = QtWidgets.QLabel('eV')\n\n self.label3 = QtWidgets.QLabel('Dispersion:')\n self.edit3 = QtWidgets.QLineEdit('0')\n self.edit3.setValidator(valid_float)\n self.unit3 = QtWidgets.QLabel('eV')\n\n self.edit1.editingFinished.connect(self.on_enter)\n self.edit2.editingFinished.connect(self.on_enter)\n self.edit3.editingFinished.connect(self.on_enter)\n\n layout.addWidget(self.label1, 0, 0)\n layout.addWidget(self.edit1, 0, 1)\n layout.addWidget(self.unit1, 0, 2)\n\n layout.addWidget(self.label2, 1, 0)\n layout.addWidget(self.edit2, 1, 1)\n layout.addWidget(self.unit2, 1, 2)\n\n layout.addWidget(self.label3, 2, 0)\n layout.addWidget(self.edit3, 2, 1)\n layout.addWidget(self.unit3, 2, 2)\n\n self.ok_button = QtWidgets.QPushButton('OK')\n self.ok_button.clicked.connect(self.on_close)\n self.cancel_button = QtWidgets.QPushButton('Cancel')\n self.cancel_button.clicked.connect(self.on_close)\n\n layout.addWidget(self.ok_button, 3, 0)\n layout.addWidget(self.cancel_button, 3, 2)\n\n self.setLayout(layout)\n self.edit1.setFocus()\n self.plot()\n\n self.selector = RangeSelector(self.axis, self.line_select_callback)\n self.edit1.setText(f'{self.x_min:.3f}')\n self.edit2.setText(f'{self.x_max:.3f}')\n self.edit3.setText(f'{self.dispersion:.4f}')\n self.update()\n\n def line_select_callback(self, eclick, erelease):\n y_min, y_max = self.axis.get_ylim()\n self.x_min = self.selector.extents[0]\n self.x_max = self.selector.extents[1]\n self.selector.extents = (self.x_min, self.x_max, y_min, y_max)\n\n 
self.edit1.setText(f'{self.x_min:.3f}')\n self.edit2.setText(f'{self.x_max:.3f}')\n\n def on_enter(self):\n sender = self.sender()\n\n if sender == self.edit1:\n value = float(str(sender.displayText()).strip())\n if value == self.x_min:\n return\n self.change = value - self.x_min\n self.x_min += self.change\n self.x_max += self.change\n self.offset += self.change\n\n self.edit1.setText(f\"{self.x_min:.2f}\")\n self.edit2.setText(f\"{self.x_max:.2f}\")\n\n self.energy_scale = np.arange(len(self.energy_scale)) * self.dispersion + self.offset\n\n self.update()\n # self.axis.draw()\n # self.setWindowTitle(f'shift, {self.change}, {self.x_min}')\n\n elif sender == self.edit2:\n value = float(str(sender.displayText()).strip())\n if value == self.x_max:\n return\n start_channel = np.searchsorted(self.energy_scale, self.x_min)\n end_channel = np.searchsorted(self.energy_scale, self.x_max)\n\n self.x_max = value\n\n if end_channel - start_channel != 0:\n self.dispersion = (self.x_max - self.x_min) / (end_channel - start_channel)\n self.offset = self.x_min - start_channel * self.dispersion\n self.edit2.setText(f\"{self.x_max:.3f}\")\n self.edit3.setText(f\"{self.dispersion:.4f}\")\n self.energy_scale = np.arange(len(self.energy_scale)) * self.dispersion + self.offset\n\n self.update()\n # self.axis.draw()\n # self.setWindowTitle(f'range, {self.change}, {self.dispersion}')\n\n elif sender == self.edit3:\n value = float(str(sender.displayText()).strip())\n if self.dispersion == value:\n return\n\n start_channel = np.searchsorted(self.energy_scale, self.x_min)\n end_channel = np.searchsorted(self.energy_scale, self.x_max)\n self.dispersion = value\n self.energy_scale = np.arange(len(self.energy_scale)) * self.dispersion + self.offset\n self.x_min = self.energy_scale[start_channel]\n self.x_max = self.energy_scale[end_channel]\n self.update()\n # self.axis.draw()\n self.edit3.setText(f\"{self.dispersion:.3f}\")\n self.change = 0\n\n def on_close(self):\n sender = self.sender()\n if sender == self.ok_button:\n pass\n self.dataset.set_dimension(self.spec_dim, sidpy.Dimension(self.energy_scale, name='energy_scale',\n units='eV', quantity='energy loss',\n dimension_type='spectral'))\n else:\n pass\n self.selector.set_active(False)\n self.signal_selected[bool].emit(True)\n self.accept()\n\n def plot(self):\n if self.dataset.data_type == sidpy.DataType.SPECTRAL_IMAGE:\n self.spectrum = self.dataset.view.get_spectrum()\n else:\n self.spectrum = np.array(self.dataset)\n x_limit = self.axis.get_xlim()\n y_limit = self.axis.get_ylim()\n\n self.axis.clear()\n self.cplot = self.axis.plot(self.energy_scale, self.spectrum, label='spectrum')\n self.axis.set_xlim(x_limit)\n self.axis.set_ylim(y_limit)\n\n self.axis.figure.canvas.draw()\n\n def update(self):\n x_limit = self.axis.get_xlim()\n y_limit = self.axis.get_ylim()\n self.selector.extents = (self.x_min, self.x_max, y_limit[0], y_limit[1])\n\n x_limit = np.array(x_limit) + self.change\n\n self.cplot[0].set_data(self.energy_scale, self.spectrum)\n self.axis.set_xlim(x_limit)\n self.axis.set_ylim(y_limit)\n self.axis.figure.canvas.draw()\n\n\nclass RegionSelector(object):\n \"\"\"Selects fitting region and the regions that are excluded for each edge.\n\n Select a region with a spanSelector and then type 'a' for all of the fitting region or a number for the edge\n you want to define the region excluded from the fit (solid state effects).\n\n see Chapter4 'CH4-Working_with_X-Sections,ipynb' notebook\n\n \"\"\"\n\n def __init__(self, ax):\n self.ax = ax\n self.regions 
= {}\n self.rect = None\n self.xmin = 0\n self.width = 0\n\n self.span = SpanSelector(ax, self.on_select1, 'horizontal', useblit=True,\n rectprops=dict(alpha=0.5, facecolor='red'), span_stays=True)\n self.cid = ax.figure.canvas.mpl_connect('key_press_event', self.click)\n self.draw = ax.figure.canvas.mpl_connect('draw_event', self.onresize)\n\n def on_select1(self, xmin, xmax):\n self.xmin = xmin\n self.width = xmax - xmin\n\n def onresize(self, event):\n self.update()\n\n def delete_region(self, key):\n if key in self.regions:\n if 'Rect' in self.regions[key]:\n self.regions[key]['Rect'].remove()\n self.regions[key]['Text'].remove()\n del (self.regions[key])\n\n def update(self):\n\n y_min, y_max = self.ax.get_ylim()\n for key in self.regions:\n if 'Rect' in self.regions[key]:\n self.regions[key]['Rect'].remove()\n self.regions[key]['Text'].remove()\n\n xmin = self.regions[key]['xmin']\n width = self.regions[key]['width']\n height = y_max - y_min\n alpha = self.regions[key]['alpha']\n color = self.regions[key]['color']\n self.regions[key]['Rect'] = patches.Rectangle((xmin, y_min), width, height,\n edgecolor=color, alpha=alpha, facecolor=color)\n self.ax.add_patch(self.regions[key]['Rect'])\n\n self.regions[key]['Text'] = self.ax.text(xmin, y_max, self.regions[key]['text'], verticalalignment='top')\n\n def click(self, event):\n if str(event.key) in ['1', '2', '3', '4', '5', '6']:\n key = str(event.key)\n text = 'exclude \\nedge ' + key\n alpha = 0.5\n color = 'red'\n elif str(event.key) in ['a', 'A', 'B', 'b', 'f', 'F']:\n key = '0'\n color = 'blue'\n alpha = 0.2\n text = 'fit region'\n else:\n return\n\n if key not in self.regions:\n self.regions[key] = {}\n\n self.regions[key]['xmin'] = self.xmin\n self.regions[key]['width'] = self.width\n self.regions[key]['color'] = color\n self.regions[key]['alpha'] = alpha\n self.regions[key]['text'] = text\n\n self.update()\n\n def set_regions(self, region, start_x, width):\n key = ''\n if 'fit' in str(region):\n key = '0'\n if region in ['0', '1', '2', '3', '4', '5', '6']:\n key = region\n if region in [0, 1, 2, 3, 4, 5, 6]:\n key = str(region)\n\n if key not in self.regions:\n self.regions[key] = {}\n if key in ['1', '2', '3', '4', '5', '6']:\n self.regions[key]['text'] = 'exclude \\nedge ' + key\n self.regions[key]['alpha'] = 0.5\n self.regions[key]['color'] = 'red'\n elif key == '0':\n self.regions[key]['text'] = 'fit region'\n self.regions[key]['alpha'] = 0.2\n self.regions[key]['color'] = 'blue'\n\n self.regions[key]['xmin'] = start_x\n self.regions[key]['width'] = width\n\n self.update()\n\n def get_regions(self):\n tags = {}\n for key in self.regions:\n if key == '0':\n area = 'fit_area'\n else:\n area = key\n tags[area] = {}\n tags[area]['start_x'] = self.regions[key]['xmin']\n tags[area]['width_x'] = self.regions[key]['width']\n\n return tags\n\n def disconnect(self):\n for key in self.regions:\n if 'Rect' in self.regions[key]:\n self.regions[key]['Rect'].remove()\n self.regions[key]['Text'].remove()\n del self.span\n self.ax.figure.canvas.mpl_disconnect(self.cid)\n # self.ax.figure.canvas.mpl_disconnect(self.draw)\n pass\n\n\nclass RangeSelector(RectangleSelector):\n \"\"\"Select ranges of edge fitting interactively\"\"\"\n def __init__(self, ax, on_select):\n drawtype = 'box'\n spancoords = 'data'\n rectprops = dict(facecolor=\"blue\", edgecolor=\"black\", alpha=0.2, fill=True)\n\n super().__init__(ax, on_select, drawtype=drawtype,\n minspanx=0, minspany=0, useblit=False,\n lineprops=None, rectprops=rectprops, spancoords=spancoords,\n 
button=None, maxdist=10, marker_props=None,\n interactive=True, state_modifier_keys=None)\n\n self.artists = [self.to_draw, self._center_handle.artist,\n self._edge_handles.artist]\n\n def draw_shape(self, extents):\n x0, x1, y0, y1 = extents\n xmin, xmax = sorted([x0, x1])\n # ymin, ymax = sorted([y0, y1])\n xlim = sorted(self.ax.get_xlim())\n ylim = sorted(self.ax.get_ylim())\n\n xmin = max(xlim[0], xmin)\n ymin = ylim[0]\n xmax = min(xmax, xlim[1])\n ymax = ylim[1]\n\n self.to_draw.set_x(xmin)\n self.to_draw.set_y(ymin)\n self.to_draw.set_width(xmax - xmin)\n self.to_draw.set_height(ymax - ymin)\n\n\ndef get_likely_edges(energy_scale):\n \"\"\"get likely ionization edges within energy_scale\"\"\"\n x_sections = eels.get_x_sections()\n # print(energy_scale)\n energy_origin = energy_scale[0]\n energy_window = energy_scale[-1] - energy_origin\n selected_edges_unsorted = {}\n likely_edges = []\n selected_elements = []\n for element in range(1, 83):\n # print(element)\n element_z = str(eels.get_z(element))\n\n for key in x_sections[element_z]:\n if key in all_edges:\n onset = x_sections[element_z][key]['onset']\n if onset > energy_origin:\n if onset - energy_origin < energy_window:\n if element not in selected_edges_unsorted:\n selected_edges_unsorted[element] = {}\n # print(element, x_sections[element]['name'], key, x_sections[element][key]['onset'])\n # text = f\"\\n {x_sections[element_z]['name']:2s}-{key}: \" \\\n # f\"{x_sections[element_z][key]['onset']:8.1f} eV \"\n # print(text)\n\n selected_edges_unsorted[element][key] = {}\n selected_edges_unsorted[element][key]['onset'] = x_sections[element_z][key]['onset']\n\n if key in major_edges:\n selected_edges_unsorted[element][key]['intensity'] = 'major'\n selected_elements.append(x_sections[element_z]['name'])\n else:\n selected_edges_unsorted[element][key]['intensity'] = 'minor'\n\n if element in selected_edges_unsorted:\n for key in selected_edges_unsorted[element]:\n if selected_edges_unsorted[element][key]['intensity'] == 'major':\n likely_edges.append(x_sections[str(element)]['name']) # = {'Z':element, 'symmetry': key}\n\n return likely_edges\n\n\ndef get_periodic_table_info():\n \"\"\"Info for periodic table dialog\"\"\"\n pt_info = \\\n {'H': {'PT_row': 0, 'PT_col': 0, 'Z': 0},\n 'He': {'PT_row': 0, 'PT_col': 17, 'Z': 2}, 'Li': {'PT_row': 1, 'PT_col': 0, 'Z': 3},\n 'Be': {'PT_row': 1, 'PT_col': 1, 'Z': 4}, 'B': {'PT_row': 1, 'PT_col': 12, 'Z': 5},\n 'C': {'PT_row': 1, 'PT_col': 13, 'Z': 6}, 'N': {'PT_row': 1, 'PT_col': 14, 'Z': 7},\n 'O': {'PT_row': 1, 'PT_col': 15, 'Z': 8}, 'F': {'PT_row': 1, 'PT_col': 16, 'Z': 9},\n 'Ne': {'PT_row': 1, 'PT_col': 17, 'Z': 10}, 'Na': {'PT_row': 2, 'PT_col': 0, 'Z': 11},\n 'Mg': {'PT_row': 2, 'PT_col': 1, 'Z': 12}, 'Al': {'PT_row': 2, 'PT_col': 12, 'Z': 13},\n 'Si': {'PT_row': 2, 'PT_col': 13, 'Z': 14}, 'P': {'PT_row': 2, 'PT_col': 14, 'Z': 15},\n 'S': {'PT_row': 2, 'PT_col': 15, 'Z': 16}, 'Cl': {'PT_row': 2, 'PT_col': 16, 'Z': 17},\n 'Ar': {'PT_row': 2, 'PT_col': 17, 'Z': 18}, 'K': {'PT_row': 3, 'PT_col': 0, 'Z': 19},\n 'Ca': {'PT_row': 3, 'PT_col': 1, 'Z': 20}, 'Sc': {'PT_row': 3, 'PT_col': 2, 'Z': 21},\n 'Ti': {'PT_row': 3, 'PT_col': 3, 'Z': 22}, 'V ': {'PT_row': 3, 'PT_col': 4, 'Z': 23},\n 'Cr': {'PT_row': 3, 'PT_col': 5, 'Z': 24}, 'Mn': {'PT_row': 3, 'PT_col': 6, 'Z': 25},\n 'Fe': {'PT_row': 3, 'PT_col': 7, 'Z': 26}, 'Co': {'PT_row': 3, 'PT_col': 8, 'Z': 27},\n 'Ni': {'PT_row': 3, 'PT_col': 9, 'Z': 28}, 'Cu': {'PT_row': 3, 'PT_col': 10, 'Z': 29},\n 'Zn': {'PT_row': 3, 'PT_col': 11, 'Z': 30}, 
'Ga': {'PT_row': 3, 'PT_col': 12, 'Z': 31},\n 'Ge': {'PT_row': 3, 'PT_col': 13, 'Z': 32}, 'As': {'PT_row': 3, 'PT_col': 14, 'Z': 33},\n 'Se': {'PT_row': 3, 'PT_col': 15, 'Z': 34}, 'Br': {'PT_row': 3, 'PT_col': 16, 'Z': 35},\n 'Kr': {'PT_row': 3, 'PT_col': 17, 'Z': 36}, 'Rb': {'PT_row': 4, 'PT_col': 0, 'Z': 37},\n 'Sr': {'PT_row': 4, 'PT_col': 1, 'Z': 38}, 'Y': {'PT_row': 4, 'PT_col': 2, 'Z': 39},\n 'Zr': {'PT_row': 4, 'PT_col': 3, 'Z': 40}, 'Nb': {'PT_row': 4, 'PT_col': 4, 'Z': 41},\n 'Mo': {'PT_row': 4, 'PT_col': 5, 'Z': 42}, 'Tc': {'PT_row': 4, 'PT_col': 6, 'Z': 43},\n 'Ru': {'PT_row': 4, 'PT_col': 7, 'Z': 44}, 'Rh': {'PT_row': 4, 'PT_col': 8, 'Z': 45},\n 'Pd': {'PT_row': 4, 'PT_col': 9, 'Z': 46}, 'Ag': {'PT_row': 4, 'PT_col': 10, 'Z': 47},\n 'Cd': {'PT_row': 4, 'PT_col': 11, 'Z': 48}, 'In': {'PT_row': 4, 'PT_col': 12, 'Z': 49},\n 'Sn': {'PT_row': 4, 'PT_col': 13, 'Z': 50}, 'Sb': {'PT_row': 4, 'PT_col': 14, 'Z': 51},\n 'Te': {'PT_row': 4, 'PT_col': 15, 'Z': 52}, 'I': {'PT_row': 4, 'PT_col': 16, 'Z': 53},\n 'Xe': {'PT_row': 4, 'PT_col': 17, 'Z': 54}, 'Cs': {'PT_row': 5, 'PT_col': 0, 'Z': 55},\n 'Ba': {'PT_row': 5, 'PT_col': 1, 'Z': 56}, 'Hf': {'PT_row': 5, 'PT_col': 3, 'Z': 72},\n 'Ta': {'PT_row': 5, 'PT_col': 4, 'Z': 73}, 'W': {'PT_row': 5, 'PT_col': 5, 'Z': 74},\n 'Re': {'PT_row': 5, 'PT_col': 6, 'Z': 75}, 'Os': {'PT_row': 5, 'PT_col': 7, 'Z': 76},\n 'Ir': {'PT_row': 5, 'PT_col': 8, 'Z': 77}, 'Pt': {'PT_row': 5, 'PT_col': 9, 'Z': 78},\n 'Au': {'PT_row': 5, 'PT_col': 10, 'Z': 79}, 'Hg': {'PT_row': 5, 'PT_col': 11, 'Z': 80},\n 'Pb': {'PT_row': 5, 'PT_col': 13, 'Z': 82}, 'Bi': {'PT_row': 5, 'PT_col': 14, 'Z': 0},\n 'Po': {'PT_row': 5, 'PT_col': 15, 'Z': 0}, 'At': {'PT_row': 5, 'PT_col': 16, 'Z': 0},\n 'Rn': {'PT_row': 5, 'PT_col': 17, 'Z': 0}, 'Fr': {'PT_row': 6, 'PT_col': 0, 'Z': 0},\n 'Ra': {'PT_row': 6, 'PT_col': 1, 'Z': 0}, 'Rf': {'PT_row': 6, 'PT_col': 3, 'Z': 0},\n 'Db': {'PT_row': 6, 'PT_col': 4, 'Z': 0}, 'Sg': {'PT_row': 6, 'PT_col': 5, 'Z': 0},\n 'Bh': {'PT_row': 6, 'PT_col': 6, 'Z': 0}, 'Hs': {'PT_row': 6, 'PT_col': 7, 'Z': 0},\n 'Mt': {'PT_row': 6, 'PT_col': 8, 'Z': 0}, 'Ds': {'PT_row': 6, 'PT_col': 9, 'Z': 0},\n 'Rg': {'PT_row': 6, 'PT_col': 10, 'Z': 0}, 'La': {'PT_row': 8, 'PT_col': 3, 'Z': 57},\n 'Ce': {'PT_row': 8, 'PT_col': 4, 'Z': 58}, 'Pr': {'PT_row': 8, 'PT_col': 5, 'Z': 59},\n 'Nd': {'PT_row': 8, 'PT_col': 6, 'Z': 60}, 'Pm': {'PT_row': 8, 'PT_col': 7, 'Z': 61},\n 'Sm': {'PT_row': 8, 'PT_col': 8, 'Z': 62}, 'Eu': {'PT_row': 8, 'PT_col': 9, 'Z': 63},\n 'Gd': {'PT_row': 8, 'PT_col': 10, 'Z': 64}, 'Tb': {'PT_row': 8, 'PT_col': 11, 'Z': 65},\n 'Dy': {'PT_row': 8, 'PT_col': 12, 'Z': 66}, 'Ho': {'PT_row': 8, 'PT_col': 13, 'Z': 67},\n 'Er': {'PT_row': 8, 'PT_col': 14, 'Z': 68}, 'Tm': {'PT_row': 8, 'PT_col': 15, 'Z': 69},\n 'Yb': {'PT_row': 8, 'PT_col': 16, 'Z': 70}, 'Lu': {'PT_row': 8, 'PT_col': 17, 'Z': 71},\n 'Ac': {'PT_row': 9, 'PT_col': 3, 'Z': 0}, 'Th': {'PT_row': 9, 'PT_col': 4, 'Z': 0},\n 'Pa': {'PT_row': 9, 'PT_col': 5, 'Z': 0}, 'U': {'PT_row': 9, 'PT_col': 6, 'Z': 0},\n 'Np': {'PT_row': 9, 'PT_col': 7, 'Z': 0}, 'Pu': {'PT_row': 9, 'PT_col': 8, 'Z': 0},\n 'Am': {'PT_row': 9, 'PT_col': 9, 'Z': 0}, 'Cm': {'PT_row': 9, 'PT_col': 10, 'Z': 0},\n 'Bk': {'PT_row': 9, 'PT_col': 11, 'Z': 0}, 'Cf': {'PT_row': 9, 'PT_col': 12, 'Z': 0},\n 'Es': {'PT_row': 9, 'PT_col': 13, 'Z': 0}, 'Fm': {'PT_row': 9, 'PT_col': 14, 'Z': 0},\n 'Md': {'PT_row': 9, 'PT_col': 15, 'Z': 0}, 'No': {'PT_row': 9, 'PT_col': 16, 'Z': 0},\n 'Lr': {'PT_row': 9, 'PT_col': 17, 'Z': 0},\n '*': {'PT_row': 5, 
'PT_col': 2, 'PT_col2': 8, 'PT_row2': 2, 'Z': 0},\n '**': {'PT_row': 6, 'PT_col': 2, 'PT_col2': 9, 'PT_row2': 2, 'Z': 0}}\n\n return pt_info\n\n\nclass InteractiveSpectrumImage(object):\n \"\"\"Interactive spectrum imaging plot\n\n Attributes:\n -----------\n dictionary with a minimum of the following keys:\n ['image']: displayed image\n ['data']: data cube\n ['intensity_scale_ppm']: intensity scale\n ['ylabel']: intensity label\n ['spectra'] dictionary which contains dictionaries for each spectrum style ['1-2']:\n ['spectrum'] = tags['cube'][y,x,:]\n ['spectra'][f'{x}-{y}']['energy_scale'] = tags['energy_scale']\n ['intensity_scale'] = 1/tags['cube'][y,x,:].sum()*1e6\n\n Please note the possibility to load any image for the selection of the spectrum\n Also there is the possibility to display the survey image.\n\n For analysis we have the following options:\n 'fix_energy': set zero-loss peak maximum to zero !! Low loss spectra only!!\n 'fit_zero_loss': fit zero-loss peak with model function !! Low loss spectra only!!\n 'fit_low_loss': fit low-loss spectrum with model peaks !! Low loss spectra only!!\n\n\n 'fit_composition': fit core-loss spectrum with background and cross sections!! Core loss spectra only!!\n 'fit_ELNES': fit core-loss edge with model peaks !! Core loss spectra only!!\n \"\"\"\n\n def __init__(self, data_source, horizontal=True):\n\n box_layout = widgets.Layout(display='flex',\n flex_flow='row',\n align_items='stretch',\n width='100%')\n\n words = ['fix_energy', 'fit_zero_loss', 'fit_low_loss', 'fit_composition', 'fit_ELNES']\n\n self.buttons = [widgets.ToggleButton(value=False, description=word, disabled=False) for word in words]\n box = widgets.Box(children=self.buttons, layout=box_layout)\n display(box)\n\n # MAKE Dictionary\n\n if isinstance(data_source, dict):\n self.tags = data_source\n elif isinstance(data_source, h5py.Group):\n self.tags = self.set_tags(data_source)\n else:\n print('Data source must be a dictionary or channel')\n return\n\n # Button(description='edge_quantification')\n for button in self.buttons:\n button.observe(self.on_button_clicked, 'value') # on_click(self.on_button_clicked)\n\n self.figure = plt.figure()\n self.horizontal = horizontal\n self.x = 0\n self.y = 0\n\n self.extent = [0, self.tags['cube'].shape[1], self.tags['cube'].shape[0], 0]\n self.rectangle = [0, self.tags['cube'].shape[1], 0, self.tags['cube'].shape[0]]\n self.scaleX = 1.0\n self.scaleY = 1.0\n self.analysis = []\n self.plot_legend = False\n if 'ylabel' not in self.tags:\n self.tags['ylabel'] = 'intensity [a.u.]'\n self.SI = False\n\n if horizontal:\n self.ax1 = plt.subplot(1, 2, 1)\n self.ax2 = plt.subplot(1, 2, 2)\n else:\n self.ax1 = plt.subplot(2, 1, 1)\n self.ax2 = plt.subplot(2, 1, 2)\n\n self.cube = self.tags['cube']\n self.image = self.tags['cube'].sum(axis=2)\n\n self.ax1.imshow(self.image, extent=self.extent)\n if horizontal:\n self.ax1.set_xlabel('distance [pixels]')\n else:\n self.ax1.set_ylabel('distance [pixels]')\n self.ax1.set_aspect('equal')\n\n self.rect = patches.Rectangle((0, 0), 1, 1, linewidth=1, edgecolor='r', facecolor='red', alpha=0.2)\n self.ax1.add_patch(self.rect)\n self.intensity_scale = self.tags['spectra'][f'{self.x}-{self.y}']['intensity_scale']\n self.spectrum = self.tags['spectra'][f'{self.x}-{self.y}']['spectrum'] * self.intensity_scale\n self.energy_scale = self.tags['spectra'][f'{self.x}-{self.y}']['energy_scale']\n\n self.ax2.plot(self.energy_scale, self.spectrum)\n self.ax2.set_title(f' spectrum {self.x},{self.y} ')\n 
self.ax2.set_xlabel('energy loss [eV]')\n self.ax2.set_ylabel(self.tags['ylabel'])\n self.cid = self.figure.canvas.mpl_connect('button_press_event', self.onclick)\n\n plt.tight_layout()\n\n def on_button_clicked(self, b):\n # print(b['owner'].description)\n selection = b['owner'].description\n if b['new']:\n if selection == 'fit_composition':\n if 'region_tags' in self.tags and 'edges_present' in self.tags \\\n and 'acceleration_voltage' in self.tags \\\n and 'collection_angle' in self.tags:\n pass\n else:\n self.buttons[3].value = False\n return\n elif selection in ['fix_energy', 'fit_zero_loss']:\n if self.energy_scale[0] > 0:\n button_index = ['fix_energy', 'fit_zero_loss'].index(selection)\n self.buttons[button_index].value = False\n return\n self.analysis.append(selection)\n self.update()\n else:\n\n if selection in self.analysis:\n self.analysis.remove(selection)\n\n def do_all(self, selection=None, verbose=True):\n x = self.x\n y = self.y\n if selection is None:\n selection = self.analysis\n for self.x in range(self.cube.shape[1]):\n if verbose:\n print(f' row: {self.x}')\n for self.y in range(self.cube.shape[0]):\n\n if 'fit_zero_loss' in selection:\n title = self.fit_zero_loss(plot_this=False)\n\n elif 'fix_energy' in selection:\n self.ax2.set_title('bn')\n title = self.fix_energy()\n\n elif 'fit_composition' in selection:\n title = self.fit_quantification(plot_this=False)\n\n self.x = x\n self.y = y\n\n def onclick(self, event):\n x = int(event.xdata)\n y = int(event.ydata)\n\n # print(x,y)\n if self.rectangle[0] <= x < self.rectangle[0] + self.rectangle[1]:\n if self.rectangle[2] <= y < self.rectangle[2] + self.rectangle[3]:\n self.x = int((x - self.rectangle[0]) / self.rectangle[1] * self.cube.shape[1])\n self.y = int((y - self.rectangle[2]) / self.rectangle[3] * self.cube.shape[0])\n else:\n return\n else:\n return\n\n if event.inaxes in [self.ax1]:\n x = (self.x * self.rectangle[1] / self.cube.shape[1] + self.rectangle[0])\n y = (self.y * self.rectangle[3] / self.cube.shape[0] + self.rectangle[2])\n\n self.rect.set_xy([x, y])\n self.update()\n\n def update(self):\n xlim = self.ax2.get_xlim()\n ylim = self.ax2.get_ylim()\n self.ax2.clear()\n self.intensity_scale = self.tags['spectra'][f'{self.x}-{self.y}']['intensity_scale']\n self.spectrum = self.tags['spectra'][f'{self.x}-{self.y}']['spectrum'] * self.intensity_scale\n self.energy_scale = self.tags['spectra'][f'{self.x}-{self.y}']['energy_scale']\n\n if 'fit_zero_loss' in self.analysis:\n title = self.fit_zero_loss()\n self.ax2.set_title(title)\n elif 'fix_energy' in self.analysis:\n self.ax2.set_title('bn')\n title = self.fix_energy()\n self.ax2.set_title(title)\n\n elif 'fit_composition' in self.analysis:\n title = self.fit_quantification()\n self.ax2.set_title(title)\n\n else:\n self.ax2.set_title(f' spectrum {self.x},{self.y} ')\n self.ax2.plot(self.energy_scale, self.spectrum, color='#1f77b4', label='experiment')\n\n if self.plot_legend:\n self.ax2.legend(shadow=True)\n self.ax2.set_xlim(xlim)\n self.ax2.set_ylim(ylim)\n self.ax2.set_xlabel('energy loss [eV]')\n self.ax2.set_ylabel(self.tags['ylabel'])\n self.ax2.set_xlim(xlim)\n\n # self.ax2.draw()\n\n def set_tags(self, channel):\n # TODO: change to sidpy dataset tags = ft.h5_get_dictionary(channel)\n tags = {}\n if tags['data_type'] == 'spectrum_image':\n tags['image'] = tags['data']\n tags['data'] = tags['cube'][0, 0, :]\n if 'intensity_scale_ppm' not in channel:\n channel['intensity_scale_ppm'] = 1\n\n tags['ylabel'] = 'intensity [a.u.]'\n tags['spectra'] = {}\n 
for x in range(tags['spatial_size_y']):\n for y in range(tags['spatial_size_x']):\n tags['spectra'][f'{x}-{y}'] = {}\n tags['spectra'][f'{x}-{y}']['spectrum'] = tags['cube'][y, x, :]\n tags['spectra'][f'{x}-{y}']['energy_scale'] = tags['energy_scale']\n tags['spectra'][f'{x}-{y}']['intensity_scale'] = 1 / tags['cube'][y, x, :].sum() * 1e6\n tags['ylabel'] = 'inel. scat. int. [ppm]'\n\n return tags\n\n def fix_energy(self):\n\n energy_scale = self.tags['spectra'][f'{self.x}-{self.y}']['energy_scale']\n spectrum = self.tags['spectra'][f'{self.x}-{self.y}']['spectrum'] * self.intensity_scale\n fwhm, delta_e = eels.fix_energy_scale(spectrum, energy_scale)\n self.tags['spectra'][f'{self.x}-{self.y}']['delta_e'] = delta_e\n self.tags['spectra'][f'{self.x}-{self.y}']['fwhm'] = fwhm\n self.energy_scale = energy_scale - delta_e\n title = f'spectrum {self.x},{self.y} fwhm: {fwhm:.2f}, dE: {delta_e:.3f}'\n return title\n\n def fit_zero_loss(self, plot_this=True):\n\n energy_scale = self.tags['spectra'][f'{self.x}-{self.y}']['energy_scale']\n spectrum = self.tags['spectra'][f'{self.x}-{self.y}']['spectrum'] * self.intensity_scale\n if 'zero_loss_fit_width' not in self.tags:\n self.tags['zero_loss_fit_width'] = .5\n if self.tags['zero_loss_fit_width'] / (energy_scale[1] - energy_scale[0]) < 6:\n self.tags['zero_loss_fit_width'] = (energy_scale[1] - energy_scale[0]) * 6\n fwhm, delta_e = eels.fix_energy_scale(spectrum, energy_scale)\n energy_scale = energy_scale - delta_e\n z_oss, p_zl = eels.resolution_function(energy_scale, spectrum, self.tags['zero_loss_fit_width'])\n fwhm2, delta_e2 = eels.fix_energy_scale(z_oss, energy_scale)\n\n self.tags['spectra'][f'{self.x}-{self.y}']['resolution_function'] = z_oss\n self.tags['spectra'][f'{self.x}-{self.y}']['p_zl'] = p_zl\n self.tags['spectra'][f'{self.x}-{self.y}']['delta_e'] = delta_e\n self.tags['spectra'][f'{self.x}-{self.y}']['fwhm_resolution'] = fwhm2\n self.tags['spectra'][f'{self.x}-{self.y}']['fwhm'] = fwhm\n\n if plot_this:\n self.ax2.plot(energy_scale, z_oss, label='resolution function', color='black')\n self.ax2.plot(energy_scale, self.spectrum - z_oss, label='difference', color='orange')\n self.ax2.axhline(linewidth=0.5, color='black')\n self.energy_scale = energy_scale\n title = f'spectrum {self.x},{self.y} fwhm: {fwhm:.2f}' # ', dE: {delta_e2:.5e}'\n return title\n\n def fit_quantification(self, plot_this=True):\n energy_scale = self.tags['spectra'][f'{self.x}-{self.y}']['energy_scale']\n spectrum = self.tags['spectra'][f'{self.x}-{self.y}']['spectrum'] * self.intensity_scale\n edges = eels.make_edges(self.tags['edges_present'], energy_scale, self.tags['acceleration_voltage'],\n self.tags['collection_angle'])\n edges = eels.fit_edges(spectrum, self.tags['spectra'][f'{self.x}-{self.y}']['energy_scale'],\n self.tags['region_tags'], edges)\n self.tags['spectra'][f'{self.x}-{self.y}']['edges'] = edges.copy()\n if plot_this:\n self.ax2.plot(energy_scale, edges['model']['spectrum'], label='model')\n self.ax2.plot(energy_scale, self.spectrum - edges['model']['spectrum'], label='difference')\n self.ax2.axhline(linewidth=0.5, color='black')\n else:\n self.tags['spectra'][f'{self.x}-{self.y}']['do_all'] = 'done'\n title = f'spectrum {self.x},{self.y} '\n\n for key in edges:\n if key.isdigit():\n title = title + f\"{edges[key]['element']}: {edges[key]['areal_density']:.2e}; \"\n\n return title\n\n def set_legend(self, set_legend):\n self.plot_legend = set_legend\n\n def get_xy(self):\n return [self.x, self.y]\n\n def get_current_spectrum(self):\n return 
self.cube[self.y, self.x, :]\n\n def set_z_contrast_image(self, z_channel=None):\n if z_channel is not None:\n self.tags['Z_contrast_channel'] = z_channel\n if 'Z_contrast_channel' not in self.tags:\n print('add Z contrast channel group to dictionary first!')\n return\n\n z_tags = {} # TODO change to sidpy dataset ft.h5_get_dictionary(z_channel)\n extent = [self.rectangle[0], self.rectangle[0] + self.rectangle[1],\n self.rectangle[2] + self.rectangle[3], self.rectangle[2]]\n self.ax1.imshow(z_tags['data'], extent=extent, cmap='gray')\n\n def overlay_z_contrast_image(self, z_channel=None):\n\n if self.SI:\n if z_channel is not None:\n self.tags['Z_contrast_channel'] = z_channel\n if 'Z_contrast_channel' not in self.tags:\n print('add survey channel group to dictionary first!')\n return\n\n z_tags = {} # TODO: change to sidpy ft.h5_get_dictionary(self.tags['Z_contrast_channel'])\n\n xlim = self.ax1.get_xlim()\n ylim = self.ax1.get_ylim()\n extent = [self.rectangle[0], self.rectangle[0] + self.rectangle[1],\n self.rectangle[2] + self.rectangle[3], self.rectangle[2]]\n self.ax1.imshow(z_tags['data'], extent=extent, cmap='viridis', alpha=0.5)\n self.ax1.set_ylim(ylim)\n self.ax1.set_xlim(xlim)\n\n def overlay_data(self, data=None):\n\n if self.SI:\n if data is None:\n data = self.cube.sum(axis=2)\n\n xlim = self.ax1.get_xlim()\n ylim = self.ax1.get_ylim()\n extent = [self.rectangle[0], self.rectangle[0] + self.rectangle[1],\n self.rectangle[2] + self.rectangle[3], self.rectangle[2]]\n self.ax1.imshow(data, extent=extent, alpha=0.7, cmap='viridis')\n self.ax1.set_ylim(ylim)\n self.ax1.set_xlim(xlim)\n\n def set_survey_image(self, si_channel=None):\n\n if si_channel is not None:\n self.tags['survey_channel'] = si_channel\n if 'survey_channel' not in self.tags:\n print('add survey channel group to dictionary first!')\n return\n si_channel = self.tags['survey_channel']\n si_tags = {} # TODO: change to sidpy ft.h5_get_dictionary(si_channel)\n tags2 = dict(si_channel.attrs)\n\n self.ax1.set_aspect('equal')\n self.scaleX = si_channel['spatial_scale_x'][()]\n self.scaleY = si_channel['spatial_scale_y'][()]\n\n self.ax1.imshow(si_tags['data'], extent=si_tags['extent'], cmap='gray')\n if self.horizontal:\n self.ax1.set_xlabel('distance [nm]')\n else:\n self.ax1.set_ylabel('distance [nm]')\n\n annotation_done = []\n for key in tags2:\n if 'annotations' in key:\n annotation_number = key[12]\n if annotation_number not in annotation_done:\n annotation_done.append(annotation_number)\n\n if tags2['annotations_' + annotation_number + '_type'] == 'text':\n x = tags2['annotations_' + annotation_number + '_x']\n y = tags2['annotations_' + annotation_number + '_y']\n text = tags2['annotations_' + annotation_number + '_text']\n self.ax1.text(x, y, text, color='r')\n\n elif tags2['annotations_' + annotation_number + '_type'] == 'circle':\n radius = 20 * self.scaleX # tags['annotations'][key]['radius']\n xy = tags2['annotations_' + annotation_number + '_position']\n circle = patches.Circle(xy, radius, color='r', fill=False)\n self.ax1.add_artist(circle)\n\n elif tags2['annotations_' + annotation_number + '_type'] == 'spectrum image':\n width = tags2['annotations_' + annotation_number + '_width']\n height = tags2['annotations_' + annotation_number + '_height']\n position = tags2['annotations_' + annotation_number + '_position']\n rectangle = patches.Rectangle(position, width, height, color='r', fill=False)\n self.rectangle = [position[0], width, position[1], height]\n self.ax1.add_artist(rectangle)\n 
self.ax1.text(position[0], position[1], 'Spectrum Image', color='r')\n self.rect.set_width(width / self.cube.shape[1])\n self.rect.set_height(height / self.cube.shape[0])\n self.SI = True\n\n\nclass ElementalEdges(object):\n \"\"\" Adds ionization edges of element z to plot with axis ax\n\n There is an optional parameter maximum_chemical_shift which allows to change\n the energy range in which the edges are searched.\n\n available functions:\n - update(): updates the drawing of ionization edges\n - set_edge(Z) : changes atomic number and updates everything accordingly\n - disconnect: makes everything invisible and stops drawing\n - reconnect: undo of disconnect\n\n usage:\n >> fig, ax = plt.subplots()\n >> ax.plot(energy_scale, spectrum)\n >> Z= 42\n >> cursor = ElementalEdges(ax, Z)\n\n\n see Chapter4 'CH4-Working_with_X-Sections' notebook\n \"\"\"\n\n def __init__(self, ax, z):\n self.ax = ax\n self.labels = None\n self.lines = None\n self.Z = eels.get_z(z)\n self.color = 'black'\n self.x_sections = eels.get_x_sections()\n self.cid = ax.figure.canvas.mpl_connect('draw_event', self.onresize)\n # self.update() is not necessary because of a drawing event is issued\n\n def set_edge(self, z):\n self.Z = eels.get_z(z)\n if self.cid is None:\n self.cid = self.ax.figure.canvas.mpl_connect('draw_event', self.onresize)\n self.update()\n\n def onresize(self, event):\n self.update()\n\n def update(self):\n if self.labels is not None:\n for label in self.labels:\n label.remove()\n if self.lines is not None:\n for line in self.lines:\n line.remove()\n self.labels = []\n self.lines = []\n x_min, x_max = self.ax.get_xlim()\n y_min, y_max = self.ax.get_ylim()\n\n element = str(self.Z)\n x_sections = self.x_sections\n for key in all_edges:\n if key in x_sections[element] and 'onset' in x_sections[element][key]:\n x = x_sections[element][key]['onset']\n if x_min < x < x_max:\n if key in first_close_edges:\n label2 = self.ax.text(x, y_max, f\"{x_sections[element]['name']}-{key}\",\n verticalalignment='top', rotation=0, color=self.color)\n else:\n label2 = self.ax.text(x, y_max, f\"\\n{x_sections[element]['name']}-{key}\",\n verticalalignment='top', color=self.color)\n line2 = self.ax.axvline(x, ymin=0, ymax=1, color=self.color)\n\n self.labels.append(label2)\n self.lines.append(line2)\n\n def reconnect(self):\n self.cid = self.ax.figure.canvas.mpl_connect('draw_event', self.onresize)\n self.update()\n\n def disconnect(self):\n if self.labels is not None:\n for label in self.labels:\n label.remove()\n if self.lines is not None:\n for line in self.lines:\n line.remove()\n self.labels = None\n self.lines = None\n self.ax.figure.canvas.mpl_disconnect(self.cid)\n\n\nclass EdgesAtCursor(object):\n \"\"\"\n Adds a Cursor to a plot, which plots all major (possible) ionization edges at\n the cursor location if left (right) mouse button is clicked.\n\n Attributes\n ----------\n ax: matplotlib axis\n x: numpy array\n energy_scale of spectrum\n y: numpy array\n intensities of spectrum\n maximal_chemical_shift: float\n optional parameter maximum_chemical_shift which allows to change the energy range in which the edges\n are searched.\n\n Example\n -------\n fig, ax = plt.subplots()\n ax.plot(energy_scale, spectrum)\n cursor = EdgesAtCursor(ax, energy_scale, spectrum)\n\n see Chapter4 'CH4-Working_with_X-Sections' notebook\n\n \"\"\"\n\n def __init__(self, ax, x, y, maximal_chemical_shift=5):\n self.ax = ax\n self.ly = ax.axvline(x[0], color='k', alpha=0.2) # the vert line\n self.marker, = ax.plot(x[0], y[0], 
marker=\"o\", color=\"crimson\", zorder=3)\n self.x = x\n self.y = y\n self.txt = ax.text(0.7, 0.9, '', verticalalignment='bottom')\n self.select = 0\n self.label = None\n self.line = None\n self.cid = ax.figure.canvas.mpl_connect('button_press_event', self.click)\n self.mouse_cid = ax.figure.canvas.mpl_connect('motion_notify_event', self.mouse_move)\n self.maximal_chemical_shift = maximal_chemical_shift\n\n def click(self, event):\n\n # print('click', event)\n if not event.inaxes:\n return\n x, y = event.xdata, event.ydata\n\n index = np.searchsorted(self.x, [x])[0]\n x = self.x[index]\n y = self.y[index]\n self.select = x\n\n y_min, y_max = self.ax.get_ylim()\n\n if self.label is not None:\n self.label.remove()\n self.line.remove()\n if event.button == 1:\n self.label = self.ax.text(x, y_max, eels.find_major_edges(event.xdata, self.maximal_chemical_shift),\n verticalalignment='top')\n self.line, = self.ax.plot([x, x], [y_min, y_max], color='black')\n if event.button == 3:\n self.line, = self.ax.plot([x, x], [y_min, y_max], color='black')\n self.label = self.ax.text(x, y_max, eels.find_all_edges(event.xdata, self.maximal_chemical_shift),\n verticalalignment='top')\n self.ax.set_ylim(y_min, y_max)\n\n def mouse_move(self, event):\n if not event.inaxes:\n return\n\n x, y = event.xdata, event.ydata\n index = np.searchsorted(self.x, [x])[0]\n x = self.x[index]\n y = self.y[index]\n self.select = x\n self.ly.set_xdata(x)\n self.marker.set_data([x], [y])\n self.txt.set_text(f'\\n x={x:1.2f}, y={y:1.2g}\\n')\n\n # self.ax.text(x, y*2,find_major_edges(x))\n self.txt.set_position((x, y))\n self.ax.figure.canvas.draw_idle()\n\n def del_edges(self):\n if self.label is not None:\n self.label.remove()\n self.line.remove()\n self.label = None\n\n def disconnect(self):\n self.ly.remove()\n self.marker.remove()\n self.txt.remove()\n\n self.ax.figure.canvas.mpl_disconnect(self.cid)\n self.ax.figure.canvas.mpl_disconnect(self.mouse_cid)\n" ]
[ [ "matplotlib.pyplot.tight_layout", "matplotlib.patches.Rectangle", "matplotlib.patches.Circle", "matplotlib.pyplot.subplot", "numpy.searchsorted", "numpy.array", "numpy.zeros", "matplotlib.pyplot.figure" ] ]
bkmi/schnetpack
[ "3c58fd1a0b9fa2b046a88e89eb0d0c9051973046" ]
[ "src/schnetpack/datasets/qm9.py" ]
[ "import logging\nimport os\nimport re\nimport shutil\nimport tarfile\nimport tempfile\nfrom urllib import request as request\n\nimport numpy as np\nfrom ase.io.extxyz import read_xyz\nfrom ase.units import Debye, Bohr, Hartree, eV\n\nimport schnetpack as spk\nfrom schnetpack.datasets import DownloadableAtomsData\n\n__all__ = [\"QM9\"]\n\n\nclass QM9(DownloadableAtomsData):\n \"\"\"QM9 benchmark database for organic molecules.\n\n The QM9 database contains small organic molecules with up to nine non-hydrogen atoms\n from including C, O, N, F. This class adds convenient functions to download QM9 from\n figshare and load the data into pytorch.\n\n Args:\n dbpath (str): path to directory containing database.\n download (bool, optional): enable downloading if database does not exists.\n subset (list, optional): indices to subset. Set to None for entire database.\n load_only (list, optional): reduced set of properties to be loaded\n collect_triples (bool, optional): Set to True if angular features are needed.\n remove_uncharacterized (bool, optional): remove uncharacterized molecules.\n environment_provider (spk.environment.BaseEnvironmentProvider): define how\n neighborhood is calculated\n (default=spk.environment.SimpleEnvironmentProvider).\n\n References:\n .. [#qm9_1] https://ndownloader.figshare.com/files/3195404\n\n \"\"\"\n\n # properties\n A = \"rotational_constant_A\"\n B = \"rotational_constant_B\"\n C = \"rotational_constant_C\"\n mu = \"dipole_moment\"\n alpha = \"isotropic_polarizability\"\n homo = \"homo\"\n lumo = \"lumo\"\n gap = \"gap\"\n r2 = \"electronic_spatial_extent\"\n zpve = \"zpve\"\n U0 = \"energy_U0\"\n U = \"energy_U\"\n H = \"enthalpy_H\"\n G = \"free_energy\"\n Cv = \"heat_capacity\"\n\n properties = {\n \"A\": \"rotational_constant_A\",\n \"B\": \"rotational_constant_B\",\n \"C\": \"rotational_constant_C\",\n \"mu\": \"dipole_moment\",\n \"alpha\": \"isotropic_polarizability\",\n \"homo\": \"homo\",\n \"lumo\": \"lumo\",\n \"gap\": \"gap\",\n \"r2\": \"electronic_spatial_extent\",\n \"zpve\": \"zpve\",\n \"U0\": \"energy_U0\",\n \"U\": \"energy_U\",\n \"H\": \"enthalpy_H\",\n \"G\": \"free_energy\",\n \"Cv\": \"heat_capacity\",\n }\n\n reference = {zpve: 0, U0: 1, U: 2, H: 3, G: 4, Cv: 5}\n\n def __init__(\n self,\n dbpath,\n download=True,\n subset=None,\n load_only=None,\n collect_triples=False,\n remove_uncharacterized=False,\n environment_provider=spk.environment.SimpleEnvironmentProvider(),\n **kwargs\n ):\n\n self.remove_uncharacterized = remove_uncharacterized\n\n available_properties = [\n QM9.A,\n QM9.B,\n QM9.C,\n QM9.mu,\n QM9.alpha,\n QM9.homo,\n QM9.lumo,\n QM9.gap,\n QM9.r2,\n QM9.zpve,\n QM9.U0,\n QM9.U,\n QM9.H,\n QM9.G,\n QM9.Cv,\n ]\n\n units = [\n 1.0,\n 1.0,\n 1.0,\n Debye,\n Bohr ** 3,\n Hartree,\n Hartree,\n Hartree,\n Bohr ** 2,\n Hartree,\n Hartree,\n Hartree,\n Hartree,\n Hartree,\n 1.0,\n ]\n\n super().__init__(\n dbpath=dbpath,\n subset=subset,\n load_only=load_only,\n collect_triples=collect_triples,\n download=download,\n available_properties=available_properties,\n units=units,\n environment_provider=environment_provider,\n **kwargs\n )\n\n def create_subset(self, idx):\n idx = np.array(idx)\n subidx = idx if self.subset is None else np.array(self.subset)[idx]\n\n return QM9(\n dbpath=self.dbpath,\n download=False,\n subset=subidx,\n load_only=self.load_only,\n collect_triples=self.collect_triples,\n remove_uncharacterized=False,\n environment_provider=self.environment_provider,\n )\n\n def _download(self):\n if 
self.remove_uncharacterized:\n evilmols = self._load_evilmols()\n else:\n evilmols = None\n\n self._load_data(evilmols)\n\n atref, labels = self._load_atomrefs()\n self.set_metadata({\"atomrefs\": atref.tolist(), \"atref_labels\": labels})\n\n def _load_atomrefs(self):\n logging.info(\"Downloading GDB-9 atom references...\")\n at_url = \"https://ndownloader.figshare.com/files/3195395\"\n tmpdir = tempfile.mkdtemp(\"gdb9\")\n tmp_path = os.path.join(tmpdir, \"atomrefs.txt\")\n\n request.urlretrieve(at_url, tmp_path)\n logging.info(\"Done.\")\n\n atref = np.zeros((100, 6))\n labels = [QM9.zpve, QM9.U0, QM9.U, QM9.H, QM9.G, QM9.Cv]\n with open(tmp_path) as f:\n lines = f.readlines()\n for z, l in zip([1, 6, 7, 8, 9], lines[5:10]):\n atref[z, 0] = float(l.split()[1])\n atref[z, 1] = float(l.split()[2]) * Hartree / eV\n atref[z, 2] = float(l.split()[3]) * Hartree / eV\n atref[z, 3] = float(l.split()[4]) * Hartree / eV\n atref[z, 4] = float(l.split()[5]) * Hartree / eV\n atref[z, 5] = float(l.split()[6])\n return atref, labels\n\n def _load_evilmols(self):\n logging.info(\"Downloading list of uncharacterized molecules...\")\n at_url = \"https://ndownloader.figshare.com/files/3195404\"\n tmpdir = tempfile.mkdtemp(\"gdb9\")\n tmp_path = os.path.join(tmpdir, \"uncharacterized.txt\")\n\n request.urlretrieve(at_url, tmp_path)\n logging.info(\"Done.\")\n\n evilmols = []\n with open(tmp_path) as f:\n lines = f.readlines()\n for line in lines[9:-1]:\n evilmols.append(int(line.split()[0]))\n return np.array(evilmols)\n\n def _load_data(self, evilmols=None):\n logging.info(\"Downloading GDB-9 data...\")\n tmpdir = tempfile.mkdtemp(\"gdb9\")\n tar_path = os.path.join(tmpdir, \"gdb9.tar.gz\")\n raw_path = os.path.join(tmpdir, \"gdb9_xyz\")\n url = \"https://ndownloader.figshare.com/files/3195389\"\n\n request.urlretrieve(url, tar_path)\n logging.info(\"Done.\")\n\n logging.info(\"Extracting files...\")\n tar = tarfile.open(tar_path)\n tar.extractall(raw_path)\n tar.close()\n logging.info(\"Done.\")\n\n logging.info(\"Parse xyz files...\")\n ordered_files = sorted(\n os.listdir(raw_path), key=lambda x: (int(re.sub(\"\\D\", \"\", x)), x)\n )\n\n all_atoms = []\n all_properties = []\n\n irange = np.arange(len(ordered_files), dtype=np.int)\n if evilmols is not None:\n irange = np.setdiff1d(irange, evilmols - 1)\n\n for i in irange:\n xyzfile = os.path.join(raw_path, ordered_files[i])\n\n if (i + 1) % 10000 == 0:\n logging.info(\"Parsed: {:6d} / 133885\".format(i + 1))\n properties = {}\n tmp = os.path.join(tmpdir, \"tmp.xyz\")\n\n with open(xyzfile, \"r\") as f:\n lines = f.readlines()\n l = lines[1].split()[2:]\n for pn, p in zip(self.available_properties, l):\n properties[pn] = np.array([float(p) * self.units[pn]])\n with open(tmp, \"wt\") as fout:\n for line in lines:\n fout.write(line.replace(\"*^\", \"e\"))\n\n with open(tmp, \"r\") as f:\n ats = list(read_xyz(f, 0))[0]\n all_atoms.append(ats)\n all_properties.append(properties)\n\n logging.info(\"Write atoms to db...\")\n self.add_systems(all_atoms, all_properties)\n logging.info(\"Done.\")\n\n shutil.rmtree(tmpdir)\n\n return True\n" ]
[ [ "numpy.array", "numpy.zeros", "numpy.setdiff1d" ] ]
shaoeric/BNN_NoBN
[ "e1d007c7ec15c4793f15375508752eee3ad7e4e0" ]
[ "models/Qa_reactnet_18_none.py" ]
[ "'''\nReact-birealnet-18(modified from resnet)\n\nBN setting: remove all BatchNorm layers\nConv setting: original Conv2d\nBinary setting: only activation are binarized\n\n'''\n\n\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.model_zoo as model_zoo\nimport torch.nn.functional as F\n\nfrom layers import *\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)\n\ndef conv1x1(in_planes, out_planes, stride=1):\n \"\"\"1x1 convolution\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)\n\ndef binaryconv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return HardBinaryConv(in_planes, out_planes, kernel_size=3, stride=stride, padding=1)\n\ndef binaryconv1x1(in_planes, out_planes, stride=1):\n \"\"\"1x1 convolution\"\"\"\n return HardBinaryConv(in_planes, out_planes, kernel_size=1, stride=stride, padding=0)\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n\n self.move0 = LearnableBias(inplanes)\n self.binary_activation = BinaryActivation()\n self.binary_conv = conv3x3(inplanes, planes, stride=stride)\n self.move1 = LearnableBias(planes)\n self.prelu = nn.PReLU(planes)\n self.move2 = LearnableBias(planes)\n\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.move0(x)\n out = self.binary_activation(out)\n out = self.binary_conv(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.move1(out)\n out = self.prelu(out)\n out = self.move2(out)\n\n return out\n\nclass BiRealNet(nn.Module):\n\n def __init__(self, block, layers, imagenet=True, num_classes=1000):\n super(BiRealNet, self).__init__()\n self.inplanes = 64\n\n if imagenet:\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n else:\n self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)\n self.maxpool = nn.Identity()\n\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.fc = nn.Linear(512 * block.expansion, num_classes)\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.AvgPool2d(kernel_size=2, stride=stride),\n conv1x1(self.inplanes, planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n\n return x\n\n\ndef birealnet18(pretrained=False, **kwargs):\n \"\"\"Constructs a BiRealNet-18 model. \"\"\"\n model = BiRealNet(BasicBlock, [4, 4, 4, 4], **kwargs)\n return model\n\n\n\n" ]
[ [ "torch.nn.Sequential", "torch.nn.PReLU", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.MaxPool2d", "torch.nn.Identity", "torch.nn.AdaptiveAvgPool2d", "torch.nn.AvgPool2d" ] ]
mevtorres/pvextractor
[ "0048a43ae06f39313645205e087c4fbec1168a52" ]
[ "pvextractor/gui.py" ]
[ "from __future__ import print_function\n\nimport os\nimport math\nimport warnings\n\nimport numpy as np\n\nfrom matplotlib.collections import LineCollection\nfrom matplotlib.transforms import Bbox\nfrom matplotlib.patches import Polygon\n\nfrom .geometry.path import Path, get_endpoints\nfrom . import extract_pv_slice\n\n\ndef distance(x1, y1, x2, y2, x3, y3):\n \"\"\"\n Find the shortest distance between a point (x3, y3) and the line passing\n through the points (x1, y1) and (x2, y2).\n \"\"\"\n\n px = x2-x1\n py = y2-y1\n\n something = px * px + py * py\n\n u = ((x3 - x1) * px + (y3 - y1) * py) / float(something)\n\n x = x1 + u * px\n y = y1 + u * py\n\n dx = x - x3\n dy = y - y3\n\n dist = math.sqrt(dx*dx + dy*dy)\n\n return dist\n\n\nclass MovableSliceBox(object):\n\n def __init__(self, box, callback):\n self.box = box\n self.press = None\n self.background = None\n self.point_counter = 0\n self.callback = callback\n self.mode = 0\n self.show_poly = False\n self.cidpress = self.box.figure.canvas.mpl_connect('draw_event', self.draw_slicer)\n\n def connect(self):\n self.cidpress = self.box.figure.canvas.mpl_connect('key_press_event', self.key_press)\n self.cidpress = self.box.figure.canvas.mpl_connect('button_press_event', self.on_press)\n self.cidmotion = self.box.figure.canvas.mpl_connect('motion_notify_event', self.on_motion)\n\n def draw_slicer(self, event):\n\n axes = self.box.axes\n canvas = self.box.figure.canvas\n\n self.box.axes.draw_artist(self.box)\n\n if self.show_poly:\n\n path = Path(zip(self.box.x, self.box.y))\n path.width = self.box.width\n\n patches = path.to_patches(1, ec='green', fc='none',\n transform=self.box.axes.transData,\n clip_on=True, clip_box=self.box.axes.bbox)\n\n for patch in patches:\n self.box.axes.draw_artist(patch)\n\n def on_press(self, event):\n\n if self.box.figure.canvas.toolbar.mode != '':\n return\n\n if event.inaxes != self.box.axes:\n return\n\n if self.mode == 1:\n self.callback(self.box)\n self.mode += 1\n return\n\n if self.mode == 2:\n self.box.x = []\n self.box.y = []\n self.mode = 0\n self.point_counter = 0\n\n self.press = event.xdata, event.ydata\n\n self.point_counter += 1\n\n axes = self.box.axes\n canvas = self.box.figure.canvas\n\n if self.point_counter == 1: # first point\n\n self.box.x.append(event.xdata)\n self.box.x.append(event.xdata)\n self.box.y.append(event.ydata)\n self.box.y.append(event.ydata)\n\n self.box.width = 0.\n\n self.box.set_animated(True)\n canvas.draw()\n self.background = canvas.copy_from_bbox(self.box.axes.bbox)\n\n elif self.mode == 0:\n\n self.box.x.append(event.xdata)\n self.box.y.append(event.ydata)\n\n self.box._update_segments()\n\n # now redraw just the lineangle\n axes.draw_artist(self.box)\n\n def key_press(self, event):\n\n if self.box.figure.canvas.toolbar.mode != '':\n return\n\n if event.key == 'enter' and self.mode == 0:\n self.mode += 1\n self.box.x = self.box.x[:-1]\n self.box.y = self.box.y[:-1]\n\n if event.key == 'y' and self.mode == 2:\n self.show_poly = not self.show_poly\n self.draw_slicer(event)\n self.box.figure.canvas.draw()\n\n def on_motion(self, event):\n\n if self.box.figure.canvas.toolbar.mode != '':\n return\n\n if self.point_counter == 0:\n return\n\n if self.mode == 2:\n return\n\n canvas = self.box.figure.canvas\n axes = self.box.axes\n canvas.restore_region(self.background)\n\n if event.inaxes != self.box.axes:\n return\n\n if self.mode == 0:\n self.box.x[-1] = event.xdata\n self.box.y[-1] = event.ydata\n elif self.mode == 1:\n self.box.width = distance(self.box.x[-2], 
self.box.y[-2], self.box.x[-1], self.box.y[-1], event.xdata, event.ydata) * 2\n\n self.box._update_segments()\n\n # redraw just the current lineangle\n axes.draw_artist(self.box)\n\n # blit just the redrawn area\n canvas.blit(axes.bbox)\n\n def disconnect(self):\n self.box.figure.canvas.mpl_disconnect(self.cidpress)\n self.box.figure.canvas.mpl_disconnect(self.cidmotion)\n\n\nclass SliceCurve(LineCollection):\n\n def __init__(self, x=[], y=[], width=None, **kwargs):\n\n super(SliceCurve, self).__init__([], **kwargs)\n\n self.x = x\n self.y = y\n self.width = width\n\n self._update_segments()\n\n def _update_segments(self):\n\n if not self.x or self.width is None or len(self.x) < 2:\n return\n\n # Find central line\n line = zip(self.x, self.y)\n\n if self.width:\n\n x1, y1, x2, y2 = get_endpoints(self.x, self.y, self.width)\n\n # Find bounding rectangle\n rect = zip(np.hstack([x1,x2[::-1], x1[0]]),\n np.hstack([y1,y2[::-1], y1[0]]))\n\n self.set_segments([list(line), list(rect)])\n self.set_linestyles(['solid', 'dashed'])\n self.set_linewidths([2, 1])\n\n else:\n\n self.set_segments([list(line)])\n self.set_linestyles(['solid'])\n self.set_linewidths([2,])\n\n\ndef unitless(x):\n if hasattr(x, 'unit'):\n return x.value\n else:\n return x\n\nclass PVSlicer(object):\n\n def __init__(self, filename_or_cube, backend=\"Qt5Agg\", clim=None):\n\n\n try:\n from spectral_cube import SpectralCube\n if isinstance(filename_or_cube, SpectralCube):\n cube = filename_or_cube\n else:\n cube = SpectralCube.read(filename_or_cube, format='fits')\n self.cube = cube\n self.array = self.cube\n self.shape = cube.shape\n except ImportError:\n warnings.warn(\"spectral_cube package is not available - using astropy.io.fits directly\")\n from astropy.io import fits\n self.array = fits.getdata(filename_or_cube)\n self.shape = array.shape\n self.ok_mask = np.isfinite(self.array)\n if self.array.ndim != 3:\n raise ValueError(\"dataset does not have 3 dimensions (install the spectral_cube package to avoid this error)\")\n\n self.backend = backend\n\n import matplotlib as mpl\n\n mpl.use(self.backend)\n import matplotlib.pyplot as plt\n\n self.fig = plt.figure(figsize=(14, 8))\n\n self.ax1 = self.fig.add_axes([0.1, 0.1, 0.4, 0.7])\n\n if clim is None:\n warnings.warn(\"clim not defined and will be determined from the data\")\n # To work with large arrays, sub-sample the data\n # (but don't do it for small arrays)\n n1 = int(np.round(max(self.shape[0] / 10, 1)))\n n2 = int(np.round(max(self.shape[1] / 10, 1)))\n n3 = int(np.round(max(self.shape[2] / 10, 1)))\n if hasattr(self,'cube'):\n sub_cube = self.cube[::n1,::n2,::n3]\n cmin = sub_cube.min().value\n cmax = sub_cube.max().value\n else:\n sub_array = self.array[::n1,::n2,::n3]\n sub_mask = self.ok_mask[::n1,::n2,::n3]\n cmin = sub_array[sub_mask].min()\n cmax = sub_array[sub_mask].max()\n crange = cmax - cmin\n self._clim = (cmin - crange, cmax + crange)\n else:\n self._clim = clim\n\n self.slice = int(round(self.shape[0] / 2.))\n\n from matplotlib.widgets import Slider\n\n self.slice_slider_ax = self.fig.add_axes([0.1, 0.95, 0.4, 0.03])\n self.slice_slider_ax.set_xticklabels(\"\")\n self.slice_slider_ax.set_yticklabels(\"\")\n self.slice_slider = Slider(self.slice_slider_ax, \"3-d slice\", 0, self.shape[0]-1, valinit=self.slice, valfmt=\"%i\")\n self.slice_slider.on_changed(self.update_slice)\n self.slice_slider.drawon = False\n\n self.image = self.ax1.imshow(unitless(self.array[self.slice, :,:]),\n origin='lower', interpolation='nearest',\n vmin=self._clim[0], 
vmax=self._clim[1],\n cmap=plt.cm.gray)\n\n self.vmin_slider_ax = self.fig.add_axes([0.1, 0.90, 0.4, 0.03])\n self.vmin_slider_ax.set_xticklabels(\"\")\n self.vmin_slider_ax.set_yticklabels(\"\")\n self.vmin_slider = Slider(self.vmin_slider_ax, \"vmin\", self._clim[0], self._clim[1], valinit=self._clim[0])\n self.vmin_slider.on_changed(self.update_vmin)\n self.vmin_slider.drawon = False\n\n self.vmax_slider_ax = self.fig.add_axes([0.1, 0.85, 0.4, 0.03])\n self.vmax_slider_ax.set_xticklabels(\"\")\n self.vmax_slider_ax.set_yticklabels(\"\")\n self.vmax_slider = Slider(self.vmax_slider_ax, \"vmax\", self._clim[0], self._clim[1], valinit=self._clim[1])\n self.vmax_slider.on_changed(self.update_vmax)\n self.vmax_slider.drawon = False\n\n self.grid1 = None\n self.grid2 = None\n self.grid3 = None\n\n self.ax2 = self.fig.add_axes([0.55, 0.1, 0.4, 0.7])\n\n # Add slicing box\n self.box = SliceCurve(colors=(0.8, 0.0, 0.0))\n self.ax1.add_collection(self.box)\n self.movable = MovableSliceBox(self.box, callback=self.update_pv_slice)\n self.movable.connect()\n\n # Add save button\n from matplotlib.widgets import Button\n self.save_button_ax = self.fig.add_axes([0.65, 0.90, 0.20, 0.05])\n self.save_button = Button(self.save_button_ax, 'Save slice to FITS')\n self.save_button.on_clicked(self.save_fits)\n self.file_status_text = self.fig.text(0.75, 0.875, \"\", ha='center', va='center')\n self.set_file_status(None)\n\n self.set_file_status(None)\n self.pv_slice = None\n\n self.cidpress = self.fig.canvas.mpl_connect('button_press_event', self.click)\n\n def set_file_status(self, status, filename=None):\n if status == 'instructions':\n self.file_status_text.set_text('Please enter filename in terminal')\n self.file_status_text.set_color('red')\n elif status == 'saved':\n self.file_status_text.set_text('File successfully saved to {0}'.format(filename))\n self.file_status_text.set_color('green')\n else:\n self.file_status_text.set_text('')\n self.file_status_text.set_color('black')\n self.fig.canvas.draw()\n\n def click(self, event):\n\n if event.inaxes != self.ax2:\n return\n\n self.slice_slider.set_val(event.ydata)\n\n def save_fits(self, *args, **kwargs):\n\n self.set_file_status('instructions')\n\n print(\"Enter filename: \", end='')\n try:\n plot_name = raw_input()\n except NameError:\n plot_name = input()\n\n if self.pv_slice is None:\n return\n\n from astropy.io import fits\n self.pv_slice.writeto(plot_name, clobber=True)\n print(\"Saved file to: \", plot_name)\n\n self.set_file_status('saved', filename=plot_name)\n\n def update_pv_slice(self, box):\n\n path = Path(zip(box.x, box.y))\n path.width = box.width\n\n self.pv_slice = extract_pv_slice(self.array, path)\n\n self.ax2.cla()\n self.ax2.imshow(self.pv_slice.data, origin='lower', aspect='auto', interpolation='nearest')\n\n self.fig.canvas.draw()\n\n def show(self, block=True):\n import matplotlib.pyplot as plt\n plt.show(block=block)\n\n def update_slice(self, pos=None):\n\n if self.array.ndim == 2:\n self.image.set_array(unitless(self.array))\n else:\n self.slice = int(round(pos))\n self.image.set_array(unitless(self.array[self.slice, :, :]))\n\n self.fig.canvas.draw()\n\n def update_vmin(self, vmin):\n if vmin > self._clim[1]:\n self._clim = (self._clim[1], self._clim[1])\n else:\n self._clim = (vmin, self._clim[1])\n self.image.set_clim(*self._clim)\n self.fig.canvas.draw()\n\n def update_vmax(self, vmax):\n if vmax < self._clim[0]:\n self._clim = (self._clim[0], self._clim[0])\n else:\n self._clim = (self._clim[0], vmax)\n 
self.image.set_clim(*self._clim)\n self.fig.canvas.draw()\n" ]
[ [ "numpy.hstack", "numpy.isfinite", "matplotlib.widgets.Button", "matplotlib.use", "matplotlib.widgets.Slider", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
cseveriano/evolving_clustering
[ "50dd2b4e38ee11aba9382f1a8e04f530b7c9c949" ]
[ "src/models/evolving/util/util.py" ]
[ "import numpy as np\nimport math\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\n\ndef adjust_labels(y_pred, y):\n new_y_pred = np.array(y_pred.copy())\n\n pred_labels = np.unique(y_pred)\n\n for l in pred_labels:\n labels = y[y_pred == l]\n\n uniqueValues, occurCount = np.unique(labels, return_counts=True)\n\n new_label = uniqueValues[np.argmax(occurCount)]\n new_y_pred[y_pred == l] = new_label\n\n return new_y_pred.tolist()\n\ndef plot_macro_clusters(X, model):\n\n\n macro_clusters = model.active_macro_clusters\n colors = cm.rainbow(np.linspace(0, 1, len(macro_clusters)))\n\n ax = plt.gca()\n\n ax.scatter(X[:, 0], X[:, 1], s=1, color='b')\n\n for mg, c in zip(macro_clusters, colors):\n for i in mg:\n mi = next(item for item in model.micro_clusters if item[\"id\"] == i)\n\n mean = mi[\"mean\"]\n std = math.sqrt(mi[\"variance\"])\n circle = plt.Circle(mean, 2 * std, color= c, fill=False)\n ax.add_artist(circle)\n\n plt.draw()\n\n\ndef plot_data_labels(X, y):\n ax = plt.gca()\n colors = cm.rainbow(np.linspace(0, 1, np.max(y)))\n for ind, label in enumerate(np.unique(y)):\n ax.scatter(X[y==label, 0], X[y==label, 1], s=1, color=colors[ind])\n\n plt.draw()\n" ]
[ [ "matplotlib.pyplot.gca", "numpy.unique", "matplotlib.pyplot.draw", "matplotlib.pyplot.Circle", "numpy.max", "numpy.argmax" ] ]
hsiang271828/vnpy
[ "54e1658b253b4e32c36dcb27b51134980cae6411" ]
[ "examples/vn_trader/save_tdx_data.py" ]
[ "'''\n使用说明:\n\nfuture_download状态,下载期货数据时设置为True,下载股票数据时设置为False\nfile_path:通达信数据保存路径大家自行替换\n通达信期货数据对齐datetime到文华财经后数据与文化财经完全一致,包括指数合约\n单个文件较大时多进程只有两个进程在运行,原因不明\n通达信股票只能下载最近100天数据,期货数据下载没有时间限制\n期货数据存储路径:D:\\tdx\\vipdoc\\ds,上交所股票数据路径:D:\\tdx\\vipdoc\\sh,深交所股票数据路径:D:\\tdx\\vipdoc\\sz\n建议下载通达信期货通可以同时下载股票和期货数据,enjoy it!\n附上vnpy\\trader\\engine.py里面的合约数据保存读取代码\n\n'''\n\n\nfrom typing import TextIO\nfrom datetime import datetime,timedelta\nfrom datetime import time as dtime\nfrom time import time\nimport numpy as np\nimport pandas as pd\nimport csv\nimport os\nimport multiprocessing\nimport string\n\nfrom vnpy.trader.database import database_manager\nfrom vnpy.trader.constant import (Exchange, Interval)\nfrom vnpy.trader.object import (BarData, TickData)\nfrom vnpy.trader.utility import (load_json, save_json,extract_vt_symbol)\nfrom peewee import chunked\nfrom vnpy.event import EventEngine\nfrom vnpy.trader.engine import MainEngine\n\n\nfrom vnpy.trader.database.initialize import init_sql\nfrom vnpy.trader.database.database import Driver\n\n\n#--------------------------------------------------------------------------------------------\ndef save_tdx_data(file_path,vt_symbol:str,future_download:bool,interval: Interval = Interval.MINUTE):\n \"\"\"\n 保存通达信导出的lc1分钟数据,期货数据对齐datetime到文华财经\n \"\"\"\n print(\"%(processName)s %(message)s save_tdx_data函数\")\n\n symbol,exchange= extract_vt_symbol(vt_symbol)\n #读取二进制文件\n dt = np.dtype([\n ('date', 'u2'),\n ('time', 'u2'),\n ('open_price', 'f4'),\n ('high_price', 'f4'),\n ('low_price', 'f4'),\n ('close_price', 'f4'),\n ('amount', 'f4'),\n ('volume', 'u4'),\n ('reserve','u4')])\n data = np.fromfile(file_path, dtype=dt)\n df = pd.DataFrame(data, columns=data.dtype.names)\n df.eval('''\n year=floor(date/2048)+2004\n month=floor((date%2048)/100)\n day=floor(date%2048%100)\n hour = floor(time/60)\n minute = time%60\n ''',inplace=True)\n\n # 需要标准datetime格式,非datetime64[ns],timeStamp,此处将datetime64[ns]生成int型timestamp\n df['datetime2']=pd.to_datetime(df.loc[:,['year','month','day','hour','minute']])\n #tz_localize('Asia/Shanghai') 下面处理时区的问题 '1970-01-01T00:00:00-08:00' 与 UTC差8个小时\n df['datetime3'] =((df['datetime2'] - np.datetime64('1970-01-01T00:00:00-08:00')) / np.timedelta64(1, 's'))\n df['datetime'] = df['datetime3'].astype(int)\n #df['datetime'] = datetime.fromtimestamp(df['datetime4'] ) #这一步将int型timestamp转换为datetime,放到最后的BarData赋值时\n\n\n #删除多余字段\n df.drop(['date','time','year','month','day','hour','minute',\"amount\",\"reserve\",'datetime2','datetime3'],1,inplace=True)\n\n #补全信息\n df['symbol'] = symbol\n df['exchange'] = exchange\n df['interval'] = interval\n #将整理好的df存入数据库\n return move_df_to_db(df,future_download)\n\n\ndef formate_DBbar(bar: BarData):\n \"\"\"\n 从BarData生成DbBarData\n \"\"\"\n print(\"formate_DBbar 函数\")\n try:\n #datetime timestamp\n dt = bar.datetime.astimezone('Asia/Shanghai')\n except:\n #pandas timestamp\n dt = bar.datetime.tz_localize('Asia/Shanghai')\n bar.datetime = dt\n return bar\n#--------------------------------------------------------------------------------------------\n\n# 封装函数\ndef move_df_to_db(imported_data:pd.DataFrame,future_download:bool):\n print(\"move_df_to_db 函数\")\n\n bars = []\n count = 0\n time_consuming_start = time()\n tmpsymbol = None\n start_time = None\n\n for row in imported_data.itertuples():\n bar = BarData(\n\n datetime=datetime.fromtimestamp(row.datetime), #为标准datetime格式,非datetime64[ns],timeStamp\n symbol=row.symbol,\n exchange=row.exchange,\n interval=row.interval,\n\n open_price=row.open_price,\n 
high_price=row.high_price,\n low_price=row.low_price,\n close_price=row.close_price,\n\n # open_interest=row.open_interest,\n volume=row.volume,\n gateway_name=\"DB\",\n\n )\n if not tmpsymbol :\n tmpsymbol = bar.symbol\n\n if future_download:\n # 夜盘时间21:00 - 2:30 日期减1天\n if bar.datetime.time() >= dtime(21,0) or bar.datetime.time() <= dtime(2,30):\n bar.datetime -= timedelta(days=1)\n # 其他时间分钟减1 ???\n bar.datetime-= timedelta(minutes=1)\n\n if not start_time:\n start_time = bar.datetime\n\n bars.append(bar)\n # do some statistics\n count += 1\n end_time = bar.datetime\n\n # insert into database\n for bar_data in chunked(bars, 10000): # 分批保存数据\n database_manager.save_bar_data(bar_data)\n\n time_consuming_end =time()\n print(f'载入通达信标的:{tmpsymbol} 分钟数据,开始时间:{start_time},结束时间:{end_time},数据量:{count},耗时:{round(time_consuming_end-time_consuming_start,3)}秒')\n\nif __name__ == '__main__':\n file_path = \"D:\\\\Software\\\\TDX\\\\new_tdxqh\\\\vipdoc\\\\ds\\\\minline\"\n event_engine = EventEngine()\n main_engine = MainEngine(event_engine)\n contracts = main_engine.load_contracts()\n if contracts:\n print(\"vnpy contracts: %d\" % len(contracts))\n\n if not contracts:\n print(\"load_contracts = 0,return\")\n exit(0)\n\n file_names =[] # 文件名列表\n vt_symbols = [] # vt_symbol列表\n future_download = True # 期货数据下载状态\n for dirpath, dirnames, filenames in os.walk(file_path):\n\n print(\"通达信本地文件数量: %d\" % len(filenames))\n for file_name in filenames: #当前目录所有文件名\n b_found = False\n vt_symbol = \"\"\n #过滤压缩文件\n if file_name.split(\".\")[1] in [\"rar\",\"7z\"]:\n continue\n if file_name.endswith(\"lc1\"):\n if future_download:\n symbol = file_name.split(\".\")[0].split(\"#\")[1]\n for contract in list(contracts.values()): #查看合约是否在constracts列表中\n #指数合约vt_symbol合成,给大家提供个例子, 这里合成了99合约后需要跟交易所_index对比才能找出交易所后缀\n if b_found == True:\n break\n if symbol.endswith(\"L9\"): #如TSL9\n tmp_symbol = symbol.split(\"L9\")[0] #把TSL9合成TS\n tmp_contract_symbol:str = contract.symbol.split(\".\")[0].rstrip(string.digits)\n if tmp_symbol in [tmp_contract_symbol.upper(), tmp_contract_symbol.lower()]:\n vt_symbol = tmp_contract_symbol+\"99.\"+contract.exchange.value #把TSL9合成TS99.CFFEX\n b_found = True\n break\n elif symbol.endswith(\"L8\"): # 如TSL8\n break\n else:\n #合约symbol与文件名相同(满足合约大写或小写)使用合约vt_symbol,这是contracts的,如'sc2109.INE'\n if contract.symbol in [symbol, symbol.lower()]:\n vt_symbol = contract.vt_symbol\n b_found = True\n break\n else: #股票数据的处理\n symbol = file_name.split(\".\")[0]\n if symbol.startswith(\"sh\"):\n exchange_str = \"SSE\"\n b_found = True\n break\n elif symbol.startswith(\"sz\"):\n exchange_str = \"SZSE\"\n vt_symbol = symbol[-6:] + \".\" + exchange_str #后6位.SSE/SZSE\n b_found = True\n #收集的vt_symbol放入vt_symbls:\n if(b_found == True) and (vt_symbol not in vt_symbols ) :\n # 将文件夹内文件整理到file_names中\n #if file_name not in file_names:\n file_names.append(f\"{file_path}\\\\{file_name}\")\n vt_symbols.append(vt_symbol)\n pass\n\n print(\"过滤处理后的合约文件数量: %d\" % len(vt_symbols))\n pool = multiprocessing.Pool(multiprocessing.cpu_count(), maxtasksperchild=1)\n print(\"multiprocessing.cpu_count(): %d\" % multiprocessing.cpu_count())\n for setting in list(zip(file_names,vt_symbols)):\n if setting[1].strip()=='':\n continue\n\n setting += (future_download,)\n\n #pool.apply_async(save_tdx_data, setting)\n pool.apply(save_tdx_data, setting)\n pool.close()\n pool.join()\n\n\n #保存股票列表\n if not future_download:\n stock_vt_symbols = load_json(\"stock_vt_symbols.json\")\n for vt_symbol in vt_symbols:\n if vt_symbol not in 
stock_vt_symbols:\n stock_vt_symbols.append(vt_symbol)\n save_json(\"stock_vt_symbols.json\",stock_vt_symbols)\n\n print(\"程序运行结束...自动退出\")" ]
[ [ "numpy.fromfile", "pandas.to_datetime", "numpy.dtype", "pandas.DataFrame", "numpy.timedelta64", "numpy.datetime64" ] ]
SSS135/ppo-pytorch
[ "04cd026116bfbd7353274f8dbb4951cddfc66e6b", "04cd026116bfbd7353274f8dbb4951cddfc66e6b" ]
[ "ppo_pytorch/common/repeat_env.py", "ppo_pytorch/common/rl_base.py" ]
[ "import random\n\nimport gym\nimport numpy as np\nfrom gym import spaces\nfrom gym.envs.registration import register\n\nregister(\n id='Repeat-v0',\n entry_point='ppo_pytorch.common.repeat_env:RepeatEnv',\n)\n\nregister(\n id='RepeatNondeterministic-v0',\n entry_point='ppo_pytorch.common.repeat_env:RepeatNondeterministic',\n)\n\n\nclass RepeatEnv(gym.Env):\n def __init__(self, deterministic=True):\n high = np.array([1])\n self.observation_space = spaces.Box(-high, high)\n self.action_space = spaces.Discrete(2)\n self.positive = False\n self.iter = None\n self.deterministic = deterministic\n\n def _step(self, action):\n assert self.iter is not None\n state = np.array([(1 if self.positive else -1) if self.deterministic else 0])\n if self.iter == 0:\n self.iter += 1\n return state, 0, False, {}\n elif self.iter == 1:\n r = 1 if self.positive == (action == 1) else -1\n self.iter = None\n return state, r, True, {}\n\n def _reset(self):\n self.iter = 0\n self.positive = random.random() > 0.5\n return np.array([1 if self.positive else -1])\n\n\nclass RepeatNondeterministic(RepeatEnv):\n def __init__(self):\n super().__init__(False)\n", "import time\nfrom enum import Enum\n\nimport gym\nimport numpy as np\nfrom gym.spaces import Discrete\n\n\nclass RLStep(Enum):\n \"\"\"Internal state of `RLBase`\"\"\"\n EVAL = 0\n REWARD = 1\n FINISH = 2\n\n\nclass RLBase:\n def __init__(self, observation_space: gym.Space, action_space: gym.Space,\n num_actors=1, log_time_interval: float=None):\n \"\"\"\n Base class for all reinforcement learning algorithms. Supports running parallely on multiple envs.\n Args:\n observation_space: Env observation space.\n action_space: Env action space.\n log_time_interval: Logging interval in seconds. None disables logging.\n \"\"\"\n self.observation_space = observation_space\n self.action_space = action_space\n self.num_actors = num_actors\n self.step_type = RLStep.EVAL\n self.cur_states, self.prev_states, self.rewards, self.dones = [None] * 4\n self._logger = None\n self.log_time_interval = log_time_interval\n self._last_log_time = 0\n self._do_log = False\n self.step = 0\n\n @property\n def frame(self):\n \"\"\"Processed frames across all actors\"\"\"\n return self.step * self.num_actors\n\n @property\n def logger(self):\n \"\"\"\n Return logging class. 
Could be of any type, but only `TensorboardEnvLogger` is currently used.\n None if logging is disabled.\n \"\"\"\n return self._logger\n\n @logger.setter\n def logger(self, log):\n self._logger = log\n self._log_set()\n\n def _step(self, prev_states: np.ndarray, rewardss: np.ndarray,\n doness: np.ndarray, cur_states: np.ndarray) -> np.ndarray:\n \"\"\"\n Internal RL algorithm step.\n Args:\n prev_states: Previous observations.\n rewards: Rewards received after actig on `prev_states`\n dones: Episode end flags.\n cur_states: Current observations.\n\n Returns: Actions for `current_states`\n \"\"\"\n raise NotImplementedError\n\n def eval(self, input: np.ndarray or list) -> np.ndarray:\n \"\"\"\n Process new observations and return actions.\n Args:\n input: List of observations across all `envs`\n envs: List of parallely running envs.\n\n Returns: Taken actions.\n \"\"\"\n self.prev_states = self.cur_states\n self.cur_states = self._check_states(input)\n actions = self._step(self.prev_states, self.rewards, self.dones, self.cur_states)\n self.step += 1\n if actions is None:\n return None\n if isinstance(self.action_space, Discrete):\n actions = np.reshape(actions, (self.num_actors,))\n else:\n actions = np.reshape(actions, (self.num_actors, -1))\n return actions\n\n def reward(self, reward: np.ndarray or list) -> None:\n \"\"\"\n Reward for taken actions at `self.eval` call.\n Args:\n reward: Rewards\n \"\"\"\n self.rewards = self._check_rewards(reward)\n\n def finish_episodes(self, done: np.ndarray or list) -> None:\n \"\"\"\n Notify for ended episodes after taking actions from `self.eval`.\n Args:\n done: Episode end flags\n \"\"\"\n self.dones = self._check_dones(done)\n\n def _log_set(self):\n \"\"\"Called when logger is set or changed\"\"\"\n pass\n\n def _check_states(self, input) -> np.ndarray:\n \"\"\"\n Check if observations have correct shape and type and convert them to numpy array.\n Also check if it's allowed to call that function in current `self.step_type`\n Args:\n input: Observations\n\n Returns: Observations converted to numpy array\n \"\"\"\n assert self.step_type == RLStep.EVAL\n input = np.asarray(input, dtype=np.float32)\n assert input.shape[1:] == self.observation_space.shape, f'{input.shape[1:]} {self.observation_space.shape}'\n self.step_type = RLStep.REWARD\n return input\n\n def _check_rewards(self, rewards):\n \"\"\"\n Check if rewards have correct shape and type and convert them to numpy array.\n Also check if it's allowed to call that function in current `self.step_type`\n Args:\n rewards: Rewards\n\n Returns: Rewards converted to numpy array\n \"\"\"\n assert self.step_type == RLStep.REWARD\n rewards = np.asarray(rewards, dtype=np.float32).reshape(-1)\n assert rewards.shape == (self.num_actors,), f'wrong reward {rewards} shape {rewards.shape}'\n self.step_type = RLStep.FINISH\n return rewards\n\n def _check_dones(self, done):\n \"\"\"\n Check if done flags have correct shape and type and convert them to numpy array.\n Also check if it's allowed to call that function in current `self.step_type`\n Args:\n done: Episode end flags\n\n Returns: Episode end flags converted to numpy array\n \"\"\"\n assert self.step_type == RLStep.FINISH\n done = np.asarray(done, dtype=bool).reshape(-1)\n assert done.shape == (self.num_actors,)\n self.step_type = RLStep.EVAL\n return done\n\n def _check_log(self):\n \"\"\"Check if logging should be enabled for current step.\"\"\"\n if self.logger is not None and self.log_time_interval is not None and \\\n self._last_log_time + 
self.log_time_interval < time.time():\n self._last_log_time = time.time()\n self._do_log = True\n else:\n self._do_log = False" ]
[ [ "numpy.array" ], [ "numpy.asarray", "numpy.reshape" ] ]
CloudChaoszero/Theano-PyMC
[ "c32c1d34f9ea7e11e877bd454cb9b08305812720" ]
[ "tests/tensor/random/test_op.py" ]
[ "import numpy as np\nfrom pytest import fixture, raises\n\nimport aesara.tensor as aet\nfrom aesara import config\nfrom aesara.assert_op import Assert\nfrom aesara.gradient import NullTypeGradError, grad\nfrom aesara.tensor.math import eq\nfrom aesara.tensor.random.op import RandomVariable, default_shape_from_params\nfrom aesara.tensor.type import all_dtypes, iscalar, tensor\n\n\n@fixture(scope=\"module\", autouse=True)\ndef set_aesara_flags():\n with config.change_flags(cxx=\"\", compute_test_value=\"raise\"):\n yield\n\n\ndef test_default_shape_from_params():\n with raises(ValueError, match=\"^ndim_supp*\"):\n default_shape_from_params(0, (np.array([1, 2]), 0))\n\n res = default_shape_from_params(1, (np.array([1, 2]), np.eye(2)), rep_param_idx=0)\n assert res == (2,)\n\n res = default_shape_from_params(1, (np.array([1, 2]), 0), param_shapes=((2,), ()))\n assert res == (2,)\n\n with raises(ValueError, match=\"^Reference parameter*\"):\n default_shape_from_params(1, (np.array(1),), rep_param_idx=0)\n\n res = default_shape_from_params(\n 2, (np.array([1, 2]), np.ones((2, 3, 4))), rep_param_idx=1\n )\n assert res == (3, 4)\n\n\ndef test_RandomVariable_basics():\n\n str_res = str(\n RandomVariable(\n \"normal\",\n 0,\n [0, 0],\n \"float32\",\n inplace=True,\n )\n )\n\n assert str_res == \"normal_rv{0, (0, 0), float32, True}\"\n\n # `ndims_params` should be a `Sequence` type\n with raises(TypeError, match=\"^Parameter ndims_params*\"):\n RandomVariable(\n \"normal\",\n 0,\n 0,\n config.floatX,\n inplace=True,\n )\n\n # `size` should be a `Sequence` type\n with raises(TypeError, match=\"^Parameter size*\"):\n RandomVariable(\n \"normal\",\n 0,\n [0, 0],\n config.floatX,\n inplace=True,\n )(0, 1, size={1, 2})\n\n # No dtype\n with raises(TypeError, match=\"^dtype*\"):\n RandomVariable(\n \"normal\",\n 0,\n [0, 0],\n inplace=True,\n )(0, 1)\n\n # Confirm that `inplace` works\n rv = RandomVariable(\n \"normal\",\n 0,\n [0, 0],\n \"normal\",\n inplace=True,\n )\n\n assert rv.inplace\n assert rv.destroy_map == {0: [3]}\n\n # A no-params `RandomVariable`\n rv = RandomVariable(name=\"test_rv\", ndim_supp=0, ndims_params=())\n\n with raises(TypeError):\n rv.make_node(rng=1)\n\n # `RandomVariable._infer_shape` should handle no parameters\n rv_shape = rv._infer_shape(aet.constant([]), (), [])\n assert rv_shape.equals(aet.constant([], dtype=\"int64\"))\n\n # Integer-specificed `dtype`\n dtype_1 = all_dtypes[1]\n rv_node = rv.make_node(None, None, 1)\n rv_out = rv_node.outputs[1]\n rv_out.tag.test_value = 1\n\n assert rv_out.dtype == dtype_1\n\n with raises(NullTypeGradError):\n grad(rv_out, [rv_node.inputs[0]])\n\n\ndef test_RandomVariable_bcast():\n rv = RandomVariable(\"normal\", 0, [0, 0], config.floatX, inplace=True)\n\n mu = tensor(config.floatX, [True, False, False])\n mu.tag.test_value = np.zeros((1, 2, 3)).astype(config.floatX)\n sd = tensor(config.floatX, [False, False])\n sd.tag.test_value = np.ones((2, 3)).astype(config.floatX)\n\n s1 = iscalar()\n s1.tag.test_value = 1\n s2 = iscalar()\n s2.tag.test_value = 2\n s3 = iscalar()\n s3.tag.test_value = 3\n s3 = Assert(\"testing\")(s3, eq(s1, 1))\n\n res = rv.compute_bcast([mu, sd], (s1, s2, s3))\n assert res == [False] * 3\n\n size = aet.as_tensor((1, 2, 3), dtype=np.int32).astype(np.int64)\n res = rv.compute_bcast([mu, sd], size)\n assert res == [True, False, False]\n\n\ndef test_RandomVariable_floatX():\n test_rv_op = RandomVariable(\n \"normal\",\n 0,\n [0, 0],\n \"floatX\",\n inplace=True,\n )\n\n assert test_rv_op.dtype == \"floatX\"\n\n 
assert test_rv_op(0, 1).dtype == config.floatX\n\n new_floatX = \"float64\" if config.floatX == \"float32\" else \"float32\"\n\n with config.change_flags(floatX=new_floatX):\n assert test_rv_op(0, 1).dtype == new_floatX\n" ]
[ [ "numpy.eye", "numpy.array", "numpy.zeros", "numpy.ones" ] ]
braidedlogix/pymodelfit
[ "de8a02a27d13646b1f4b011d056edbed76540473" ]
[ "pymodelfit/utils.py" ]
[ "#Copyright 2008 Erik Tollerud\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nInternal utility functions used in pymodelfit - most of these are lifted from\n:mod:`astropysics` for a few specific purposes\n\"\"\"\nimport numpy as np\nfrom contextlib import contextmanager\n\n#all of these are CGS\nme = 9.1093897e-28 #electron mass\nkb = 1.3807e-16 #boltzmann's constant\nc = 2.99792458e10 #speed of light - exact\n\n_mpl_show_default = False\n@contextmanager\ndef mpl_context(show=None,clf=False,savefn=None):\n \"\"\"\n Used for with statements containing matplotlib plots. Usage::\n \n with _mpl_context() as plt:\n plt.plot(x,y,...)\n plt.scatter(xs,ys,...)\n \n :param bool show: \n If True,:func:`pyplot.show` will be called when plotting is completed.\n This blocks execution until the user closes the plotting window.\n :param bool clf: If True, the figure will be cleared before plotting.\n :param savefn: \n A string to save the figure to via the :func:`matplotlib.pyplot.savefig`\n function, or None to not save the figure.\n \"\"\"\n import matplotlib.pyplot as plt\n \n if show is None:\n show = _mpl_show_default\n \n isinter = plt.isinteractive()\n try:\n if isinter:\n #TODO: figure out why this is necessary (probably an mpl/ipython problem)\n plt.gcf()\n plt.interactive(False)\n if clf:\n plt.clf()\n \n yield plt\n \n finally:\n plt.interactive(isinter)\n \n if savefn:\n plt.savefig(savefn)\n if show:\n plt.draw()\n plt.show()\n else:\n plt.draw_if_interactive()\n \n \ndef cartesian_to_polar(x,y,degrees=False):\n \"\"\"\n Converts arrays in 2D rectangular Cartesian coordinates to polar\n coordinates.\n \n :param x: First cartesian coordinate\n :type x: :class:`numpy.ndarray`\n :param y: Second cartesian coordinate\n :type y: :class:`numpy.ndarray`\n :param degrees: \n If True, the output theta angle will be in degrees, otherwise radians.\n :type degrees: boolean\n \n :returns: \n (r,theta) where theta is measured from the +x axis increasing towards\n the +y axis\n \"\"\"\n r = (x*x+y*y)**0.5\n t = np.arctan2(y,x)\n if degrees:\n t = np.degrees(t)\n \n return r,t\n\ndef polar_to_cartesian(r,t,degrees=False):\n \"\"\"\n Converts arrays in 2D polar coordinates to rectangular cartesian\n coordinates.\n \n Note that the spherical coordinates are in *physicist* convention such that\n (1,0,pi/2) is x-axis.\n \n :param r: Radial coordinate\n :type r: :class:`numpy.ndarray`\n :param t: Azimuthal angle from +x-axis increasing towards +y-axis\n :type t: :class:`numpy.ndarray`\n :param degrees: \n If True, the input angles will be in degrees, otherwise radians.\n :type degrees: boolean\n \n :returns: arrays (x,y)\n \"\"\"\n if degrees:\n t=np.radians(t)\n \n return r*np.cos(t),r*np.sin(t)\n\ndef binned_weights(values,n,log=False):\n \"\"\"\n Produces an array of weights that are generated by subdividing the values\n into n bins such that each bin has an equal share of the total number of\n values.\n \n :param values: the input values\n :type values: array-like\n :param n: number of bins\n 
:type n: int\n :param log: \n If True, the values are evenly-spaced on logarithmic intervals,\n otherwise, linear.\n :type log: bool\n \n :returns: An array of weights on [0,1] with shape matching `values`\n \n \"\"\"\n \n if log:\n values = np.log(values).ravel()\n else:\n values = np.array(values,copy=False).ravel()\n \n mx,mi = np.max(values),np.min(values)\n \n n,edges = np.histogram(values)\n ws = np.zeros_like(values)\n \n wsr = ws.ravel()\n for i,w in enumerate(1.0/n):\n m = (edges[i]<=values) & (values<edges[i+1])\n wsr[m] = w \n wsr[edges[-1]==values] = w\n \n return ws\n\n" ]
[ [ "numpy.radians", "numpy.arctan2", "numpy.max", "numpy.zeros_like", "numpy.histogram", "matplotlib.pyplot.isinteractive", "matplotlib.pyplot.gcf", "numpy.sin", "numpy.log", "numpy.min", "matplotlib.pyplot.savefig", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.interactive", "numpy.degrees", "numpy.cos", "matplotlib.pyplot.draw", "matplotlib.pyplot.clf", "matplotlib.pyplot.draw_if_interactive" ] ]
maxkferg/mink-reality
[ "6eed945af797f164dddd0d69a7f47183b621db22" ]
[ "src/simulator/sim/simulation/environment/simulation_env.py" ]
[ "import os\nimport gym\nimport math\nfrom gym import spaces\nfrom gym.utils import seeding\nimport numpy as np\nimport time\nimport pybullet\nimport transforms3d\nfrom . import SimRobot\nfrom . import bullet_client\nfrom .robot_models import Husky\nfrom .robot_models import Turtlebot\nfrom .config import URDF_ROOT\nfrom .simulation_objects import create_object_by_name\nfrom OpenGL.GL import glGetIntegerv, glGetDoublev\nfrom OpenGL.GLU import gluUnProject\nimport random\n\nRENDER_WIDTH = 960\nRENDER_HEIGHT = 720\n\nZED_MIN_RANGE = 0.2\nZED_MAX_RANGE = 0.8\nZED_NOISE = 0.005\n\ndef rotate(origin, point, angle):\n \"\"\"\n Rotate a point counterclockwise by a given angle around a given origin.\n The angle should be given in radians.\n \"\"\"\n ox, oy = origin\n px, py = point\n qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)\n qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)\n return qx, qy\n\n\ndef make_camera(height, width, view_matrix, proj_matrix):\n # make camera matrix\n camera = {\n \"class_name\" : \"PinholeCameraParameters\",\n \"extrinsic\" : view_matrix,\n \"intrinsic\" : {\n \"height\": height,\n \"width\": width,\n \"intrinsic_matrix\": [\n 935.30743608719376,\n 0,\n 0,\n 0,\n 935.30743608719376,\n 0,\n 959.5,\n 539.5,\n 1\n ]\n },\n \"version_major\" : 1,\n \"version_minor\" : 0\n }\n return camera\n\n\nclass SimRobotEnv(gym.Env):\n metadata = {\n 'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second': 50\n }\n\n\n def __init__(self,\n urdfRoot=URDF_ROOT,\n world=\"y2e2/pybullet/world.sdf\",\n floor=\"y2e2/pybullet/floor.urdf\",\n actionRepeat=50,\n buildingScale=0.15,\n isEnableSelfCollision=True,\n isDiscrete=False,\n renders=False,\n reflection=False):\n\n self.world = world\n self.floor = floor\n self.timeStep = .01\n self.urdfRoot = urdfRoot\n self.actionRepeat = actionRepeat\n self.isEnableSelfCollision = isEnableSelfCollision\n self.observation = []\n self.ballUniqueId = -1\n self.robot = None # The controlled robot\n self.buildingIds = [] # Each plane is given an id\n self.buildingScale = buildingScale\n self.width = 320\n self.height = 240\n self.reflection = reflection\n\n self.envStepCounter = 0\n self.renders = renders\n self.isDiscrete = isDiscrete\n if self.renders:\n self.physics = bullet_client.BulletClient(connection_mode=pybullet.GUI)\n else:\n self.physics = bullet_client.BulletClient()\n\n self.seed()\n observationDim = 2 # len(self.getExtendedObservation())\n observation_high = np.ones(observationDim) * 1000 # np.inf\n if isDiscrete:\n self.action_space = spaces.Discrete(9)\n else:\n action_dim = 2\n self._action_bound = 1\n action_high = np.array([self._action_bound] * action_dim)\n self.action_space = spaces.Box(-action_high, action_high)\n self.observation_space = spaces.Box(-observation_high, observation_high)\n self.viewer = None\n\n\n def reset(self):\n print(\"Resetting environment\")\n floor_path = os.path.join(self.urdfRoot, self.floor)\n world_path = os.path.join(self.urdfRoot, self.world)\n base_orientation = pybullet.getQuaternionFromEuler([0, 0, 1.54])\n self.physics.resetSimulation()\n self.cam_yaw = 0\n\n print(\"Loading floor geometry from \",floor_path)\n print(\"Loading world geometry from \",world_path)\n self.floorIds = self.physics.loadURDF(floor_path, baseOrientation=base_orientation, globalScaling=self.buildingScale)\n self.buildingIds = self.physics.loadSDF(world_path, globalScaling=self.buildingScale)\n\n # Disable rendering while we load the robot. 
Enable reflection\n self.physics.configureDebugVisualizer(pybullet.COV_ENABLE_RENDERING,0)\n if self.reflection:\n self.physics.configureDebugVisualizer(pybullet.COV_ENABLE_PLANAR_REFLECTION,0)\n self.physics.configureDebugVisualizer(pybullet.COV_ENABLE_GUI,0)\n self.physics.configureDebugVisualizer(pybullet.COV_ENABLE_TINY_RENDERER,0)\n self.physics.setGravity(0, 0, -10)\n\n config = {}\n config['initial_pos'] = (0,0,0)\n config[\"target_pos\"] = (1, 1, 1)\n config[\"resolution\"] = None\n config[\"is_discrete\"] = False\n\n #self.robot = SimRobot.SimRobot(self.physics, urdfRootPath=self.urdfRoot, timeStep=self.timeStep)\n self.robot = Turtlebot(config, self.physics)\n self.envStepCounter = 0\n for i in range(100):\n self.physics.stepSimulation()\n self.observation = self.getExtendedObservation()\n # Enable rendering\n pybullet.configureDebugVisualizer(pybullet.COV_ENABLE_RENDERING, 1)\n return np.array(self.observation)\n\n\n def transform_camera_to_absolute(self,pts):\n \"\"\"\n Return the position of points in the global coordinate system\n @points is a column matrix of points in the camera coordinate system\n \"\"\"\n #pos, orn = self.physics.getBasePositionAndOrientation(self.robot.racecarUniqueId)\n R1 = np.eye(3)#np.array([[0,0,1],[0,1,0],[1,0,0]])\n RY = np.eye(3)#np.array([[0,0,1],[0,1,0],[-1,0,0]])\n R2 = np.eye(3)#np.linalg.inv(transforms3d.quaternions.quat2mat(orn))\n #print(pos)\n #return np.dot(pts, R1)\n \n\n correct = np.dot(np.dot(pts, R1), R2.T)\n return np.dot(correct,RY)# - pos\n\n\n def get_absolute_position(self,x,y,z,t):\n \"\"\"\n Get the absolute position of an object if it appears to\n be at position x,y,z, and rotation t from the car.\n \"\"\"\n x = -x # Relative position is defined left=negative. Absolute is the opposite\n car_pos, car_orn = self.physics.getBasePositionAndOrientation(self.robot.racecarUniqueId)\n car_yaw = pybullet.getEulerFromQuaternion(car_orn)[2]\n car_point = (car_pos[0], car_pos[1])\n cone_point = (car_pos[0]+z, car_pos[1]+x)\n rotated_cone_point = rotate(car_point, cone_point, car_yaw)\n X = rotated_cone_point[1]\n Y = y\n Z = rotated_cone_point[0]\n return X,Y,Z,t\n\n\n def get_relative_position(self,X,Y,Z,t):\n \"\"\"\n Inverse function of get_absolute_position\n \"\"\"\n # Get car position\n car_pos, car_orn = self.physics.getBasePositionAndOrientation(self.robot.racecarUniqueId)\n car_yaw = pybullet.getEulerFromQuaternion(car_orn)[2]\n car_point = (car_pos[0], car_pos[1])\n # Get point relative to car\n rotated_object_point = (Z,X)\n object_point = rotate(car_point, rotated_object_point, -car_yaw)\n # Shift point relative to car\n object_point_shifted = (object_point[0]-car_pos[0], object_point[1]-car_pos[1])\n x = object_point_shifted[1]\n z = object_point_shifted[0]\n x = -x # Relative position is defined left=negative. 
Absolute is the opposite\n return x,Y,z,t\n\n\n def spawn_object_by_name(self, class_name, X, Y, Z):\n \"\"\"\n Spawn an object at [x,y,z] relative to the car\n Dimensions are defined as follows:\n X: Horizontal offset\n Y: Vertical direction\n Z: Horizontal offset (distance from camera)\n \"\"\"\n _, new_orn = self.physics.getBasePositionAndOrientation(self.robot.racecarUniqueId)\n new_pos = np.array([Z,X,Y])\n return create_object_by_name(self.physics, class_name, new_pos, new_orn)\n\n\n def move_car(self, x, y, yaw):\n \"\"\"Move the car to a new position and orientation\"\"\"\n # Change the x,y position\n old_pos, old_orn = self.physics.getBasePositionAndOrientation(self.robot.racecarUniqueId)\n new_pos = (x, y, old_pos[2])\n # Change the yaw\n old_rot = pybullet.getEulerFromQuaternion(old_orn)\n new_rot = (old_rot[0], old_rot[1], yaw)\n new_orn = pybullet.getQuaternionFromEuler(new_rot)\n self.physics.resetBasePositionAndOrientation(self.robot.racecarUniqueId, new_pos, new_orn)\n self.cam_yaw = yaw\n\n\n def __del__(self):\n self.physics = 0\n\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n\n #def getExtendedObservation(self):\n # TODO: Add 12 angle ray-collision test (verify details)\n #self.observation = [] # self._racecar.getObservation()\n #carpos, carorn = self.physics.getBasePositionAndOrientation(self.robot.racecarUniqueId)\n #ballpos, ballorn = self.physics.getBasePositionAndOrientation(self.ballUniqueId)\n #invCarPos, invCarOrn = self.physics.invertTransform(carpos, carorn)\n #ballPosInCar, ballOrnInCar = self.physics.multiplyTransforms(invCarPos, invCarOrn, ballpos, ballorn)\n\n #self.observation.extend([ballPosInCar[0], ballPosInCar[1]])\n #return self.observation\n\n def getExtendedObservation(self):\n return\n carpos, carorn = self.physics.getBasePositionAndOrientation(self.robot.racecarUniqueId)\n carmat = self.physics.getMatrixFromQuaternion(carorn)\n ballpos, ballorn = self.physics.getBasePositionAndOrientation(self.ballUniqueId)\n invCarPos, invCarOrn = self.physics.invertTransform(carpos, carorn)\n ballPosInCar, ballOrnInCar = self.physics.multiplyTransforms(invCarPos, invCarOrn, ballpos, ballorn)\n dist0 = 0.3\n dist1 = 1.\n eyePos = [carpos[0] + dist0 * carmat[0], carpos[1] + dist0 * carmat[3], carpos[2] + dist0 * carmat[6] + 0.3]\n targetPos = [carpos[0] + dist1 * carmat[0], carpos[1] + dist1 * carmat[3],\n carpos[2] + dist1 * carmat[6] + 0.3]\n up = [carmat[2], carmat[5], carmat[8]]\n viewMat = self.physics.computeViewMatrix(eyePos, targetPos, up)\n # viewMat = self._p.computeViewMatrixFromYawPitchRoll(carpos,1,0,0,0,2)\n # print(\"projectionMatrix:\")\n # print(self._p.getDebugVisualizerCamera()[3])\n projMatrix = [0.7499999403953552, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, -1.0000200271606445, -1.0,\n 0.0, 0.0, -0.02000020071864128, 0.0]\n img_arr = self.physics.getCameraImage(width=self.width, height=self.height, viewMatrix=viewMat,\n projectionMatrix=projMatrix)\n rgb = img_arr[2]\n np_img_arr = np.reshape(rgb, (self.height, self.width, 4))\n self.observation = np_img_arr\n return self.observation\n\n def step(self, action):\n if self.renders:\n basePos, orn = self.physics.getBasePositionAndOrientation(self.robot.racecarUniqueId)\n # self.physics.resetDebugVisualizerCamera(1, 30, -40, basePos)\n\n if self.isDiscrete:\n fwd = [-1, -1, -1, 0, 0, 0, 1, 1, 1]\n steerings = [-0.6, 0, 0.6, -0.6, 0, 0.6, -0.6, 0, 0.6]\n forward = fwd[action]\n steer = steerings[action]\n realaction = [forward, steer]\n else:\n 
realaction = action\n\n self.robot.applyAction(realaction)\n for i in range(self.actionRepeat):\n self.physics.stepSimulation()\n if self.renders:\n time.sleep(self.timeStep)\n self.observation = self.getExtendedObservation()\n\n if self.termination():\n break\n self.envStepCounter += 1\n reward = self.reward()\n done = self.termination()\n # print(\"len=%r\" % len(self._observation))\n\n return np.array(self.observation), reward, done, {}\n\n\n def render(self, width, height):\n # Move the camera with the base_pos\n base_pos, orn = self.physics.getBasePositionAndOrientation(self.robot.racecarUniqueId)\n cam_yaw = 180 * self.cam_yaw / math.pi + 90\n cam_yaw = cam_yaw % 360\n cam_dist = 1\n cam_roll = 0\n cam_pitch = 0\n\n view_matrix = self.physics.computeViewMatrixFromYawPitchRoll(\n cameraTargetPosition=base_pos,\n distance=cam_dist,\n yaw=cam_yaw,\n pitch=cam_pitch,\n roll=cam_roll,\n upAxisIndex=2)\n proj_matrix = self.physics.computeProjectionMatrixFOV(\n fov=60, aspect=float(width) / height,\n nearVal=0.1, farVal=100.0)\n (_, _, px, _, _) = self.physics.getCameraImage(\n width=width, height=height, viewMatrix=view_matrix,\n projectionMatrix=proj_matrix, renderer=pybullet.ER_BULLET_HARDWARE_OPENGL)\n rgb_array = np.array(px)\n rgb_array = rgb_array[:, :, :3]\n return rgb_array\n\n\n def render_pointcloud(self, width, height, cam_yaw=0):\n \"\"\"Return an image as 3D points\"\"\"\n # Move the camera with the base_pos\n base_pos, orn = self.physics.getBasePositionAndOrientation(self.robot.racecarUniqueId)\n\n cam_yaw = 180 * cam_yaw / math.pi + 90\n cam_yaw = cam_yaw % 360\n cam_dist = 1\n cam_roll = 0\n cam_pitch = 0\n\n base_pos = (base_pos[0], base_pos[1], base_pos[2]+1)\n view_matrix = self.physics.computeViewMatrixFromYawPitchRoll(\n cameraTargetPosition=base_pos,\n distance=cam_dist,\n yaw=cam_yaw,\n pitch=cam_pitch,\n roll=cam_roll,\n upAxisIndex=2)\n\n view_matrix_reverse = self.physics.computeViewMatrixFromYawPitchRoll(\n cameraTargetPosition=base_pos,\n distance=cam_dist,\n yaw=(cam_yaw+180)%360,\n pitch=cam_pitch,\n roll=cam_roll,\n upAxisIndex=2)\n\n proj_matrix = self.physics.computeProjectionMatrixFOV(\n fov=60, aspect=float(width) / height,\n nearVal=1, farVal=40.0)\n\n (_, _, rgb, depth, segmentation) = self.physics.getCameraImage(\n width=width, height=height, viewMatrix=view_matrix,\n projectionMatrix=proj_matrix, renderer=pybullet.ER_BULLET_HARDWARE_OPENGL)\n\n # Make image\n rgb_image = np.array(rgb)\n rgb_image = rgb_image[:, :, :3]\n\n # Make points\n viewport = (0, 0, width, height)\n matrixModelView = view_matrix\n matrixProjection = proj_matrix\n\n # Make points slightly noise\n depth += np.random.normal(scale=ZED_NOISE, size=depth.shape)\n\n # Make camera params\n camera = make_camera(height, width, view_matrix_reverse, proj_matrix)\n\n # Output vectors\n pts = np.zeros((width*height, 3))\n pts_rgb = np.zeros((width*height, 3))\n pts_seg = np.zeros((width*height, ))\n\n for y in range(height):\n if y%10==0:\n pass\n #print('row:',y)\n for x in range(width):\n z = depth[y,x]\n if z==1.0: continue\n if z<ZED_MIN_RANGE or z>ZED_MAX_RANGE: continue\n i = x + y*width # Simple counter\n pts[i,:] = gluUnProject(x, y, z, matrixModelView, matrixProjection, viewport)\n pts_rgb[i,:] = rgb_image[y,x,:] / 255\n pts_seg[i] = segmentation[y,x]\n return pts, pts_rgb, pts_seg, camera\n\n\n def termination(self):\n return self.envStepCounter > 1000\n\n\n def reward(self):\n # Adapt the reward to:\n # 1 if target reached, else 0\n # -1 if wall collision\n closestPoints = 
self.physics.getClosestPoints(self.robot.racecarUniqueId, self.ballUniqueId, 10000)\n\n numPt = len(closestPoints)\n reward = -1000\n # print(numPt)\n if (numPt > 0):\n # print(\"reward:\")\n reward = -closestPoints[0][8]\n # print(reward)\n return reward\n" ]
[ [ "numpy.dot", "numpy.reshape", "numpy.eye", "numpy.ones", "numpy.random.normal", "numpy.array", "numpy.zeros" ] ]
ChamiLamelas/Math36B_FinalProject
[ "0bdb5d17769553a4edb163534c21cc641860a07a" ]
[ "code/statistical_tests.py" ]
[ "import scipy.stats\nimport numpy as np\n\n\ndef f_test(sample_x, sample_y, larger_varx_alt):\n \"\"\"\n Computes the F-value and corresponding p-value for a pair of samples and alternative hypothesis.\n\n Parameters\n ----------\n sample_x : list\n A random sample x1,...,xnx. Let its (underlying) variance be ox^2 and its sample variance Sx^2. \n sample_y : list\n A random sample y1,...,yny. Let its (underlying) variance be oy^2 and its sample variance Sy^2.\n larger_varx_alt : bool\n True if alternative hypothesis is ox^2 > oy^2. False if ox^2 < oy^2. \n\n Returns\n -------\n f_value : float\n Sx^2 / Sy^2 as defined in 'A Quick, Compact, Two-Sample Dispersion Test: Count Five'.\n p_value : float\n Let F be the F-distribution with nx, ny df. 1 - P(F < f_value) if larger_varx_alt = True, P(F < f_value) otherwise. More extreme F = Sx^2 / Sy^2 values for alternative ox^2 > oy^2 are to the right. More extreme F values for ox^2 < oy^2 are to the left. \n \"\"\"\n\n # calculate unbiased sample variances (n-1 in the denominator)\n sample_var_x = np.var(sample_x, ddof=1)\n sample_var_y = np.var(sample_y, ddof=1)\n f_value = sample_var_x/sample_var_y\n nx = len(sample_x)\n ny = len(sample_y)\n\n # compute P(F < f_value) with nx-1, ny-1 df\n cdf = scipy.stats.f.cdf(f_value, nx-1, ny-1)\n\n # More extreme f_value = Sx^2 / Sy^2 values for alternative ox^2 > oy^2. ox^2 being even bigger would be represented by larger quotient Sx^2 / Sy^2.\n # More extreme f_value for ox^2 < oy^2 are to the left. ox^2 being even smaller would be represented by smaller quotient.\n p_value = 1 - cdf if larger_varx_alt else cdf\n return f_value, p_value\n\n\ndef f1_test(sample_x, sample_y, larger_varx_alt):\n \"\"\"\n Computes the F1-value as defined in 'Fixing the F Test for Equal Variances' and corresponding p-value for a pair of samples and alternative hypothesis. \n\n Parameters\n ----------\n sample_x : list\n A random sample x1,...,xnx. Let its (underlying) variance be ox^2 and its sample variance Sx^2. \n sample_y : list\n A random sample y1,...,yny. Let its (underlying) variance be oy^2 and its sample variance Sy^2.\n larger_varx_alt : bool\n True if alternative hypothesis is ox^2 > oy^2. False if ox^2 < oy^2. \n\n Returns\n -------\n p_value : float\n Let F be the F-distribution with rx, ry df as specified in equation (1) of 'Fixing the F Test for Equal Variances'. 1 - P(F < f_value) if larger_varx_alt = True, P(F < f_value) otherwise. \n \"\"\"\n\n # calculate unbiased sample variances (n-1 in the denominator)\n sample_var_x = np.var(sample_x, ddof=1)\n sample_var_y = np.var(sample_y, ddof=1)\n f_value = sample_var_x/sample_var_y\n nx = len(sample_x)\n ny = len(sample_y)\n xmean = np.mean(sample_x)\n ymean = np.mean(sample_y)\n\n # compute moment, variance below equatio (1) of Shoemaker paper\n fourth_moment = (np.sum((sample_x - xmean)**4) +\n np.sum((sample_y - ymean)**4))/(nx + ny)\n pooled_var = ((nx-1)*sample_var_x + (ny-1)*sample_var_y)/(nx + ny)\n\n # see equation (1) of Shoemaker paper\n rx = 2*nx / ((fourth_moment/pooled_var**2) - ((nx - 3)/(nx - 1)))\n ry = 2*ny / ((fourth_moment/pooled_var**2) - ((ny - 3)/(ny - 1)))\n\n # compute P(F < f_value) with rx-1, ry-1 df\n cdf = scipy.stats.f.cdf(f_value, rx-1, ry-1)\n\n # More extreme f_value = Sx^2 / Sy^2 values for alternative ox^2 > oy^2. ox^2 being even bigger would be represented by larger quotient Sx^2 / Sy^2.\n # More extreme f_value for ox^2 < oy^2 are to the left. 
ox^2 being even smaller would be represented by smaller quotient.\n p_value = 1 - cdf if larger_varx_alt else cdf\n return p_value\n\n\ndef count_five(sample_x, sample_y, center):\n \"\"\"\n Computes the extreme counts for samples x and y as defined in 'A Quick, Compact, Two-Sample Dispersion Test: Count Five'.\n\n Parameters\n ----------\n sample_x : list\n A random sample x1,...,xn. \n sample_y : list\n A random sample y1,...,ym.\n center : str\n Whether to use 'mean' or 'median' for centering. \n Returns\n -------\n extreme_count_x : int\n C_x computed with centering mu being sample mean if center = 'mean' and sample median if center = 'median' as defined in equation (1) of 'A Quick, Compact, Two-Sample Dispersion Test: Count Five'.\n extreme_count_y : int\n C_y defined analogously to C_x above.\n Raises\n ------\n ValueError\n If center is neither 'mean' or 'median'.\n \"\"\"\n\n if center not in {'mean', 'median'}:\n raise ValueError('Invalid center %s' % (center))\n if center == 'mean':\n centering_x = np.mean(sample_x)\n centering_y = np.mean(sample_y)\n else:\n centering_x = np.median(sample_x)\n centering_y = np.median(sample_y)\n\n # compute absolute deviations from centering for x, y samples\n abs_dev_x = np.abs(np.array(sample_x) - centering_x)\n abs_dev_y = np.abs(np.array(sample_y) - centering_y)\n\n # count number of X deviations greater than max Y deviation and vice versa\n # see equation (1) of Count Five paper\n extreme_count_x = np.sum(np.where(abs_dev_x > np.max(abs_dev_y), 1, 0))\n extreme_count_y = np.sum(np.where(abs_dev_y > np.max(abs_dev_x), 1, 0))\n return extreme_count_x, extreme_count_y\n" ]
[ [ "numpy.median", "numpy.max", "numpy.mean", "numpy.var", "numpy.array", "numpy.sum" ] ]
dorogam/autarquicas
[ "9295ef112f247cb5d3d33bce89ec6c993f51b0b9" ]
[ "app.py" ]
[ "# -*- coding: utf-8 -*-\n\n# import libraries \n\nimport pandas as pd \nimport json \n\n# Open json files \n\n\t# 2009 \n\nwith open('json_files/autarquicas_2009.json') as f2009:\n data_2009 = json.load(f2009)\n\n\t# 2013\n\nwith open('json_files/autarquicas_2013.json') as f2013:\n data_2013 = json.load(f2013)\n\n\t# 2017\n\nwith open('json_files/autarquicas_2017.json') as f2017:\n data_2017 = json.load(f2017)\n\n # 2021\n\nwith open('json_files/autarquicas_2021.json') as f2021:\n data_2021 = json.load(f2021)\n\n\n\n# Create lists \n\n\t#2009\nres_2009 = [{**z, **{'district': x, 'county': y}} for x, y in data_2009.items() for y, z in y.items()]\n\n\t# Normalize json on candidates for 2009\ndf_09 = pd.json_normalize(res_2009, record_path=['candidates'], meta=['total_votes', 'county', 'district'])\n\n\t# Expand total_votes dict for 2009 \ndf_2009 = pd.concat([df_09, pd.json_normalize(df_09['total_votes'])], axis=1)\n\n\n\t#2013\nres_2013 = [{**z, **{'district': x, 'county': y}} for x, y in data_2013.items() for y, z in y.items()]\n\n\t# Normalize json on candidates for 2013\ndf_13 = pd.json_normalize(res_2013, record_path=['candidates'], meta=['total_votes', 'county', 'district'])\n\n\t# Expand total_votes dict for 2013\ndf_2013 = pd.concat([df_13, pd.json_normalize(df_13['total_votes'])], axis=1)\n\n\t#2017\nres_2017 = [{**z, **{'district': x, 'county': y}} for x, y in data_2017.items() for y, z in y.items()]\n\n\t# Normalize json on candidates for 2017\ndf_17 = pd.json_normalize(res_2017, record_path=['candidates'], meta=['total_votes', 'county', 'district'])\n\n\t# Expand total_votes dict for 2017\ndf_2017 = pd.concat([df_17, pd.json_normalize(df_17['total_votes'])], axis=1)\n\n\t#2021\nres_2021 = [{**z, **{'district': x, 'county': y}} for x, y in data_2021.items() for y, z in y.items()]\n\n\t# Normalize json on candidates for 2021\ndf_21 = pd.json_normalize(res_2021, record_path=['candidates'], meta=['total_votes', 'county', 'district'])\n\n\t# Expand total_votes dict for 2021\ndf_2021 = pd.concat([df_21, pd.json_normalize(df_21['total_votes'])], axis=1)\n\n\n# Save resulting csv files (raw no data treatment)\n\ndf_2009.to_csv('raw_csv/autarquicas_2009_raw.csv')\ndf_2013.to_csv('raw_csv/autarquicas_2013_raw.csv')\ndf_2017.to_csv('raw_csv/autarquicas_2017_raw.csv')\ndf_2021.to_csv('raw_csv/autarquicas_2021_raw.csv')\n\n# data treatment \n\nprint(df_2021.info())\n\n\n\n\t# Define datasets list \ndatasets = [df_2009,df_2013,df_2017,df_2021]\n\n\n\t# Create loop to drop total_values dict columnm, extract candidates name from candidates list, and drop other non relevant columns\nfor dataset in datasets:\n\tdataset.drop('total_votes', axis=1, inplace=True)\n\tdataset['candidatos'] = dataset['effectiveCandidates'].apply(pd.Series)\n\tdataset.drop('effectiveCandidates', axis=1, inplace=True)\n\tdataset.drop('votes.acronym', axis=1, inplace=True)\n\n\n# Create Year Column for each dataframe\n\ndf_2009['year']='2009'\ndf_2013['year']='2013'\ndf_2017['year']='2017'\ndf_2021['year']='2021'\n\n# Save treated dataframes to CSV \n\n\ndf_2009.to_csv('final_csv/autarquicas_2009_treated.csv')\ndf_2013.to_csv('final_csv/autarquicas_2013_treated.csv')\ndf_2017.to_csv('final_csv/autarquicas_2017_treated.csv')\ndf_2021.to_csv('final_csv/autarquicas_2021_treated.csv')\n\n\n# Last revison on 01OUT2021\n\n" ]
[ [ "pandas.json_normalize" ] ]
OldMetalmind/daily_weather_report
[ "fe652ba5bf836509e04f1c53396739d70ec3f998" ]
[ "app.py" ]
[ "# -*- coding: utf-8 -*-\n# Original Code: Jorge Gomes \n# Optimization: João Pina \n\n# ------------------------------\n# DESCRIPTION\n# ------------------------------\n\n# This app scrapes information from IPMA and generates an image\n# The image is to be shared automatically on social media \n\n# ------------------------------\n# IMPORT LIBRARIES\n# ------------------------------\n\nimport requests\nimport pandas as pd\nimport regex as re\nimport json\nfrom datetime import datetime, timedelta\nfrom PIL import Image, ImageFont, ImageDraw \n\n\n# ---------------------------------------\n# GET DATA AND GENERATE DATAFRAMES\n# ----------------------------------------\n\n# Define URL \n\nurl = 'https://www.ipma.pt/pt/otempo/obs.superficie/table-top-stations-all.jsp'\n\n# Get URL content \npage = requests.get(url)\n\n\n# Based on this soluton \n# https://gist.github.com/falsovsky/aa5423db4c71ff3dbfeeff48b9102ed5 \n\n# Use Regex to extract json\n\nsearch = re.search('var observations = (.*?);',page.text,re.DOTALL);\njson_data = json.loads(search.group(1))\n\n# Create Dataframe from json data\n\nipma_data = pd.concat({k: pd.DataFrame(v).T for k, v in json_data.items()}, axis=0).reset_index()\n\nprint (ipma_data.info())\n# Rename resulting level_x columns\n\nipma_data = ipma_data.rename(columns={'level_0': 'date','level_1':'stationId'})\n\nprint (ipma_data.info())\n\n# Sort dataframe by date \n\nipma_data = ipma_data.sort_values(by=['date'])\n\n# Check yesterday's date and create string\n\nyesterday = datetime.now() - timedelta(1)\nyesterday_date = datetime.strftime(yesterday, '%Y-%m-%d')\nreport_date = str(yesterday_date)\n\n# Create new datafraeme with only yesterday's results\n\nipma_data_yesterday = ipma_data[ipma_data['date'] == yesterday_date]\n\n\nprint (ipma_data_yesterday.info())\n\n\n# Define function to fetch stationId's name \ndef getStationNameById(id):\n url_bar = f\"https://api.fogos.pt/v2/weather/stations\" \\\n f\"?id={id}\" \n # Get response from URL \n response_id = requests.get(url_bar)\n # Get the json content from the response\n json_id = response_id.json()\n # Create Datafframe from json file\n df_id = pd.json_normalize(json_id)\n return df_id\n\n\n# Create empty list for territory\nterritory = []\n\n# Get max records on dataframe\nmax_records = len(ipma_data_yesterday)\n\n# Get territory for each station on the Dataframe \n\nfor x in range(max_records):\n info = getStationNameById(ipma_data_yesterday.iloc[x].stationId)\n region = info.place.values[0]\n print(region)\n territory.append(region)\n\n# Create new column called \"territory\" using the list generated above \n\nipma_data_yesterday.loc[:,'territory'] = pd.Series(territory).values\n\n# Create dataframe for Madeira's values from yesterday \ndf_madeira_yesterday = ipma_data_yesterday[ipma_data_yesterday.territory == \"Madeira\"]\n\n# Create dataframe for Azores's values from yesterday \ndf_azores_yesterday = ipma_data_yesterday[ipma_data_yesterday.territory == \"Açores\"]\n\n# Create dataframe for Portugal's values from yesterday \ndf_portugal_yesterday = ipma_data_yesterday[ipma_data_yesterday.territory == \"Portugal\"]\n\n\n# -----------------------------------\n# DEFINE MAX TEMP, \n# MIN TEMP, MAX GUST, AND RAIN ACCU\n# FOR ALL TERRITORIES\n# -----------------------------------\n\n# Max Temperatures \n# Create Dataframe based on yesterday's data, sorting maximum temperature and extracting top 4 values\n\nfour_temp_max_mad = 
df_madeira_yesterday.sort_values(by=['temp_max'],ascending=False).head(4)\nfour_temp_max_az = df_azores_yesterday.sort_values(by=['temp_max'],ascending=False).head(4)\nfour_temp_max_pt = df_portugal_yesterday.sort_values(by=['temp_max'],ascending=False).head(4)\n\n# Min Temperatures\n# Create Dataframes based on yesterday's data, sorting by minimum temperature\n\nfour_temp_min_mad = df_madeira_yesterday.sort_values(by=['temp_min'],ascending=True)\nfour_temp_min_az = df_azores_yesterday.sort_values(by=['temp_min'],ascending=True)\nfour_temp_min_pt = df_portugal_yesterday.sort_values(by=['temp_min'],ascending=True)\n# Drop all -99.0 values, since those are IPMA's way of saying station is fucked up or not working or somehting\n# (Yes!Really!)\n# and keep top four results\nfour_temp_min_mad = four_temp_min_mad[four_temp_min_mad.temp_min != -99.0].head(4)\nfour_temp_min_az = four_temp_min_az[four_temp_min_az.temp_min != -99.0].head(4)\nfour_temp_min_pt = four_temp_min_pt[four_temp_min_pt.temp_min != -99.0].head(4)\n\n# Max Wind Gust \n# Create Dataframes based on yesterday's data, sorting by maximum wind gust\nfour_wind_max_mad = df_madeira_yesterday.sort_values(by=['vento_int_max_inst'],ascending=False).head(4)\nfour_wind_max_az = df_azores_yesterday.sort_values(by=['vento_int_max_inst'],ascending=False).head(4)\nfour_wind_max_pt = df_portugal_yesterday.sort_values(by=['vento_int_max_inst'],ascending=False).head(4)\n\n# Max rain accumulated\n# Create Dataframes based on yesterday's data, sorting by maximum accumulated rain\nfour_rain_accu_mad = df_madeira_yesterday.sort_values(by=['prec_quant'],ascending=False).head(4)\nfour_rain_accu_az = df_azores_yesterday.sort_values(by=['prec_quant'],ascending=False).head(4)\nfour_rain_accu_pt = df_portugal_yesterday.sort_values(by=['prec_quant'],ascending=False).head(4)\n\n# Humidity Min \nfour_hum_min_mad = df_madeira_yesterday.sort_values(by=['hum_min'],ascending=True)\nfour_hum_min_az = df_azores_yesterday.sort_values(by=['hum_min'],ascending=True)\nfour_hum_min_pt = df_portugal_yesterday.sort_values(by=['hum_min'],ascending=True)\n\nfour_hum_min_mad = four_hum_min_mad[four_hum_min_mad.hum_min != -99.0].head(4)\nfour_hum_min_az = four_hum_min_az[four_hum_min_az.hum_min != -99.0].head(4)\nfour_hum_min_pt = four_hum_min_pt[four_hum_min_pt.hum_min != -99.0].head(4)\n\n# Humidity Max\nfour_hum_max_mad = df_madeira_yesterday.sort_values(by=['hum_max'],ascending=False)\nfour_hum_max_az = df_azores_yesterday.sort_values(by=['hum_max'],ascending=False)\nfour_hum_max_pt = df_portugal_yesterday.sort_values(by=['hum_max'],ascending=False)\n\nfour_hum_max_mad = four_hum_max_mad[four_hum_max_mad.hum_max != -99.0].head(4)\nfour_hum_max_az = four_hum_max_az[four_hum_max_az.hum_max != -99.0].head(4)\nfour_hum_max_pt = four_hum_max_pt[four_hum_max_pt.hum_max != -99.0].head(4)\n\n\n# ----------------------------------\n# THERMAL AMPLITUDES\n# -----------------------------------\n\n# Create a copy of the dataframes\ndf_amplitude_pt = df_portugal_yesterday\ndf_amplitude_az = df_azores_yesterday\ndf_amplitude_mad = df_madeira_yesterday\n\n# Drop -99 values from all dataframes\ndf_amplitude_pt=df_amplitude_pt[df_amplitude_pt.temp_min != -99.0]\ndf_amplitude_pt=df_amplitude_pt[df_amplitude_pt.temp_max != -99.0]\ndf_amplitude_az=df_amplitude_az[df_amplitude_az.temp_min != -99.0]\ndf_amplitude_az=df_amplitude_az[df_amplitude_az.temp_max != -99.0]\ndf_amplitude_mad=df_amplitude_mad[df_amplitude_mad.temp_min != -99.0]\ndf_amplitude_mad=df_amplitude_mad[df_amplitude_mad.temp_max 
!= -99.0]\n\n\n# Create Amplitude Column for each dataframe\ndf_amplitude_pt['amplitude']=df_amplitude_pt.temp_max - df_amplitude_pt.temp_min\ndf_amplitude_az['amplitude']=df_amplitude_az.temp_max - df_amplitude_az.temp_min\ndf_amplitude_mad['amplitude']=df_amplitude_mad.temp_max - df_amplitude_mad.temp_min\n\n# Sort Dataframe by amplitude and filter forthe the top record\ndf_amplitude_pt = df_amplitude_pt.sort_values(by=['amplitude'],ascending=False).head(1)\ndf_amplitude_az = df_amplitude_az.sort_values(by=['amplitude'],ascending=False).head(1)\ndf_amplitude_mad = df_amplitude_mad.sort_values(by=['amplitude'],ascending=False).head(1)\n\n\n# ----------------------------------\n# ADDING COLORS TO DATAFRAMES\n# -----------------------------------\n\n# Create Colors Lists \n\ncolors_temp_max = [(154,7,7),(144,37,37),(134,67,67),(124,97,97)]\ncolors_temp_min = [(89,165,222),(89,165,222),(107,176,226),(141,195,233)]\ncolors_hum_max = [(89,165,222),(89,165,222),(107,176,226),(141,195,233)]\ncolors_hum_min = [(154,7,7),(234,9,9),(240,157,57),(239,129,129)]\ncolors_wind_max = [(89,133,187),(122,160,210),(147,179,224),(189,208,234)]\ncolors_rain_max = [(112,121,164),(138,147,189),(163,175,213),(185,194,226)]\n\n# Max Temperature Colors\n\nfour_temp_max_mad.loc[:,'colors'] = pd.Series(colors_temp_max).values\nfour_temp_max_az.loc[:,'colors'] = pd.Series(colors_temp_max).values\nfour_temp_max_pt.loc[:,'colors'] = pd.Series(colors_temp_max).values\n\n# Min Temperature Colors\n\nfour_temp_min_mad.loc[:,'colors'] = pd.Series(colors_temp_min).values\nfour_temp_min_az.loc[:,'colors'] = pd.Series(colors_temp_min).values\nfour_temp_min_pt.loc[:,'colors'] = pd.Series(colors_temp_min).values\n\n\n# Max Rainfall \n\nfour_rain_accu_mad.loc[:,'colors'] = pd.Series(colors_rain_max).values\nfour_rain_accu_az.loc[:,'colors'] = pd.Series(colors_rain_max).values\nfour_rain_accu_pt.loc[:,'colors'] = pd.Series(colors_rain_max).values\n\n\n# Max Wind Gust \n\nfour_wind_max_mad.loc[:,'colors'] = pd.Series(colors_wind_max).values\nfour_wind_max_az.loc[:,'colors'] = pd.Series(colors_wind_max).values\nfour_wind_max_pt.loc[:,'colors'] = pd.Series(colors_wind_max).values\n\n# Mazimum Humidity\n\nfour_hum_max_mad.loc[:,'colors'] = pd.Series(colors_hum_max).values\nfour_hum_max_az.loc[:,'colors'] = pd.Series(colors_hum_max).values\nfour_hum_max_pt.loc[:,'colors'] = pd.Series(colors_hum_max).values\n\n# Min Humidity\nfour_hum_min_mad.loc[:,'colors'] = pd.Series(colors_hum_min).values\nfour_hum_min_az.loc[:,'colors'] = pd.Series(colors_hum_min).values\nfour_hum_min_pt.loc[:,'colors'] = pd.Series(colors_hum_min).values\n\n\n# ------------------------------\n# IMAGE MANIPULATION \n# ------------------------------\n\n\n# Load Base Images\ntemplate_pt = Image.open(\"resumo_meteo_template_pt.png\")\ntemplate_az = Image.open(\"resumo_meteo_template_az.png\")\ntemplate_mad = Image.open(\"resumo_meteo_template_mad.png\")\n\n# Define Font and Size\n# Font file needs to be in the same folder\ntitle_font = ImageFont.truetype('Lato-Bold.ttf', 24)\nsubtitle_font = ImageFont.truetype('Lato-Bold.ttf', 22)\ndate_font = ImageFont.truetype('Lato-Bold.ttf', 24)\namplitude_font =ImageFont.truetype('Lato-Bold.ttf',72)\nlocation_font =ImageFont.truetype('Lato-Bold.ttf',14)\n\n# Create copies of the images that can be edited\nimage_editable_pt = ImageDraw.Draw(template_pt)\nimage_editable_az = ImageDraw.Draw(template_az)\nimage_editable_mad = ImageDraw.Draw(template_mad)\n\n\n# Create vars for coordinates\n\n# Right Column \n\nmax_temp_start_coords = 
190 # This value increments by 30px every loop iteration\nmax_temp_station_name_x = 115 # Where Station Name Appears \nmax_temp_value_x = 460 # Where Value Appears \n\nmin_temp_start_coords = 370 # This value increments by 30px every loop iteration\nmin_temp_station_name_x = 115 # Where Station Name Appears \nmin_temp_value_x = 460 # Where Value Appears \n\nmax_rain_start_coords = 540 # This value increments by 30px every loop iteration\nmax_rain_station_name_x = 115 # Where Station Name Appears \nmax_rain_value_x = 460 # Where Value Appears \n\nmax_wind_start_coords = 720 # This value increments by 30px every loop iteration\nmax_wind_station_name_x = 115 # Where Station Name Appears \nmax_wind_value_x = 460 # Where Value Appears \n\n\n# Left Column \n\nmax_hum_start_coords = 190 # This value increments by 30px every loop iteration\nmax_hum_station_name_x = 650 # WHere Station Name Appears \nmax_hum_value_x = 950 # Where Value Appears \nmax_hum_unit_x = 970 # Where Unit Appears \n\nmin_hum_start_coords = 370 # This value increments by 30px every loop iteration\nmin_hum_station_name_x = 650 # WHere Station Name Appears \nmin_hum_value_x = 950 # Where Value Appears \nmin_hum_unit_x = 970 # Where Unit Appears \n\n\n\n\n# ------------------------------\n# PORTUGAL CONTINENTAL\n# ------------------------------\n\n\n# Create Loop For Max Temperature \nfor x in range(4):\n name = getStationNameById(four_temp_max_pt.iloc[x].stationId)\n station_name = name.location.values[0]\n # Strip station names \n station_name_final = station_name.replace(\"(CIM)\", \"\").strip()\n station_temp = str(four_temp_max_pt.iloc[x].temp_max)\n color = four_temp_max_pt.iloc[x].colors\n image_editable_pt.text((max_temp_station_name_x,max_temp_start_coords), station_name_final, color, font=subtitle_font)\n image_editable_pt.text((max_temp_value_x, max_temp_start_coords), station_temp, color, font=subtitle_font)\n \n\n # Increase y coordinates by 30px \n max_temp_start_coords += 30\n \n # Create Loop For Min Temperature \nfor x in range(4):\n name = getStationNameById(four_temp_min_pt.iloc[x].stationId)\n station_name = name.location.values[0]\n # Strip station names \n station_name_final = station_name.replace(\"(CIM)\", \"\").strip()\n station_temp = str(four_temp_min_pt.iloc[x].temp_min)\n color = four_temp_min_pt.iloc[x].colors\n image_editable_pt.text((min_temp_station_name_x,min_temp_start_coords), station_name_final, color, font=subtitle_font)\n image_editable_pt.text((min_temp_value_x, min_temp_start_coords), station_temp, color, font=subtitle_font)\n \n\n # Increase y coordinates by 30px \n min_temp_start_coords += 30 \n\n# Create Loop For Max Rainfall \nfor x in range(4):\n name = getStationNameById(four_rain_accu_pt.iloc[x].stationId)\n station_name = name.location.values[0]\n # Strip station names \n station_name_final = station_name.replace(\"(CIM)\", \"\").strip()\n station_temp = str(four_rain_accu_pt.iloc[x].prec_quant)\n color = four_rain_accu_pt.iloc[x].colors\n image_editable_pt.text((max_rain_station_name_x,max_rain_start_coords), station_name_final, color, font=subtitle_font)\n image_editable_pt.text((max_rain_value_x, max_rain_start_coords), station_temp, color, font=subtitle_font)\n \n\n # Increase y coordinates by 30px \n max_rain_start_coords += 30\n\n# Create Loop For Max Wind Gust\nfor x in range(4):\n name = getStationNameById(four_wind_max_pt.iloc[x].stationId)\n station_name = name.location.values[0]\n # Strip station names \n station_name_final = station_name.replace(\"(CIM)\", 
\"\").strip()\n station_temp = str(four_wind_max_pt.iloc[x].vento_int_max_inst)\n color = four_wind_max_pt.iloc[x].colors\n image_editable_pt.text((max_wind_station_name_x,max_wind_start_coords), station_name_final, color, font=subtitle_font)\n image_editable_pt.text((max_wind_value_x, max_wind_start_coords), station_temp, color, font=subtitle_font)\n \n\n # Increase y coordinates by 30px \n max_wind_start_coords += 30\n\n# Create Loop for Max Humidity \nfor x in range(4):\n name = getStationNameById(four_hum_max_pt.iloc[x].stationId)\n station_name = name.location.values[0]\n # Strip station names \n station_name_final = station_name.replace(\"(CIM)\", \"\").strip()\n station_temp = str(four_hum_max_pt.iloc[x].hum_max)\n color = four_hum_max_pt.iloc[x].colors\n image_editable_pt.text((max_hum_station_name_x,max_hum_start_coords), station_name_final, color, font=subtitle_font)\n image_editable_pt.text((max_hum_value_x, max_hum_start_coords), station_temp, color, font=subtitle_font)\n \n\n # Increase y coordinates by 30px \n max_hum_start_coords += 30\n\n# Create Loop for Min Humidity \nfor x in range(4):\n name = getStationNameById(four_hum_min_pt.iloc[x].stationId)\n station_name = name.location.values[0]\n # Strip station names \n station_name_final = station_name.replace(\"(CIM)\", \"\").strip()\n station_temp = str(four_hum_min_pt.iloc[x].hum_min)\n color = four_hum_min_pt.iloc[x].colors\n image_editable_pt.text((min_hum_station_name_x,min_hum_start_coords), station_name_final, color, font=subtitle_font)\n image_editable_pt.text((min_hum_value_x, min_hum_start_coords), station_temp, color, font=subtitle_font)\n \n\n # Increase y coordinates by 30px \n min_hum_start_coords += 30\n\n# Create Loop for Ampitude\nfor x in range(1):\n name = getStationNameById(df_amplitude_pt.iloc[x].stationId)\n station_name = name.location.values[0]\n station_temp_max = str(df_amplitude_pt.iloc[x].temp_max)\n station_temp_min = str(df_amplitude_pt.iloc[x].temp_min)\n station_temp_amplitude = str(round(df_amplitude_pt.iloc[x].amplitude,2))\n image_editable_pt.text((842,525), station_name,(0,0,0), font=location_font)\n image_editable_pt.text((920,570), station_temp_max,(154,7,7), font=subtitle_font)\n image_editable_pt.text((683,570), station_temp_min,(93,173,236), font=subtitle_font)\n image_editable_pt.text((755,600), station_temp_amplitude,(250,186,61), font=amplitude_font)\n\n\n# ------------------------------\n# AÇORES\n# ------------------------------\n\n# Reset Coordinates\n\n# Right Column \n\nmax_temp_start_coords = 190 # This value increments by 30px every loop iteration\nmax_temp_station_name_x = 115 # Where Station Name Appears \nmax_temp_value_x = 460 # Where Value Appears \n\nmin_temp_start_coords = 370 # This value increments by 30px every loop iteration\nmin_temp_station_name_x = 115 # Where Station Name Appears \nmin_temp_value_x = 460 # Where Value Appears \n\nmax_rain_start_coords = 540 # This value increments by 30px every loop iteration\nmax_rain_station_name_x = 115 # Where Station Name Appears \nmax_rain_value_x = 460 # Where Value Appears \n\nmax_wind_start_coords = 720 # This value increments by 30px every loop iteration\nmax_wind_station_name_x = 115 # Where Station Name Appears \nmax_wind_value_x = 460 # Where Value Appears \n\n\n# Left Column \n\nmax_hum_start_coords = 190 # This value increments by 30px every loop iteration\nmax_hum_station_name_x = 650 # WHere Station Name Appears \nmax_hum_value_x = 950 # Where Value Appears \nmax_hum_unit_x = 970 # Where Unit Appears 
\n\nmin_hum_start_coords = 370 # This value increments by 30px every loop iteration\nmin_hum_station_name_x = 650 # WHere Station Name Appears \nmin_hum_value_x = 950 # Where Value Appears \nmin_hum_unit_x = 970 # Where Unit Appears \n\n# Create Loop For Max Temperature \nfor x in range(4):\n name = getStationNameById(four_temp_max_az.iloc[x].stationId)\n station_name = name.location.values[0]\n # Strip station names \n station_name_final = station_name.replace(\"(DROTRH)\", \"\").strip()\n station_temp = str(four_temp_max_az.iloc[x].temp_max)\n color = four_temp_max_az.iloc[x].colors\n image_editable_az.text((max_temp_station_name_x,max_temp_start_coords), station_name_final, color, font=subtitle_font)\n image_editable_az.text((max_temp_value_x, max_temp_start_coords), station_temp, color, font=subtitle_font)\n \n\n # Increase y coordinates by 30px \n max_temp_start_coords += 30\n \n # Create Loop For Min Temperature \nfor x in range(4):\n name = getStationNameById(four_temp_min_az.iloc[x].stationId)\n station_name = name.location.values[0]\n # Strip station names \n station_name_final = station_name.replace(\"(DROTRH)\", \"\").strip()\n station_temp = str(four_temp_min_az.iloc[x].temp_min)\n color = four_temp_min_az.iloc[x].colors\n image_editable_az.text((min_temp_station_name_x,min_temp_start_coords), station_name_final, color, font=subtitle_font)\n image_editable_az.text((min_temp_value_x, min_temp_start_coords), station_temp, color, font=subtitle_font)\n \n\n # Increase y coordinates by 30px \n min_temp_start_coords += 30 \n\n# Create Loop For Max Rainfall \nfor x in range(4):\n name = getStationNameById(four_rain_accu_az.iloc[x].stationId)\n station_name = name.location.values[0]\n # Strip station names \n station_name_final = station_name.replace(\"(DROTRH)\", \"\").strip()\n station_temp = str(four_rain_accu_az.iloc[x].prec_quant)\n color = four_rain_accu_az.iloc[x].colors\n image_editable_az.text((max_rain_station_name_x,max_rain_start_coords), station_name_final, color, font=subtitle_font)\n image_editable_az.text((max_rain_value_x, max_rain_start_coords), station_temp, color, font=subtitle_font)\n \n\n # Increase y coordinates by 30px \n max_rain_start_coords += 30\n\n# Create Loop For Max Wind Gust\nfor x in range(4):\n name = getStationNameById(four_wind_max_az.iloc[x].stationId)\n station_name = name.location.values[0]\n # Strip station names \n station_name_final = station_name.replace(\"(DROTRH)\", \"\").strip()\n station_temp = str(four_wind_max_az.iloc[x].vento_int_max_inst)\n color = four_wind_max_az.iloc[x].colors\n image_editable_az.text((max_wind_station_name_x,max_wind_start_coords), station_name_final, color, font=subtitle_font)\n image_editable_az.text((max_wind_value_x, max_wind_start_coords), station_temp, color, font=subtitle_font)\n \n\n # Increase y coordinates by 30px \n max_wind_start_coords += 30\n\n# Create Loop for Max Humidity \nfor x in range(4):\n name = getStationNameById(four_hum_max_az.iloc[x].stationId)\n station_name = name.location.values[0]\n # Strip station names \n station_name_final = station_name.replace(\"(DROTRH)\", \"\").strip()\n station_temp = str(four_hum_max_az.iloc[x].hum_max)\n color = four_hum_max_az.iloc[x].colors\n image_editable_az.text((max_hum_station_name_x,max_hum_start_coords), station_name_final, color, font=subtitle_font)\n image_editable_az.text((max_hum_value_x, max_hum_start_coords), station_temp, color, font=subtitle_font)\n \n\n # Increase y coordinates by 30px \n max_hum_start_coords += 30\n\n# Create Loop for Min 
Humidity \nfor x in range(4):\n name = getStationNameById(four_hum_min_az.iloc[x].stationId)\n station_name = name.location.values[0]\n # Strip station names \n station_name_final = station_name.replace(\"(DROTRH)\", \"\").strip()\n station_temp = str(four_hum_min_az.iloc[x].hum_min)\n color = four_hum_min_az.iloc[x].colors\n image_editable_az.text((min_hum_station_name_x,min_hum_start_coords), station_name_final, color, font=subtitle_font)\n image_editable_az.text((min_hum_value_x, min_hum_start_coords), station_temp, color, font=subtitle_font)\n \n\n # Increase y coordinates by 30px \n min_hum_start_coords += 30\n\n# Create Loop for Ampitude\nfor x in range(1):\n name = getStationNameById(df_amplitude_az.iloc[x].stationId)\n station_name = name.location.values[0]\n station_temp_max = str(df_amplitude_az.iloc[x].temp_max)\n station_temp_min = str(df_amplitude_az.iloc[x].temp_min)\n station_temp_amplitude = str(round(df_amplitude_az.iloc[x].amplitude,2))\n image_editable_az.text((842,525), station_name,(0,0,0), font=location_font)\n image_editable_az.text((920,570), station_temp_max,(154,7,7), font=subtitle_font)\n image_editable_az.text((683,570), station_temp_min,(93,173,236), font=subtitle_font)\n image_editable_az.text((755,600), station_temp_amplitude,(250,186,61), font=amplitude_font)\n\n\n\n# ------------------------------\n# MADEIRA\n# ------------------------------\n\n# Reset Coordinates\n\n# Right Column \n\nmax_temp_start_coords = 190 # This value increments by 30px every loop iteration\nmax_temp_station_name_x = 115 # Where Station Name Appears \nmax_temp_value_x = 460 # Where Value Appears \n\nmin_temp_start_coords = 370 # This value increments by 30px every loop iteration\nmin_temp_station_name_x = 115 # Where Station Name Appears \nmin_temp_value_x = 460 # Where Value Appears \n\nmax_rain_start_coords = 540 # This value increments by 30px every loop iteration\nmax_rain_station_name_x = 115 # Where Station Name Appears \nmax_rain_value_x = 460 # Where Value Appears \n\nmax_wind_start_coords = 720 # This value increments by 30px every loop iteration\nmax_wind_station_name_x = 115 # Where Station Name Appears \nmax_wind_value_x = 460 # Where Value Appears \n\n\n# Left Column \n\nmax_hum_start_coords = 190 # This value increments by 30px every loop iteration\nmax_hum_station_name_x = 650 # WHere Station Name Appears \nmax_hum_value_x = 950 # Where Value Appears \nmax_hum_unit_x = 970 # Where Unit Appears \n\nmin_hum_start_coords = 370 # This value increments by 30px every loop iteration\nmin_hum_station_name_x = 650 # WHere Station Name Appears \nmin_hum_value_x = 950 # Where Value Appears \nmin_hum_unit_x = 970 # Where Unit Appears \n\n# Create Loop For Max Temperature \nfor x in range(4):\n name = getStationNameById(four_temp_max_mad.iloc[x].stationId)\n station_name = name.location.values[0]\n # Strip station names \n station_name_final = station_name.replace(\"Madeira,\", \"\").strip()\n station_temp = str(four_temp_max_mad.iloc[x].temp_max)\n color = four_temp_max_mad.iloc[x].colors\n image_editable_mad.text((max_temp_station_name_x,max_temp_start_coords), station_name_final, color, font=subtitle_font)\n image_editable_mad.text((max_temp_value_x, max_temp_start_coords), station_temp, color, font=subtitle_font)\n \n\n # Increase y coordinates by 30px \n max_temp_start_coords += 30\n \n # Create Loop For Min Temperature \nfor x in range(4):\n name = getStationNameById(four_temp_min_mad.iloc[x].stationId)\n station_name = name.location.values[0]\n # Strip station names \n 
station_name_final = station_name.replace(\"Madeira,\", \"\").strip()\n station_temp = str(four_temp_min_mad.iloc[x].temp_min)\n color = four_temp_min_mad.iloc[x].colors\n image_editable_mad.text((min_temp_station_name_x,min_temp_start_coords), station_name_final, color, font=subtitle_font)\n image_editable_mad.text((min_temp_value_x, min_temp_start_coords), station_temp, color, font=subtitle_font)\n \n\n # Increase y coordinates by 30px \n min_temp_start_coords += 30 \n\n# Create Loop For Max Rainfall \nfor x in range(4):\n name = getStationNameById(four_rain_accu_mad.iloc[x].stationId)\n station_name = name.location.values[0]\n # Strip station names \n station_name_final = station_name.replace(\"Madeira,\", \"\").strip()\n station_temp = str(four_rain_accu_mad.iloc[x].prec_quant)\n color = four_rain_accu_mad.iloc[x].colors\n image_editable_mad.text((max_rain_station_name_x,max_rain_start_coords), station_name_final, color, font=subtitle_font)\n image_editable_mad.text((max_rain_value_x, max_rain_start_coords), station_temp, color, font=subtitle_font)\n \n\n # Increase y coordinates by 30px \n max_rain_start_coords += 30\n\n# Create Loop For Max Wind Gust\nfor x in range(4):\n name = getStationNameById(four_wind_max_mad.iloc[x].stationId)\n station_name = name.location.values[0]\n # Strip station names \n station_name_final = station_name.replace(\"Madeira,\", \"\").strip()\n station_temp = str(four_wind_max_mad.iloc[x].vento_int_max_inst)\n color = four_wind_max_mad.iloc[x].colors\n image_editable_mad.text((max_wind_station_name_x,max_wind_start_coords), station_name_final, color, font=subtitle_font)\n image_editable_mad.text((max_wind_value_x, max_wind_start_coords), station_temp, color, font=subtitle_font)\n \n\n # Increase y coordinates by 30px \n max_wind_start_coords += 30\n\n# Create Loop for Max Humidity \nfor x in range(4):\n name = getStationNameById(four_hum_max_mad.iloc[x].stationId)\n station_name = name.location.values[0]\n # Strip station names \n station_name_final = station_name.replace(\"Madeira,\", \"\").strip()\n station_temp = str(four_hum_max_mad.iloc[x].hum_max)\n color = four_hum_max_mad.iloc[x].colors\n image_editable_mad.text((max_hum_station_name_x,max_hum_start_coords), station_name_final, color, font=subtitle_font)\n image_editable_mad.text((max_hum_value_x, max_hum_start_coords), station_temp, color, font=subtitle_font)\n \n\n # Increase y coordinates by 30px \n max_hum_start_coords += 30\n\n# Create Loop for Min Humidity \nfor x in range(4):\n name = getStationNameById(four_hum_min_mad.iloc[x].stationId)\n station_name = name.location.values[0]\n # Strip station names \n station_name_final = station_name.replace(\"Madeira,\", \"\").strip()\n station_temp = str(four_hum_min_mad.iloc[x].hum_min)\n color = four_hum_min_mad.iloc[x].colors\n image_editable_mad.text((min_hum_station_name_x,min_hum_start_coords), station_name_final, color, font=subtitle_font)\n image_editable_mad.text((min_hum_value_x, min_hum_start_coords), station_temp, color, font=subtitle_font)\n \n\n # Increase y coordinates by 30px \n min_hum_start_coords += 30\n\n# Create Loop for Ampitude\nfor x in range(1):\n name = getStationNameById(df_amplitude_mad.iloc[x].stationId)\n station_name = name.location.values[0]\n station_temp_max = str(df_amplitude_mad.iloc[x].temp_max)\n station_temp_min = str(df_amplitude_mad.iloc[x].temp_min)\n station_temp_amplitude = str(round(df_amplitude_mad.iloc[x].amplitude,2))\n image_editable_mad.text((842,525), station_name,(0,0,0), font=location_font)\n 
image_editable_mad.text((920,570), station_temp_max,(154,7,7), font=subtitle_font)\n image_editable_mad.text((683,570), station_temp_min,(93,173,236), font=subtitle_font)\n image_editable_mad.text((755,600), station_temp_amplitude,(250,186,61), font=amplitude_font)\n\n#---------------------------------\n# INSERT DATES\n#---------------------------------\n\n# Insert Report Dates\nimage_editable_pt.text((29,1020), report_date,(255,255,255), font=date_font)\nimage_editable_az.text((29,1020), report_date,(255,255,255), font=date_font)\nimage_editable_mad.text((29,1020), report_date,(255,255,255), font=date_font)\n\n#---------------------------------\n# SAVE PICTURES\n#---------------------------------\n# Save Resulting Pictures\ntemplate_pt.save(\"daily_meteo_report_pt.png\")\ntemplate_az.save(\"daily_meteo_report_az.png\")\ntemplate_mad.save(\"daily_meteo_report_mad.png\")\n\n#---------------------------------\n# THE END\n#---------------------------------\n\n\n# Made with 🤍 by Jorge Gomes & João Pina MARCH 2022\n\n\n\n\n" ]
[ [ "pandas.json_normalize", "pandas.Series", "pandas.DataFrame" ] ]
Z-yq/audioSamples.github.io
[ "084519b5a0464f465e1d72c24cba07c1ec55cd26" ]
[ "asr/models/layers/transpose_time_major.py" ]
[ "# Copyright 2020 Huy Le Nguyen (@usimarit)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport tensorflow as tf\n\n\nclass TransposeTimeMajor(tf.keras.layers.Layer):\n def __init__(self, name: str = \"transpose_time_major\", **kwargs):\n super(TransposeTimeMajor, self).__init__(name=name, **kwargs)\n\n def call(self, inputs, **kwargs):\n return tf.transpose(inputs, perm=[1, 0, 2])\n\n def get_config(self):\n config = super(TransposeTimeMajor, self).get_config()\n return config\n" ]
[ [ "tensorflow.transpose" ] ]
fianfian237/uro_pred_backend
[ "5986d9c38189362014cbfe0318a18b72c8386a7b" ]
[ "app.py" ]
[ "from flask import Flask, Response, jsonify\nfrom model import Model\nfrom flask import request\nimport pandas as pd\n\napp = Flask(__name__)\nmodel_grade = Model('Modeles/model_grade.joblib')\nmodel_stade = Model('Modeles/model_stade.joblib')\n\n\[email protected]('/predict_grade_n_stade', methods=[\"GET\"])\ndef predict_grade():\n input_json = {k: [request.args.get(k)] for k in model_stade.meta_data[\"required_input\"]}\n input = pd.DataFrame(input_json)\n try:\n prediction_grade = model_grade.predict(input)\n prediction_stade = model_stade.predict(input)\n\n result = {\n 'grade': prediction_grade[0],\n 'stade': prediction_stade[0]\n }\n # print(result)\n # return Response(result, status=200, mimetype='application/json')\n return jsonify(result)\n except:\n return Response({\"message\":\"Something went wrong, please try again.\"}, status=500, mimetype='application/json')\n\[email protected]('/health')\ndef health_check():\n return Response(\"\", status=200)\n\n\n# if __name__ == '__main__':\n# app.run(debug=True)\n # app.run(debug=True, host='0.0.0.0', port=4000)" ]
[ [ "pandas.DataFrame" ] ]
djl11/ivy
[ "209f74b5a1a82ca69ad712788ae0469c3f8614d9" ]
[ "ivy/backends/numpy/core/general.py" ]
[ "\"\"\"\nCollection of Numpy general functions, wrapped to fit Ivy syntax and signature.\n\"\"\"\n\n# global\nimport logging\nimport numpy as _np\nimport math as _math\nfrom operator import mul as _mul\nfrom functools import reduce as _reduce\nimport multiprocessing as _multiprocessing\n\n# local\nfrom ivy.core.device import default_device\nfrom ivy.backends.numpy.core.device import _dev_callable\n\n\nDTYPE_TO_STR = {_np.dtype('int8'): 'int8',\n _np.dtype('int16'): 'int16',\n _np.dtype('int32'): 'int32',\n _np.dtype('int64'): 'int64',\n _np.dtype('uint8'): 'uint8',\n _np.dtype('uint16'): 'uint16',\n _np.dtype('uint32'): 'uint32',\n _np.dtype('uint64'): 'uint64',\n 'bfloat16': 'bfloat16',\n _np.dtype('float16'): 'float16',\n _np.dtype('float32'): 'float32',\n _np.dtype('float64'): 'float64',\n _np.dtype('bool'): 'bool',\n\n _np.int8: 'int8',\n _np.int16: 'int16',\n _np.int32: 'int32',\n _np.int64: 'int64',\n _np.uint8: 'uint8',\n _np.uint16: 'uint16',\n _np.uint32: 'uint32',\n _np.uint64: 'uint64',\n _np.float16: 'float16',\n _np.float32: 'float32',\n _np.float64: 'float64',\n _np.bool_: 'bool'}\n\nDTYPE_FROM_STR = {'int8': _np.dtype('int8'),\n 'int16': _np.dtype('int16'),\n 'int32': _np.dtype('int32'),\n 'int64': _np.dtype('int64'),\n 'uint8': _np.dtype('uint8'),\n 'uint16': _np.dtype('uint16'),\n 'uint32': _np.dtype('uint32'),\n 'uint64': _np.dtype('uint64'),\n 'bfloat16': 'bfloat16',\n 'float16': _np.dtype('float16'),\n 'float32': _np.dtype('float32'),\n 'float64': _np.dtype('float64'),\n 'bool': _np.dtype('bool')}\n\n\n# Helpers #\n# --------#\n\ndef _to_dev(x, dev):\n if dev is not None:\n if 'gpu' in dev:\n raise Exception('Native Numpy does not support GPU placement, consider using Jax instead')\n elif 'cpu' in dev:\n pass\n else:\n raise Exception('Invalid device specified, must be in the form [ \"cpu:idx\" | \"gpu:idx\" ],'\n 'but found {}'.format(dev))\n return x\n\n\ndef _flat_array_to_1_dim_array(x):\n return x.reshape((1,)) if x.shape == () else x\n\n\n# API #\n# ----#\n\n# noinspection PyShadowingNames\ndef array(object_in, dtype=None, dev=None):\n if dtype:\n dtype = 'bool_' if dtype == 'bool' else dtype\n dtype = _np.__dict__[dtype]\n else:\n dtype = None\n return _to_dev(_np.array(object_in, dtype=dtype), dev)\n\n\nasarray = array\n\n\ndef is_array(x, exclusive=False):\n if isinstance(x, _np.ndarray):\n return True\n return False\n\n\ncopy_array = lambda x: x.copy()\narray_equal = _np.array_equal\nto_numpy = lambda x: x\nto_numpy.__name__ = 'to_numpy'\nto_scalar = lambda x: x.item()\nto_scalar.__name__ = 'to_scalar'\nto_list = lambda x: x.tolist()\nto_list.__name__ = 'to_list'\nshape = lambda x, as_tensor=False: _np.asarray(_np.shape(x)) if as_tensor else x.shape\nshape.__name__ = 'shape'\nget_num_dims = lambda x, as_tensor=False: _np.asarray(len(_np.shape(x))) if as_tensor else len(x.shape)\nminimum = _np.minimum\nmaximum = _np.maximum\nclip = lambda x, x_min, x_max: _np.asarray(_np.clip(x, x_min, x_max))\nround = lambda x: _np.asarray(_np.round(x))\nfloormod = lambda x, y: _np.asarray(x % y)\nfloor = lambda x: _np.asarray(_np.floor(x))\nceil = lambda x: _np.asarray(_np.ceil(x))\nabs = lambda x: _np.asarray(_np.absolute(x))\n\n\ndef argmax(x, axis=0):\n ret = _np.asarray(_np.argmax(x, axis))\n if ret.shape == ():\n return ret.reshape(-1)\n return ret\n\n\ndef argmin(x, axis=0):\n ret = _np.asarray(_np.argmin(x, axis))\n if ret.shape == ():\n return ret.reshape(-1)\n return ret\n\n\nargsort = lambda x, axis=-1: _np.asarray(_np.argsort(x, axis))\n\n\ndef cast(x, dtype):\n 
return x.astype(dtype_from_str(dtype))\n\n\n# noinspection PyShadowingNames\ndef arange(stop, start=0, step=1, dtype=None, dev=None):\n if dtype:\n dtype = dtype_from_str(dtype)\n res = _to_dev(_np.arange(start, stop, step=step, dtype=dtype), dev)\n if not dtype:\n if res.dtype == _np.float64:\n return res.astype(_np.float32)\n elif res.dtype == _np.int64:\n return res.astype(_np.int32)\n return res\n\n\ndef linspace(start, stop, num, axis=None, dev=None):\n if axis is None:\n axis = -1\n return _to_dev(_np.linspace(start, stop, num, axis=axis), dev)\n\n\ndef logspace(start, stop, num, base=10., axis=None, dev=None):\n if axis is None:\n axis = -1\n return _to_dev(_np.logspace(start, stop, num, base=base, axis=axis), dev)\n\n\ndef concatenate(xs, axis=-1):\n if xs[0].shape == ():\n return _np.concatenate([_np.expand_dims(x, 0) for x in xs], axis)\n return _np.concatenate(xs, axis)\n\n\ndef flip(x, axis=None, batch_shape=None):\n num_dims = len(batch_shape) if batch_shape is not None else len(x.shape)\n if not num_dims:\n return x\n if axis is None:\n axis = list(range(num_dims))\n if type(axis) is int:\n axis = [axis]\n axis = [item + num_dims if item < 0 else item for item in axis]\n return _np.flip(x, axis)\n\n\nstack = _np.stack\n\n\ndef unstack(x, axis, keepdims=False):\n if x.shape == ():\n return [x]\n x_split = _np.split(x, x.shape[axis], axis)\n if keepdims:\n return x_split\n return [_np.squeeze(item, axis) for item in x_split]\n\n\ndef split(x, num_or_size_splits=None, axis=0, with_remainder=False):\n if x.shape == ():\n if num_or_size_splits is not None and num_or_size_splits != 1:\n raise Exception('input array had no shape, but num_sections specified was {}'.format(num_or_size_splits))\n return [x]\n if num_or_size_splits is None:\n num_or_size_splits = x.shape[axis]\n elif isinstance(num_or_size_splits, int) and with_remainder:\n num_chunks = x.shape[axis] / num_or_size_splits\n num_chunks_int = _math.floor(num_chunks)\n remainder = num_chunks - num_chunks_int\n if remainder != 0:\n num_or_size_splits = [num_or_size_splits]*num_chunks_int + [int(remainder*num_or_size_splits)]\n if isinstance(num_or_size_splits, (list, tuple)):\n num_or_size_splits = _np.cumsum(num_or_size_splits[:-1])\n return _np.split(x, num_or_size_splits, axis)\n\n\nrepeat = _np.repeat\ntile = _np.tile\nconstant_pad = lambda x, pad_width, value=0: _np.pad(_flat_array_to_1_dim_array(x), pad_width, constant_values=value)\nzero_pad = lambda x, pad_width: _np.pad(_flat_array_to_1_dim_array(x), pad_width)\nswapaxes = _np.swapaxes\n\n\ndef transpose(x, axes=None):\n if axes is None:\n num_dims = len(x.shape)\n axes = list(range(num_dims))\n axes.reverse()\n return _np.transpose(x, axes)\n\n\nexpand_dims = _np.expand_dims\nwhere = lambda condition, x1, x2: _np.where(condition, x1, x2)\n\n\ndef indices_where(x):\n where_x = _np.where(x)\n if len(where_x) == 1:\n return _np.expand_dims(where_x[0], -1)\n res = _np.concatenate([_np.expand_dims(item, -1) for item in where_x], -1)\n return res\n\n\nisnan = _np.isnan\nreshape = _np.reshape\nbroadcast_to = _np.broadcast_to\n\n\ndef squeeze(x, axis=None):\n if x.shape == ():\n if axis is None or axis == 0 or axis == -1:\n return x\n raise Exception('tried to squeeze a zero-dimensional input by axis {}'.format(axis))\n return _np.squeeze(x, axis)\n\n\n# noinspection PyShadowingNames\ndef zeros(shape, dtype='float32', dev=None):\n dtype = 'bool_' if dtype == 'bool' else dtype\n dtype = _np.__dict__[dtype]\n return _to_dev(_np.zeros(shape, dtype), dev)\n\n\n# noinspection 
PyShadowingNames\ndef zeros_like(x, dtype=None, dev=None):\n if dtype:\n dtype = 'bool_' if dtype == 'bool' else dtype\n dtype = _np.__dict__[dtype]\n else:\n dtype = x.dtype\n return _to_dev(_np.zeros_like(x, dtype=dtype), dev)\n\n\n# noinspection PyShadowingNames\ndef ones(shape, dtype='float32', dev=None):\n dtype = 'bool_' if dtype == 'bool' else dtype\n dtype = _np.__dict__[dtype]\n return _to_dev(_np.ones(shape, dtype), dev)\n\n\n# noinspection PyShadowingNames\ndef ones_like(x, dtype=None, dev=None):\n if dtype:\n dtype = 'bool_' if dtype == 'bool' else dtype\n dtype = _np.__dict__[dtype]\n else:\n dtype = x.dtype\n return _to_dev(_np.ones_like(x, dtype=dtype), dev)\n\n\n# noinspection PyUnusedLocal\ndef one_hot(indices, depth, dev=None):\n # from https://stackoverflow.com/questions/38592324/one-hot-encoding-using-numpy\n res = _np.eye(depth)[_np.array(indices).reshape(-1)]\n return res.reshape(list(indices.shape) + [depth])\n\n\ncross = _np.cross\nmatmul = lambda x1, x2: _np.matmul(x1, x2)\ncumsum = _np.cumsum\n\n\ndef cumprod(x, axis=0, exclusive=False):\n if exclusive:\n x = _np.swapaxes(x, axis, -1)\n x = _np.concatenate((_np.ones_like(x[..., -1:]), x[..., :-1]), -1)\n res = _np.cumprod(x, -1)\n return _np.swapaxes(res, axis, -1)\n return _np.cumprod(x, axis)\n\n\n# noinspection PyShadowingNames\ndef identity(n, dtype='float32', batch_shape=None, dev=None):\n dtype = 'bool_' if dtype == 'bool' else dtype\n dtype = _np.__dict__[dtype]\n mat = _np.identity(n, dtype=dtype)\n if batch_shape is None:\n return_mat = mat\n else:\n reshape_dims = [1] * len(batch_shape) + [n, n]\n tile_dims = list(batch_shape) + [1, 1]\n return_mat = _np.tile(_np.reshape(mat, reshape_dims), tile_dims)\n return _to_dev(return_mat, dev)\n\n\nmeshgrid = lambda *xs, indexing='ij': _np.meshgrid(*xs, indexing=indexing)\n\n\ndef scatter_flat(indices, updates, size, reduction='sum', dev=None):\n if dev is None:\n dev = _dev_callable(updates)\n if reduction == 'sum':\n target = _np.zeros([size], dtype=updates.dtype)\n _np.add.at(target, indices, updates)\n elif reduction == 'min':\n target = _np.ones([size], dtype=updates.dtype) * 1e12\n _np.minimum.at(target, indices, updates)\n target = _np.where(target == 1e12, 0., target)\n elif reduction == 'max':\n target = _np.ones([size], dtype=updates.dtype) * -1e12\n _np.maximum.at(target, indices, updates)\n target = _np.where(target == -1e12, 0., target)\n else:\n raise Exception('reduction is {}, but it must be one of \"sum\", \"min\" or \"max\"'.format(reduction))\n return _to_dev(target, dev)\n\n\n# noinspection PyShadowingNames\ndef scatter_nd(indices, updates, shape, reduction='sum', dev=None):\n if dev is None:\n dev = _dev_callable(updates)\n shape = list(shape)\n indices_flat = indices.reshape(-1, indices.shape[-1]).T\n indices_tuple = tuple(indices_flat) + (Ellipsis,)\n if reduction == 'sum':\n target = _np.zeros(shape, dtype=updates.dtype)\n _np.add.at(target, indices_tuple, updates)\n elif reduction == 'min':\n target = _np.ones(shape, dtype=updates.dtype) * 1e12\n _np.minimum.at(target, indices_tuple, updates)\n target = _np.where(target == 1e12, 0., target)\n elif reduction == 'max':\n target = _np.ones(shape, dtype=updates.dtype) * -1e12\n _np.maximum.at(target, indices_tuple, updates)\n target = _np.where(target == -1e12, 0., target)\n else:\n raise Exception('reduction is {}, but it must be one of \"sum\", \"min\" or \"max\"'.format(reduction))\n return _to_dev(target, dev)\n\n\ndef gather(params, indices, axis=-1, dev=None):\n if dev is None:\n dev = 
_dev_callable(params)\n return _to_dev(_np.take_along_axis(params, indices, axis), dev)\n\n\ndef gather_nd(params, indices, dev=None):\n if dev is None:\n dev = _dev_callable(params)\n indices_shape = indices.shape\n params_shape = params.shape\n num_index_dims = indices_shape[-1]\n result_dim_sizes_list = [_reduce(_mul, params_shape[i + 1:], 1) for i in range(len(params_shape) - 1)] + [1]\n result_dim_sizes = _np.array(result_dim_sizes_list)\n implicit_indices_factor = int(result_dim_sizes[num_index_dims - 1].item())\n flat_params = _np.reshape(params, (-1,))\n new_shape = [1] * (len(indices_shape) - 1) + [num_index_dims]\n indices_scales = _np.reshape(result_dim_sizes[0:num_index_dims], new_shape)\n indices_for_flat_tiled = _np.tile(_np.reshape(_np.sum(indices * indices_scales, -1, keepdims=True), (-1, 1)), (1, implicit_indices_factor))\n implicit_indices = _np.tile(_np.expand_dims(_np.arange(implicit_indices_factor), 0), (indices_for_flat_tiled.shape[0], 1))\n indices_for_flat = indices_for_flat_tiled + implicit_indices\n flat_indices_for_flat = _np.reshape(indices_for_flat, (-1,)).astype(_np.int32)\n flat_gather = _np.take(flat_params, flat_indices_for_flat, 0)\n new_shape = list(indices_shape[:-1]) + list(params_shape[num_index_dims:])\n res = _np.reshape(flat_gather, new_shape)\n return _to_dev(res, dev)\n\n\ndef linear_resample(x, num_samples, axis=-1):\n x_shape = list(x.shape)\n num_x_dims = len(x_shape)\n axis = axis % num_x_dims\n x_pre_shape = x_shape[0:axis]\n x_pre_size = _reduce(_mul, x_pre_shape) if x_pre_shape else 1\n num_pre_dims = len(x_pre_shape)\n num_vals = x.shape[axis]\n x_post_shape = x_shape[axis+1:]\n x_post_size = _reduce(_mul, x_post_shape) if x_post_shape else 1\n num_post_dims = len(x_post_shape)\n xp = _np.reshape(_np.arange(num_vals*x_pre_size*x_post_size), x_shape)\n x_coords = _np.arange(num_samples) * ((num_vals-1)/(num_samples-1)) * x_post_size\n x_coords = _np.reshape(x_coords, [1]*num_pre_dims + [num_samples] + [1]*num_post_dims)\n x_coords = _np.broadcast_to(x_coords, x_pre_shape + [num_samples] + x_post_shape)\n slc = [slice(None)] * num_x_dims\n slc[axis] = slice(0, 1, 1)\n x_coords = x_coords + xp[tuple(slc)]\n x = _np.reshape(x, (-1,))\n xp = _np.reshape(xp, (-1,))\n x_coords = _np.reshape(x_coords, (-1,))\n ret = _np.interp(x_coords, xp, x)\n return _np.reshape(ret, x_pre_shape + [num_samples] + x_post_shape)\n\n\ndef dtype(x, as_str=False):\n dt = x.dtype\n if as_str:\n return dtype_to_str(dt)\n return dt\n\n\ndef dtype_to_str(dtype_in):\n if isinstance(dtype_in, str):\n return dtype_in\n return DTYPE_TO_STR[dtype_in]\n\n\ndef dtype_from_str(dtype_in):\n if not isinstance(dtype_in, str):\n return dtype_in\n return DTYPE_FROM_STR[dtype_in]\n\n\n# noinspection PyUnusedLocal\ndef compile(func, dynamic=True, example_inputs=None, static_argnums=None, static_argnames=None):\n logging.warning('Numpy does not support compiling functions.\\n'\n 'Now returning the unmodified function.')\n return func\n\n\ncurrent_framework_str = lambda: 'numpy'\ncurrent_framework_str.__name__ = 'current_framework_str'\nmultiprocessing = lambda context=None: _multiprocessing if context is None else _multiprocessing.get_context(context)\ncontainer_types = lambda: []\n\n\ndef inplace_update(x, val):\n x.data = val\n return x\n\n\ndef inplace_decrement(x, val):\n x -= val\n return x\n\n\ndef inplace_increment(x, val):\n x += val\n return x\n\n\ninplace_arrays_supported = lambda: True\ninplace_variables_supported = lambda: True\n" ]
[ [ "numpy.take_along_axis", "numpy.split", "numpy.expand_dims", "numpy.take", "numpy.linspace", "numpy.asarray", "numpy.squeeze", "numpy.cumsum", "numpy.dtype", "numpy.concatenate", "numpy.round", "numpy.maximum.at", "numpy.argmin", "numpy.zeros_like", "numpy.where", "numpy.swapaxes", "numpy.ones_like", "numpy.clip", "numpy.reshape", "numpy.arange", "numpy.eye", "numpy.matmul", "numpy.ceil", "numpy.argmax", "numpy.interp", "numpy.zeros", "numpy.logspace", "numpy.minimum.at", "numpy.cumprod", "numpy.identity", "numpy.floor", "numpy.transpose", "numpy.argsort", "numpy.array", "numpy.meshgrid", "numpy.flip", "numpy.sum", "numpy.add.at", "numpy.absolute", "numpy.ones", "numpy.shape", "numpy.broadcast_to" ] ]
wh629/unqover
[ "6c499c6b965bd12433ebdffedde2a1d1639fe0ee" ]
[ "utils/convert_hdf5_to_hf.py" ]
[ "import sys\nimport argparse\nimport h5py\nimport numpy as np\nimport torch\nfrom utils.holder import *\nfrom utils.util import *\nimport qa.pipeline\nfrom transformers import *\nimport logging\n\nlogging.basicConfig(stream=sys.stdout, level=logging.DEBUG)\n\nparser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\nparser.add_argument('--load_file', help=\"Path to where HDF5 model to be loaded.\", default=\"\")\n## dim specs\nparser.add_argument('--hidden_size', help=\"The general hidden size of the pipeline\", type=int, default=768)\n# bert specs\nparser.add_argument('--transformer_type', help=\"The type of bert encoder from huggingface, eg. roberta-base\",default = \"roberta-base\")\n## pipeline stages\nparser.add_argument('--enc', help=\"The type of encoder, bert\", default='bert')\nparser.add_argument('--cls', help=\"The type of classifier, linear\", default='linear')\n#\nparser.add_argument('--output', help=\"Path to output HuggingFace(HF) format\", default='/models/hf')\n\n# Since the models are to be converted, not trained via HF, we will have to \"fake\" some training options\ndef artificialize_training_bin():\n\tparams = argparse.Namespace()\n\tparams.adam_epsilon=1e-08\n\tparams.cache_dir=''\n\tparams.config_name=''\n\tparams.data_dir=None\n\tparams.device=torch.device(type='cuda')\n\tparams.do_eval=True\n\tparams.do_lower_case=True\n\tparams.do_train=True\n\tparams.doc_stride=128\n\tparams.eval_all_checkpoints=False\n\tparams.evaluate_during_training=False\n\tparams.fp16=False\n\tparams.fp16_opt_level='O1'\n\tparams.gradient_accumulation_steps=1\n\tparams.lang_id=0\n\tparams.learning_rate=3e-05\n\tparams.local_rank=-1\n\tparams.logging_steps=500\n\tparams.max_answer_length=30\n\tparams.max_grad_norm=1.0\n\tparams.max_query_length=64\n\tparams.max_seq_length=384\n\tparams.max_steps=-1\n\tparams.model_name_or_path='bert-base-uncased'\n\tparams.model_type='bert'\n\tparams.n_best_size=20\n\tparams.n_gpu=1\n\tparams.no_cuda=False\n\tparams.null_score_diff_threshold=0.0\n\tparams.num_train_epochs=2.0\n\tparams.output_dir='NOT_DEFINED'\n\tparams.overwrite_cache=True\n\tparams.overwrite_output_dir=False\n\tparams.per_gpu_eval_batch_size=8\n\tparams.per_gpu_train_batch_size=20\n\tparams.predict_file='NOT_DEFINED'\n\tparams.save_steps=500\n\tparams.seed=3435\n\tparams.server_ip=''\n\tparams.server_port=''\n\tparams.threads=1\n\tparams.tokenizer_name=''\n\tparams.train_batch_size=20\n\tparams.train_file='model_name_or_path'\n\tparams.verbose_logging=False\n\tparams.version_2_with_negative=False\n\tparams.warmup_steps=0\n\tparams.weight_decay=0.0\n\treturn params\n\ndef main(args):\n\topt = parser.parse_args(args)\n\topt.gpuid = -1\n\topt.dropout = 0\n\tshared = Holder()\n\n\t# fix some hyperparameters automatically\n\tif 'base' in opt.transformer_type:\n\t\topt.hidden_size = 768\n\telif 'large'in opt.transformer_type:\n\t\topt.hidden_size = 1024\n\n\t# load model\n\tm = qa.pipeline.Pipeline(opt, shared)\n\tprint('loading pretrained model from {0}...'.format(opt.load_file))\n\tparam_dict = load_param_dict('{0}.hdf5'.format(opt.load_file))\n\tm.set_param_dict(param_dict)\n\n\tmlm = AutoModel.from_pretrained(opt.transformer_type)\n\ttokenizer = AutoTokenizer.from_pretrained(opt.transformer_type, add_special_tokens=False, use_fast=True)\n\ttraining_args = artificialize_training_bin()\n\n\tconfig = mlm.config\n\tconfig.num_labels = 2\t# 1 for start and 1 for end\n\ttraining_args.model_name_or_path = opt.transformer_type\n\n\tif 'roberta' 
in opt.transformer_type:\n\t\ttraining_args.model_type = 'roberta'\n\t\tm_hf = RobertaForQuestionAnswering(config)\n\t\t# move parameters\n\t\tm_hf.roberta = m.encoder.transformer\n\t\tm_hf.qa_outputs = m.classifier.linear[1]\n\telif 'distilbert' in opt.transformer_type:\n\t\ttraining_args.model_type = 'distilbert'\n\t\tm_hf = DistilBertForQuestionAnswering(config)\n\t\t# move parameters\n\t\tm_hf.distilbert = m.encoder.transformer\n\t\tm_hf.qa_outputs = m.classifier.linear[1]\n\telif 'bert' in opt.transformer_type:\n\t\ttraining_args.model_type = 'bert'\n\t\tm_hf = BertForQuestionAnswering(config)\n\t\t# move parameters\n\t\tm_hf.bert = m.encoder.transformer\n\t\tm_hf.qa_outputs = m.classifier.linear[1]\n\telse:\n\t\traise Exception('unrecognized model type {0}'.format(opt.transformer_type))\n\t\n\tm_hf.save_pretrained(opt.output)\n\ttokenizer.save_pretrained(opt.output)\n\ttorch.save(training_args, opt.output + '/training_args.bin')\n\n\nif __name__ == '__main__':\n\tsys.exit(main(sys.argv[1:]))\n" ]
[ [ "torch.device", "torch.save" ] ]
tilschaef/genomepy
[ "4c10e69b6886cf52381caf6498395391834a675b" ]
[ "genomepy/providers/base.py" ]
[ "\"\"\"BaseProvider class, the parent of the provider classes\"\"\"\nimport gzip\nimport os\nimport shutil\nimport subprocess as sp\nimport time\nfrom tempfile import TemporaryDirectory, mkdtemp\nfrom typing import Iterator, List, Union\nfrom urllib.request import urlopen\n\nimport pandas as pd\nfrom loguru import logger\n\nfrom genomepy.__about__ import __version__\nfrom genomepy.annotation.utils import read_annot, write_annot\nfrom genomepy.exceptions import GenomeDownloadError\nfrom genomepy.files import extract_archive, get_file_info, update_readme\nfrom genomepy.online import download_file\nfrom genomepy.utils import get_genomes_dir, get_localname, lower, mkdir_p, rm_rf, safe\n\n\nclass BaseProvider:\n \"\"\"\n Provider base class.\n \"\"\"\n\n # class variables set by child classes:\n name = None\n \"Name of this provider.\"\n genomes = {}\n \"Dictionary with assembly names as key and assembly metadata dictionary as value.\"\n accession_fields = []\n \"Metadata fields that (can) contain the assembly's accession ID.\"\n taxid_fields = []\n \"Metadata fields that (can) contain the assembly's taxonomy ID.\"\n description_fields = []\n \"Metadata fields with assembly related info.\"\n _cli_install_options = {}\n _url = None\n\n def __hash__(self):\n return hash(str(self.__class__))\n\n @staticmethod\n def ping() -> bool:\n \"\"\"Can the provider be reached?\"\"\"\n raise NotImplementedError()\n\n def _provider_status(self):\n \"\"\"check if provider is online\"\"\"\n if not self.ping():\n raise ConnectionError(f\"{self.name} appears to be offline.\")\n\n def _check_name(self, name):\n \"\"\"check if genome name can be found for provider\"\"\"\n name = safe(name)\n if name in self.genomes:\n return name\n\n raise GenomeDownloadError(\n f\"Could not download genome {name} from {self.name}.\\n\\n\"\n \"Check for typos or try\\n\"\n f\" genomepy search {name} -p {self.name}\"\n )\n\n def _genome_info_tuple(self, name):\n \"\"\"tuple with assembly metadata\"\"\"\n raise NotImplementedError()\n\n def list_available_genomes(self):\n \"\"\"\n List all available genomes.\n\n Yields\n ------\n genomes : list of tuples\n tuples with assembly name, accession, scientific_name, taxonomy id and description\n \"\"\"\n for name in self.genomes.keys():\n yield self._genome_info_tuple(name)\n\n def genome_taxid(self, name: str) -> int:\n \"\"\"\n Return the genome taxonomy ID for a genome.\n\n Parameters\n ----------\n name: str\n genome name\n\n Returns\n ------\n int\n Genome Taxonomy identifier\n \"\"\"\n for field in self.taxid_fields:\n tid = str(self.genomes[name].get(field))\n if tid.isdigit():\n return int(tid)\n\n def assembly_accession(self, name: str) -> str or None:\n \"\"\"\n Return the assembly accession number (GCA* or GCF*) for a genome.\n\n Parameters\n ----------\n name: str\n genome name\n\n Returns\n ------\n str\n Assembly accession number\n \"\"\"\n for field in self.accession_fields:\n accession = str(self.genomes[name].get(field))\n if accession.startswith((\"GCA\", \"GCF\")):\n return accession\n\n def annotation_links(self, name: str, **kwargs) -> List[str]:\n \"\"\"\n Return available gene annotation links (http/ftp) for a genome\n\n Parameters\n ----------\n name: str\n genome name\n\n Returns\n ------\n list\n Gene annotation links\n \"\"\"\n if \"annotations\" not in self.genomes[safe(name)]:\n links = self.get_annotation_download_links(name, **kwargs)\n self.genomes[safe(name)][\"annotations\"] = links\n return self.genomes[safe(name)][\"annotations\"]\n\n def 
get_genome_download_link(self, name, mask=\"soft\", **kwargs):\n raise NotImplementedError()\n\n def download_genome(\n self,\n name: str,\n genomes_dir: str = None,\n localname: str = None,\n mask: str = \"soft\",\n **kwargs,\n ):\n \"\"\"\n Download a (gzipped) genome file to a specific directory\n\n Parameters\n ----------\n name : str\n Genome / species name\n\n genomes_dir : str , optional\n Directory to install genome\n\n localname : str , optional\n Custom name for your genome\n\n mask: str , optional\n Masking, soft, hard or none (all other strings)\n \"\"\"\n name = self._check_name(name)\n link = self.get_genome_download_link(name, mask=mask, **kwargs)\n\n localname = get_localname(name, localname)\n genomes_dir = get_genomes_dir(genomes_dir, check_exist=False)\n out_dir = os.path.join(genomes_dir, localname)\n mkdir_p(out_dir)\n\n logger.info(f\"Downloading genome from {self.name}. Target URL: {link}...\")\n\n # download to tmp dir. Move genome on completion.\n # tmp dir is in genome_dir to prevent moving the genome between disks\n get_file = shutil.copyfile if os.path.exists(link) else download_file\n with TemporaryDirectory(dir=out_dir) as tmp_dir:\n tmp_fname = os.path.join(tmp_dir, link.split(\"/\")[-1])\n fname = os.path.join(tmp_dir, f\"{localname}.fa\")\n\n get_file(link, tmp_fname)\n logger.info(\"Genome download successful, starting post processing...\")\n\n # unzip genome\n _, is_compressed = get_file_info(link)\n if is_compressed:\n extract_archive(tmp_fname, outfile=fname, concat=True)\n else:\n shutil.move(tmp_fname, fname)\n\n # process genome (e.g. masking)\n if hasattr(self, \"_post_process_download\"):\n self._post_process_download(\n name=name, fname=fname, out_dir=out_dir, mask=mask\n )\n\n # transfer the genome from the tmpdir to the genome_dir\n src = fname\n dst = os.path.join(out_dir, f\"{localname}.fa\")\n shutil.move(src, dst)\n\n logger.info(\"name: {}\".format(name))\n logger.info(\"local name: {}\".format(localname))\n logger.info(\"fasta: {}\".format(dst))\n\n # Create readme with information\n readme = os.path.join(genomes_dir, localname, \"README.txt\")\n asm_acc = self.assembly_accession(name)\n tax_id = self.genome_taxid(name)\n metadata = {\n \"name\": localname,\n \"provider\": self.name,\n \"original name\": name,\n \"original filename\": os.path.split(link)[-1],\n \"assembly_accession\": asm_acc if asm_acc else \"na\",\n \"tax_id\": tax_id if tax_id else \"na\",\n \"mask\": mask,\n \"genome url\": link,\n \"genomepy version\": __version__,\n \"date\": time.strftime(\"%Y-%m-%d %H:%M:%S\"),\n }\n update_readme(readme, metadata)\n\n def get_annotation_download_links(self, name, **kwargs):\n \"\"\"\n Retrieve functioning gene annotation download link(s).\n\n Parameters\n ----------\n name : str\n genome name\n **kwargs: dict, optional:\n provider specific options.\n\n Returns\n -------\n list\n http/ftp link(s)\n \"\"\"\n raise NotImplementedError()\n\n def get_annotation_download_link(self, name: str, **kwargs) -> str:\n \"\"\"\n Return a functional annotation download link.\n\n Parameters\n ----------\n name : str\n genome name\n\n Returns\n -------\n str\n http/ftp link\n\n Raises\n ------\n GenomeDownloadError\n if no functional link was found\n \"\"\"\n links = self.annotation_links(name, **kwargs)\n if links:\n return links[0]\n raise GenomeDownloadError(\n f\"No gene annotations found for {name} on {self.name}.\\n\"\n \"Check for typos or try\\n\"\n f\" genomepy search {name} -p {self.name}\"\n )\n\n def download_annotation(self, name, 
genomes_dir=None, localname=None, **kwargs):\n \"\"\"\n Download annotation file to to a specific directory\n\n Parameters\n ----------\n name : str\n Genome / species name\n\n genomes_dir : str , optional\n Directory to install annotation\n\n localname : str , optional\n Custom name for your genome\n \"\"\"\n name = self._check_name(name)\n link = self.get_annotation_download_link(name, **kwargs)\n\n localname = get_localname(name, localname)\n genomes_dir = get_genomes_dir(genomes_dir, check_exist=False)\n\n logger.info(f\"Downloading annotation from {self.name}. Target URL: {link}...\")\n try:\n download_annotation(genomes_dir, link, localname)\n logger.info(\"Annotation download successful\")\n except Exception as e:\n raise GenomeDownloadError(\n f\"An error occurred while installing the gene annotation for {name} from {self.name}.\\n\"\n \"If you think the annotation should be there, please file a bug report at: \"\n \"https://github.com/vanheeringen-lab/genomepy/issues\\n\\n\"\n f\"Error: {e.args[0]}\"\n )\n\n # Add annotation URL to readme\n readme = os.path.join(genomes_dir, localname, \"README.txt\")\n update_readme(readme, updated_metadata={\"annotation url\": link})\n\n def _search_text(self, term: str) -> Iterator[str]:\n \"\"\"check if search term is found in the provider's genome name or description field(s)\"\"\"\n for name, metadata in self.genomes.items():\n if term in lower(name) or any(\n [term in lower(metadata[f]) for f in self.description_fields]\n ):\n yield name\n\n def _search_accession(self, term: str) -> Iterator[str]:\n \"\"\"check if search term is found in the provider's accession field(s)\"\"\"\n # cut off prefix (GCA_/GCF_) and suffix (version numbers, e.g. '.3')\n term = term[4:].split(\".\")[0]\n for name, metadata in self.genomes.items():\n if any([term in str(metadata[f]) for f in self.accession_fields]):\n yield name\n\n def _search_taxonomy(self, term: str) -> Iterator[str]:\n \"\"\"check if search term matches to any of the provider's taxonomy field(s)\"\"\"\n for name, metadata in self.genomes.items():\n if any([term == lower(metadata[f]) for f in self.taxid_fields]):\n yield name\n\n def search(self, term: Union[str, int]):\n \"\"\"\n Search for term in genome names, descriptions and taxonomy ID.\n\n The search is case-insensitive.\n\n Parameters\n ----------\n term : str, int\n Search term, case-insensitive.\n Can be (part of) an assembly name (e.g. 
hg38),\n scientific name (Danio rerio) or assembly\n accession (`GCA_000146045`/`GCF_`),\n or an exact taxonomy id (7227).\n\n Yields\n ------\n tuples with name and metadata\n \"\"\"\n term = lower(term)\n\n search_function = self._search_text\n if term.startswith((\"gca_\", \"gcf_\")):\n search_function = self._search_accession\n if term.isdigit():\n search_function = self._search_taxonomy\n\n for name in search_function(term):\n yield self._genome_info_tuple(name)\n\n def head_annotation(self, name: str, genomes_dir=None, n: int = 5, **kwargs):\n \"\"\"\n Download the first n lines of the annotation.\n\n The first line of the GTF is printed for review\n (of the gene_name field, for instance).\n\n Parameters\n ----------\n name : str\n genome name\n genomes_dir : str, optional\n genomes directory to install the annotation in.\n n : int, optional\n download the annotation for n genes.\n \"\"\"\n name = self._check_name(name)\n link = self.get_annotation_download_link(name, **kwargs)\n\n localname = f\"{name}_head\"\n genomes_dir = get_genomes_dir(genomes_dir, check_exist=False)\n\n fpath = os.path.join(genomes_dir, localname, f\"{localname}.annotation.gtf\")\n download_annotation(genomes_dir, link, localname, n=n)\n\n logger.info(self.name)\n m = 0\n with open(fpath) as f:\n for line in f:\n line = line.strip()\n if line and line[0] != \"#\":\n print(line)\n m += 1\n if m == n:\n break\n\n\ndef download_annotation(genomes_dir, annot_url, localname, n=None):\n \"\"\"download annotation file, convert to intermediate file and generate output files\"\"\"\n\n # create output directory if missing\n out_dir = os.path.join(genomes_dir, localname)\n mkdir_p(out_dir)\n\n # download to tmp dir. Move genome on completion.\n # tmp dir is in genome_dir to prevent moving the genome between disks\n tmp_dir = mkdtemp(dir=out_dir)\n ext, is_compressed = get_file_info(annot_url)\n\n annot_file = os.path.join(tmp_dir, localname + \".annotation\" + ext)\n tmp_annot_file = os.path.join(tmp_dir, annot_url.split(\"/\")[-1])\n get_file = shutil.copyfile if os.path.exists(annot_url) else download_file\n if n is None:\n get_file(annot_url, tmp_annot_file)\n else:\n download_head(annot_url, tmp_annot_file, n)\n is_compressed = False\n\n # unzip input file (if needed)\n if is_compressed:\n annot_file = extract_archive(tmp_annot_file, outfile=annot_file)\n else:\n shutil.move(tmp_annot_file, annot_file)\n\n # generate intermediate file (GenePred)\n pred_file = annot_file.replace(ext, \".gp\")\n if \"bed\" in ext:\n cmd = \"bedToGenePred {0} {1}\"\n elif \"gff\" in ext:\n # example annotation: GRCh38.p12 from NCBI\n cmd = \"gff3ToGenePred -useName -warnAndContinue {0} {1}\"\n elif \"gtf\" in ext:\n cmd = \"gtfToGenePred -genePredExt -allErrors -ignoreGroupsWithoutExons {0} {1}\"\n elif \"txt\" in ext:\n # UCSC annotations only\n with open(annot_file) as f:\n cols = f.readline().split(\"\\t\")\n\n # extract the genePred format columns\n start_col = 1\n for i, col in enumerate(cols):\n if col in [\"+\", \"-\"]:\n start_col = i - 1\n break\n end_col = start_col + 10\n cmd = (\n f\"\"\"cat {{0}} | cut -f {start_col}-{end_col} | \"\"\"\n # knownGene.txt.gz has spotty fields, this replaces non-integer fields with zeroes\n + \"\"\"awk 'BEGIN {{FS=OFS=\"\\t\"}} !($11 ~ /^[0-9]+$/) {{$11=\"0\"}}1' > {1}\"\"\"\n )\n else:\n raise TypeError(f\"file type extension {ext} not recognized!\")\n\n if n is None and \"gencode\" in annot_url:\n rename_contigs(annot_file)\n\n sp.check_call(cmd.format(annot_file, pred_file), shell=True)\n\n 
# generate gzipped gtf file (if required)\n gtf_file = annot_file.replace(ext, \".gtf\")\n if \"gtf\" not in ext:\n cmd = \"genePredToGtf -source=genomepy file {0} {1}\"\n sp.check_call(cmd.format(pred_file, gtf_file), shell=True)\n\n # generate gzipped bed file (if required)\n bed_file = annot_file.replace(ext, \".bed\")\n if \"bed\" not in ext:\n cmd = \"genePredToBed {0} {1}\"\n sp.check_call(cmd.format(pred_file, bed_file), shell=True)\n\n # transfer the files from the tmpdir to the genome_dir\n for f in [gtf_file, bed_file]:\n src = f\n dst = os.path.join(out_dir, os.path.basename(f))\n shutil.move(src, dst)\n rm_rf(tmp_dir)\n\n\ndef download_head(annot_url, annot_file, n: int = 5):\n \"\"\"\n Download the first n lines of a (gzipped) file.\n Comment lines are downloaded but do not count towards the line limit.\n\n Parameters\n ----------\n annot_url : str\n url to the file\n annot_file : str\n output filename\n n : int, optional\n number of lines to download\n \"\"\"\n res = urlopen(annot_url)\n if annot_url.endswith(\".gz\"):\n res = gzip.GzipFile(fileobj=res)\n m = 0\n with open(annot_file, \"w\") as f:\n for line in res:\n line = line.decode(\"utf-8\")\n f.write(line)\n if line[0] != \"#\":\n m += 1\n # add a few extra lines to the intermediate file\n if m == n + 2:\n break\n\n\ndef rename_contigs(annot_file):\n genome_dir = os.path.dirname(os.path.dirname(annot_file))\n asm_report = os.path.join(genome_dir, \"assembly_report.txt\")\n gencode2ucsc = pd.read_csv(\n asm_report,\n sep=\"\\t\",\n comment=\"#\",\n usecols=[\"GenBank-Accn\", \"UCSC-style-name\"],\n dtype=str,\n )\n gtf = read_annot(annot_file)\n\n # use the UCSC names for the scaffolds\n newgtf = gtf.merge(\n gencode2ucsc, left_on=\"seqname\", right_on=\"GenBank-Accn\", how=\"left\"\n )\n newgtf[\"seqname\"] = newgtf[\"UCSC-style-name\"].mask(pd.isnull, newgtf[\"seqname\"])\n newgtf.drop(columns=[\"GenBank-Accn\", \"UCSC-style-name\"], inplace=True)\n\n # overwrite the raw GTF\n write_annot(newgtf, annot_file)\n" ]
[ [ "pandas.read_csv" ] ]
0oshowero0/gnn_profile
[ "fafe1ae8da167c1ece51a73921c2f11d54021620" ]
[ "hanzhenyu/ogb/ogbn-arxiv/gat.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport argparse\nimport math\nimport os\nimport random\nimport time\n\nimport dgl\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom matplotlib import pyplot as plt\nfrom matplotlib.ticker import AutoMinorLocator, MultipleLocator\nfrom ogb.nodeproppred import DglNodePropPredDataset, Evaluator\n\nfrom models import GAT\n\nfrom datetime import datetime\nimport networkx as nx\nfrom torch.profiler import profile, record_function, ProfilerActivity\n\n\nepsilon = 1 - math.log(2)\n\ndevice = None\n\ndataset = \"ogbn-arxiv\"\nn_node_feats, n_classes = 0, 0\n\n\ndef seed(seed=0):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n dgl.random.seed(seed)\n\n\ndef load_data(dataset):\n global n_node_feats, n_classes\n\n data = DglNodePropPredDataset(name=dataset)\n\n # ##################################################\n # # 统计原图信息\n # origin_graph = data.graph[0]\n # print(origin_graph.num_nodes())\n # print(origin_graph.num_edges())\n # degrees = origin_graph.in_degrees().float()\n # print(float(degrees.mean()))\n # print(float(degrees.max()))\n # print(float(degrees.min()))\n # print(float(degrees.std()))\n # g = origin_graph.to_networkx()\n # i = 0\n # sub_g_node_num = []\n # for subg in nx.connected_components(g.to_undirected()):\n # i += 1\n # sub_g_node_num.append(len(subg))\n # connected_graph_num=i\n # max_connected_node_num=max(sub_g_node_num)\n # print(connected_graph_num)\n # print(max_connected_node_num)\n\n # ##################################################\n\n evaluator = Evaluator(name=dataset)\n\n splitted_idx = data.get_idx_split()\n train_idx, val_idx, test_idx = splitted_idx[\"train\"], splitted_idx[\"valid\"], splitted_idx[\"test\"]\n graph, labels = data[0]\n\n n_node_feats = graph.ndata[\"feat\"].shape[1]\n n_classes = (labels.max() + 1).item()\n\n return graph, labels, train_idx, val_idx, test_idx, evaluator\n\n\ndef preprocess(graph):\n global n_node_feats\n\n # make bidirected\n feat = graph.ndata[\"feat\"]\n graph = dgl.to_bidirected(graph)\n graph.ndata[\"feat\"] = feat\n\n # add self-loop\n print(f\"Total edges before adding self-loop {graph.number_of_edges()}\")\n graph = graph.remove_self_loop().add_self_loop()\n print(f\"Total edges after adding self-loop {graph.number_of_edges()}\")\n\n graph.create_formats_()\n\n # ##################################################\n # # 统计处理后信息\n # degrees = graph.in_degrees().float()\n # print(float(degrees.mean()))\n # print(float(degrees.max()))\n # print(float(degrees.min()))\n # print(float(degrees.std()))\n # g = graph.to_networkx()\n # i = 0\n # sub_g_node_num = []\n # for subg in nx.connected_components(g.to_undirected()):\n # i += 1\n # sub_g_node_num.append(len(subg))\n # connected_graph_num=i\n # max_connected_node_num=max(sub_g_node_num)\n # print(connected_graph_num)\n # print(max_connected_node_num)\n\n # ##################################################\n\n\n return graph\n\n\ndef gen_model(args):\n if args.use_labels:\n n_node_feats_ = n_node_feats + n_classes\n else:\n n_node_feats_ = n_node_feats\n\n model = GAT(\n n_node_feats_,\n n_classes,\n n_hidden=args.n_hidden,\n n_layers=args.n_layers,\n n_heads=args.n_heads,\n activation=F.relu,\n dropout=args.dropout,\n input_drop=args.input_drop,\n attn_drop=args.attn_drop,\n 
edge_drop=args.edge_drop,\n use_attn_dst=not args.no_attn_dst,\n use_symmetric_norm=args.use_norm,\n )\n\n return model\n\n\ndef custom_loss_function(x, labels):\n y = F.cross_entropy(x, labels[:, 0], reduction=\"none\")\n y = torch.log(epsilon + y) - math.log(epsilon)\n return torch.mean(y)\n\n\ndef add_labels(feat, labels, idx):\n onehot = torch.zeros([feat.shape[0], n_classes], device=device)\n onehot[idx, labels[idx, 0]] = 1\n return torch.cat([feat, onehot], dim=-1)\n\n\ndef adjust_learning_rate(optimizer, lr, epoch):\n if epoch <= 50:\n for param_group in optimizer.param_groups:\n param_group[\"lr\"] = lr * epoch / 50\n\n\ndef train(args, model, graph, labels, train_idx, val_idx, test_idx, optimizer, evaluator):\n model.train()\n\n feat = graph.ndata[\"feat\"]\n\n if args.use_labels:\n mask = torch.rand(train_idx.shape) < args.mask_rate\n\n train_labels_idx = train_idx[mask]\n train_pred_idx = train_idx[~mask]\n\n feat = add_labels(feat, labels, train_labels_idx)\n else:\n mask = torch.rand(train_idx.shape) < args.mask_rate\n\n train_pred_idx = train_idx[mask]\n\n optimizer.zero_grad()\n begin_forward_time = time.time()\n with record_function('forward'):\n pred = model(graph, feat)\n end_forward_time = time.time()\n\n if args.n_label_iters > 0:\n unlabel_idx = torch.cat([train_pred_idx, val_idx, test_idx])\n for _ in range(args.n_label_iters):\n pred = pred.detach()\n torch.cuda.empty_cache()\n feat[unlabel_idx, -n_classes:] = F.softmax(pred[unlabel_idx], dim=-1)\n pred = model(graph, feat)\n\n loss = custom_loss_function(pred[train_pred_idx], labels[train_pred_idx])\n begin_backward_time = time.time()\n with record_function('backward'):\n loss.backward()\n optimizer.step()\n end_backward_time = time.time()\n return evaluator(pred[train_idx], labels[train_idx]), loss.item(), end_forward_time - begin_forward_time,end_backward_time - begin_backward_time\n\n\[email protected]_grad()\ndef evaluate(args, model, graph, labels, train_idx, val_idx, test_idx, evaluator):\n model.eval()\n\n feat = graph.ndata[\"feat\"]\n\n if args.use_labels:\n feat = add_labels(feat, labels, train_idx)\n\n pred = model(graph, feat)\n\n if args.n_label_iters > 0:\n unlabel_idx = torch.cat([val_idx, test_idx])\n for _ in range(args.n_label_iters):\n feat[unlabel_idx, -n_classes:] = F.softmax(pred[unlabel_idx], dim=-1)\n pred = model(graph, feat)\n\n train_loss = custom_loss_function(pred[train_idx], labels[train_idx])\n val_loss = custom_loss_function(pred[val_idx], labels[val_idx])\n test_loss = custom_loss_function(pred[test_idx], labels[test_idx])\n\n return (\n evaluator(pred[train_idx], labels[train_idx]),\n evaluator(pred[val_idx], labels[val_idx]),\n evaluator(pred[test_idx], labels[test_idx]),\n train_loss,\n val_loss,\n test_loss,\n pred,\n )\n\n\ndef run(args, graph, labels, train_idx, val_idx, test_idx, evaluator, n_running):\n evaluator_wrapper = lambda pred, labels: evaluator.eval(\n {\"y_pred\": pred.argmax(dim=-1, keepdim=True), \"y_true\": labels}\n )[\"acc\"]\n\n # define model and optimizer\n model = gen_model(args).to(device)\n optimizer = optim.RMSprop(model.parameters(), lr=args.lr, weight_decay=args.wd)\n\n # training loop\n total_time = 0\n best_val_acc, final_test_acc, best_val_loss = 0, 0, float(\"inf\")\n final_pred = None\n\n accs, train_accs, val_accs, test_accs = [], [], [], []\n losses, train_losses, val_losses, test_losses = [], [], [], []\n total_forward_time = 0\n total_backward_time = 0\n for epoch in range(1, args.n_epochs + 1):\n tic = time.time()\n\n 
adjust_learning_rate(optimizer, args.lr, epoch)\n\n acc, loss, forwardt, backwardt = train(args, model, graph, labels, train_idx, val_idx, test_idx, optimizer, evaluator_wrapper)\n\n total_forward_time += forwardt\n total_backward_time += backwardt\n\n train_acc, val_acc, test_acc, train_loss, val_loss, test_loss, pred = evaluate(\n args, model, graph, labels, train_idx, val_idx, test_idx, evaluator_wrapper\n )\n\n toc = time.time()\n total_time += toc - tic\n\n if val_loss < best_val_loss:\n best_val_loss = val_loss\n best_val_acc = val_acc\n final_test_acc = test_acc\n final_pred = pred\n\n if epoch == args.n_epochs or epoch % args.log_every == 0:\n print(\n f\"Run: {n_running}/{args.n_runs}, Epoch: {epoch}/{args.n_epochs}, Average epoch time: {total_time / epoch:.2f}\\n\"\n f\"Loss: {loss:.4f}, Acc: {acc:.4f}\\n\"\n f\"Train/Val/Test loss: {train_loss:.4f}/{val_loss:.4f}/{test_loss:.4f}\\n\"\n f\"Train/Val/Test/Best val/Final test acc: {train_acc:.4f}/{val_acc:.4f}/{test_acc:.4f}/{best_val_acc:.4f}/{final_test_acc:.4f}\"\n )\n\n for l, e in zip(\n [accs, train_accs, val_accs, test_accs, losses, train_losses, val_losses, test_losses],\n [acc, train_acc, val_acc, test_acc, loss, train_loss, val_loss, test_loss],\n ):\n l.append(e)\n\n print(\"*\" * 50)\n print(f\"Best val acc: {best_val_acc}, Final test acc: {final_test_acc}\")\n print(\"*\" * 50)\n\n print('Avg Forward Time: ' + str(total_forward_time/args.n_epochs))\n print('Avg Backward Time: ' + str(total_backward_time/args.n_epochs))\n\n # plot learning curves\n if args.plot_curves:\n fig = plt.figure(figsize=(24, 24))\n ax = fig.gca()\n ax.set_xticks(np.arange(0, args.n_epochs, 100))\n ax.set_yticks(np.linspace(0, 1.0, 101))\n ax.tick_params(labeltop=True, labelright=True)\n for y, label in zip([accs, train_accs, val_accs, test_accs], [\"acc\", \"train acc\", \"val acc\", \"test acc\"]):\n plt.plot(range(args.n_epochs), y, label=label, linewidth=1)\n ax.xaxis.set_major_locator(MultipleLocator(100))\n ax.xaxis.set_minor_locator(AutoMinorLocator(1))\n ax.yaxis.set_major_locator(MultipleLocator(0.01))\n ax.yaxis.set_minor_locator(AutoMinorLocator(2))\n plt.grid(which=\"major\", color=\"red\", linestyle=\"dotted\")\n plt.grid(which=\"minor\", color=\"orange\", linestyle=\"dotted\")\n plt.legend()\n plt.tight_layout()\n plt.savefig(f\"gat_acc_{n_running}.png\")\n\n fig = plt.figure(figsize=(24, 24))\n ax = fig.gca()\n ax.set_xticks(np.arange(0, args.n_epochs, 100))\n ax.tick_params(labeltop=True, labelright=True)\n for y, label in zip(\n [losses, train_losses, val_losses, test_losses], [\"loss\", \"train loss\", \"val loss\", \"test loss\"]\n ):\n plt.plot(range(args.n_epochs), y, label=label, linewidth=1)\n ax.xaxis.set_major_locator(MultipleLocator(100))\n ax.xaxis.set_minor_locator(AutoMinorLocator(1))\n ax.yaxis.set_major_locator(MultipleLocator(0.1))\n ax.yaxis.set_minor_locator(AutoMinorLocator(5))\n plt.grid(which=\"major\", color=\"red\", linestyle=\"dotted\")\n plt.grid(which=\"minor\", color=\"orange\", linestyle=\"dotted\")\n plt.legend()\n plt.tight_layout()\n plt.savefig(f\"gat_loss_{n_running}.png\")\n\n if args.save_pred:\n os.makedirs(\"./output\", exist_ok=True)\n torch.save(F.softmax(final_pred, dim=1), f\"./output/{n_running}.pt\")\n\n return best_val_acc, final_test_acc\n\n\ndef count_parameters(args):\n model = gen_model(args)\n return sum([p.numel() for p in model.parameters() if p.requires_grad])\n\n\ndef main():\n global device, n_node_feats, n_classes, epsilon\n\n argparser = argparse.ArgumentParser(\n \"GAT 
implementation on ogbn-arxiv\", formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n argparser.add_argument(\"--cpu\", action=\"store_true\", help=\"CPU mode. This option overrides --gpu.\")\n argparser.add_argument(\"--gpu\", type=int, default=0, help=\"GPU device ID.\")\n argparser.add_argument(\"--seed\", type=int, default=0, help=\"seed\")\n argparser.add_argument(\"--n-runs\", type=int, default=10, help=\"running times\")\n argparser.add_argument(\"--n-epochs\", type=int, default=2000, help=\"number of epochs\")\n argparser.add_argument(\n \"--use-labels\", action=\"store_true\", help=\"Use labels in the training set as input features.\"\n )\n argparser.add_argument(\"--n-label-iters\", type=int, default=0, help=\"number of label iterations\")\n argparser.add_argument(\"--mask-rate\", type=float, default=0.5, help=\"mask rate\")\n argparser.add_argument(\"--no-attn-dst\", action=\"store_true\", help=\"Don't use attn_dst.\")\n argparser.add_argument(\"--use-norm\", action=\"store_true\", help=\"Use symmetrically normalized adjacency matrix.\")\n argparser.add_argument(\"--lr\", type=float, default=0.002, help=\"learning rate\")\n argparser.add_argument(\"--n-layers\", type=int, default=3, help=\"number of layers\")\n argparser.add_argument(\"--n-heads\", type=int, default=3, help=\"number of heads\")\n argparser.add_argument(\"--n-hidden\", type=int, default=250, help=\"number of hidden units\")\n argparser.add_argument(\"--dropout\", type=float, default=0.75, help=\"dropout rate\")\n argparser.add_argument(\"--input-drop\", type=float, default=0.1, help=\"input drop rate\")\n argparser.add_argument(\"--attn-drop\", type=float, default=0.0, help=\"attention drop rate\")\n argparser.add_argument(\"--edge-drop\", type=float, default=0.0, help=\"edge drop rate\")\n argparser.add_argument(\"--wd\", type=float, default=0, help=\"weight decay\")\n argparser.add_argument(\"--log-every\", type=int, default=20, help=\"log every LOG_EVERY epochs\")\n argparser.add_argument(\"--plot-curves\", action=\"store_true\", help=\"plot learning curves\")\n argparser.add_argument(\"--save-pred\", action=\"store_true\", help=\"save final predictions\")\n args = argparser.parse_args()\n\n if not args.use_labels and args.n_label_iters > 0:\n raise ValueError(\"'--use-labels' must be enabled when n_label_iters > 0\")\n\n if args.cpu:\n device = torch.device(\"cpu\")\n else:\n device = torch.device(f\"cuda:{args.gpu}\")\n\n # load data & preprocess\n begin_load_data_time = datetime.now()\n graph, labels, train_idx, val_idx, test_idx, evaluator = load_data(dataset)\n end_load_data_time = datetime.now()\n print('Load Data in: '+str((end_load_data_time-begin_load_data_time).total_seconds()))\n \n begin_pre_process_data_time = datetime.now()\n graph = preprocess(graph)\n end_pre_process_data_time = datetime.now()\n print('Pre-process Data in: '+str((end_pre_process_data_time-begin_pre_process_data_time).total_seconds()))\n\n\n graph, labels, train_idx, val_idx, test_idx = map(\n lambda x: x.to(device), (graph, labels, train_idx, val_idx, test_idx)\n )\n\n # run\n val_accs, test_accs = [], []\n\n for i in range(args.n_runs):\n seed(args.seed + i)\n val_acc, test_acc = run(args, graph, labels, train_idx, val_idx, test_idx, evaluator, i + 1)\n val_accs.append(val_acc)\n test_accs.append(test_acc)\n\n print(args)\n print(f\"Runned {args.n_runs} times\")\n print(\"Val Accs:\", val_accs)\n print(\"Test Accs:\", test_accs)\n print(f\"Average val accuracy: {np.mean(val_accs)} ± {np.std(val_accs)}\")\n 
print(f\"Average test accuracy: {np.mean(test_accs)} ± {np.std(test_accs)}\")\n print(f\"Number of params: {count_parameters(args)}\")\n\n\nif __name__ == \"__main__\":\n #main()\n\n with profile(activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA],with_stack=True, record_shapes=True, use_cuda=True) as p:\n main()\n p.export_chrome_trace('profile_gat-ogbn-arxiv.json')\n print(p.key_averages().table(sort_by=\"cuda_time_total\"))\n\n\n\n# Namespace(attn_drop=0.0, cpu=False, dropout=0.75, edge_drop=0.1, gpu=0, input_drop=0.1, log_every=20, lr=0.002, n_epochs=2000, n_heads=3, n_hidden=250, n_label_iters=0, n_layers=3, n_runs=10, no_attn_dst=True, plot_curves=True, use_labels=True, use_norm=True, wd=0)\n# Runned 10 times\n# Val Accs: [0.7492868888217725, 0.7524413570925199, 0.7505620993993087, 0.7500251686298198, 0.7501929594952851, 0.7513003792073559, 0.7516695191113796, 0.7505285412262156, 0.7504949830531226, 0.7515017282459143]\n# Test Accs: [0.7366829208073575, 0.7384112091846182, 0.7368886694236981, 0.7345019854741477, 0.7373001666563792, 0.7362508487130424, 0.7352221056313396, 0.736477172191017, 0.7380614365368393, 0.7362919984363105]\n# Average val accuracy: 0.7508003624282694 ± 0.0008760483047616948\n# Average test accuracy: 0.736608851305475 ± 0.0011192876013651112\n# Number of params: 1441580\n\n# Namespace(attn_drop=0.0, cpu=False, dropout=0.75, edge_drop=0.3, gpu=0, input_drop=0.25, log_every=20, lr=0.002, n_epochs=2000, n_heads=3, n_hidden=250, n_label_iters=1, n_layers=3, n_runs=10, no_attn_dst=True, plot_curves=True, use_labels=True, use_norm=True, wd=0)\n# Runned 20 times\n# Val Accs: [0.7529782878620088, 0.7521393335346823, 0.7521728917077755, 0.7504949830531226, 0.7518037518037518, 0.7518373099768448, 0.7516359609382866, 0.7511325883418907, 0.7509312393033323, 0.7515017282459143, 0.7511325883418907, 0.7514346118997282, 0.7509312393033323, 0.7521393335346823, 0.7528776133427296, 0.7522735662270545, 0.7504949830531226, 0.7522735662270545, 0.7511661465149837, 0.7501258431490989]\n# Test Accs: [0.7390901796185421, 0.7398720243606361, 0.7394605271279551, 0.7384523589078863, 0.7388638561405675, 0.7397280003291978, 0.7414151389831903, 0.7376499393041582, 0.7399748986688065, 0.7400366232537087, 0.7392547785116145, 0.7388844310022015, 0.7374853404110857, 0.7384317840462523, 0.7418677859391396, 0.737937987367035, 0.7381643108450096, 0.7399543238071724, 0.7377322387506944, 0.7385758080776906]\n# Average val accuracy: 0.7515738783180644 ± 0.0007617982474634186\n# Average test accuracy: 0.7391416167726272 ± 0.0011522198067958794\n# Number of params: 1441580\n" ]
[ [ "torch.mean", "matplotlib.pyplot.legend", "torch.nn.functional.softmax", "matplotlib.ticker.MultipleLocator", "numpy.linspace", "torch.zeros", "torch.cat", "matplotlib.ticker.AutoMinorLocator", "torch.no_grad", "numpy.mean", "torch.cuda.manual_seed_all", "torch.device", "torch.profiler.record_function", "matplotlib.pyplot.tight_layout", "numpy.arange", "numpy.std", "torch.rand", "matplotlib.pyplot.figure", "torch.profiler.profile", "matplotlib.pyplot.savefig", "torch.cuda.empty_cache", "torch.log", "torch.cuda.manual_seed", "numpy.random.seed", "torch.manual_seed", "torch.nn.functional.cross_entropy", "matplotlib.pyplot.grid" ] ]
infinitemugen/genrl
[ "602587417ce167380c90a726764a3efa4643dc38" ]
[ "genrl/deep/common/noise.py" ]
[ "from abc import ABC, abstractmethod\n\nimport numpy as np\n\n\nclass ActionNoise(ABC):\n \"\"\"\n Base class for Action Noise\n\n :param mean: Mean of noise distribution\n :param std: Standard deviation of noise distribution\n :type mean: float\n :type std: float\n \"\"\"\n\n def __init__(self, mean: float, std: float):\n # super().__init__(mean, std)\n self._mean = mean\n self._std = std\n\n @abstractmethod\n def __call__(self) -> None:\n raise NotImplementedError\n\n @property\n def mean(self) -> float:\n \"\"\"\n Returns mean of noise distribution\n \"\"\"\n return self._mean\n\n @property\n def std(self) -> float:\n \"\"\"\n Returns standard deviation of noise distribution\n \"\"\"\n return self._std\n\n\nclass NormalActionNoise(ActionNoise):\n \"\"\"\n Normal implementation of Action Noise\n\n :param mean: Mean of noise distribution\n :param std: Standard deviation of noise distribution\n :type mean: float\n :type std: float\n \"\"\"\n\n def __init__(self, mean: float, std: float):\n super(NormalActionNoise, self).__init__(mean, std)\n\n def __call__(self) -> float:\n \"\"\"\n Return action noise randomly sampled from noise distribution\n \"\"\"\n return np.random.normal(self._mean, self._std)\n\n def reset(self) -> None:\n pass\n\n\nclass OrnsteinUhlenbeckActionNoise(ActionNoise):\n \"\"\"\n Ornstein Uhlenbeck implementation of Action Noise\n\n :param mean: Mean of noise distribution\n :param std: Standard deviation of noise distribution\n :param theta: Parameter used to solve the Ornstein Uhlenbeck process\n :param dt: Small parameter used to solve the Ornstein Uhlenbeck process\n :param initial_noise: Initial noise distribution\n :type mean: float\n :type std: float\n :type theta: float\n :type dt: float\n :type initial_noise: Numpy array\n \"\"\"\n\n def __init__(\n self,\n mean: float,\n std: float,\n theta: float = 0.15,\n dt: float = 1e-2,\n initial_noise: np.ndarray = None,\n ):\n super(OrnsteinUhlenbeckActionNoise, self).__init__(mean, std)\n self._theta = theta\n self._mean = mean\n self._std = std\n self._dt = dt\n self._initial_noise = initial_noise\n self.noise_prev = None\n self.reset()\n\n def __call__(self) -> float:\n \"\"\"\n Return action noise randomly sampled from noise distribution \\\naccording to the Ornstein Uhlenbeck process\n \"\"\"\n noise = (\n self.noise_prev\n + self._theta * (self._mean - self.noise_prev) * self._dt\n + (self._std * np.sqrt(self._dt) * np.random.normal(size=self._mean.shape))\n )\n self.noise_prev = noise\n return noise\n\n def reset(self) -> None:\n \"\"\"\n Reset the initial noise value for the noise distribution sampling\n \"\"\"\n self.noise_prev = (\n self._initial_noise\n if self._initial_noise is not None\n else np.zeros_like(self._mean)\n )\n" ]
[ [ "numpy.random.normal", "numpy.zeros_like", "numpy.sqrt" ] ]
Yihao-Sun/pytorch-mopo
[ "33a81aae8b221de01007e7b27a8a02ee19c27eb3" ]
[ "models/tf_dynamics_models/constructor.py" ]
[ "import numpy as np\nimport tensorflow.compat.v1 as tf\ntf.disable_eager_execution()\nimport pdb\n\nfrom models.tf_dynamics_models.fc import FC\nfrom models.tf_dynamics_models.bnn import BNN\n\ndef construct_model(obs_dim=11, act_dim=3, rew_dim=1, hidden_dim=200, num_networks=7,\n\t\t\t\t\tnum_elites=5, session=None, model_type='mlp', separate_mean_var=False,\n\t\t\t\t\tname=None, load_dir=None, deterministic=False):\n\tif name is None:\n\t\tname = 'BNN'\n\tprint('[ BNN ] Name {} | Observation dim {} | Action dim: {} | Hidden dim: {}'.format(name, obs_dim, act_dim, hidden_dim))\n\tparams = {'name': name, 'num_networks': num_networks, 'num_elites': num_elites,\n\t\t\t\t'sess': session, 'separate_mean_var': separate_mean_var, 'deterministic': deterministic}\n\n\tif load_dir is not None:\n\t\tprint('Specified load dir', load_dir)\n\t\tparams['model_dir'] = load_dir\n\n\tmodel = BNN(params)\n\n\tif not model.model_loaded:\n\t\tif model_type == 'identity':\n\t\t\treturn\n\t\telif model_type == 'linear':\n\t\t\tprint('[ BNN ] Training linear model')\n\t\t\tmodel.add(FC(obs_dim+rew_dim, input_dim=obs_dim+act_dim, weight_decay=0.000025))\n\t\telif model_type == 'mlp':\n\t\t\tprint('[ BNN ] Training non-linear model | Obs: {} | Act: {} | Rew: {}'.format(obs_dim, act_dim, rew_dim))\n\t\t\tmodel.add(FC(hidden_dim, input_dim=obs_dim+act_dim, activation=\"swish\", weight_decay=0.000025))\n\t\t\tmodel.add(FC(hidden_dim, activation=\"swish\", weight_decay=0.00005))\n\t\t\tmodel.add(FC(hidden_dim, activation=\"swish\", weight_decay=0.000075))\n\t\t\tmodel.add(FC(hidden_dim, activation=\"swish\", weight_decay=0.000075))\n\t\t\tmodel.add(FC(obs_dim+rew_dim, weight_decay=0.0001))\n\t\t\tif separate_mean_var:\n\t\t\t\tmodel.add(FC(obs_dim+rew_dim, input_dim=hidden_dim, weight_decay=0.0001), var_layer=True)\n\n\tif load_dir is not None:\n\t\tmodel.model_loaded = True\n\n\tmodel.finalize(tf.train.AdamOptimizer, {\"learning_rate\": 0.001})\n\tprint('[ BNN ] Model: {}'.format(model))\n\treturn model\n\ndef format_samples_for_training(samples):\n\tobs = samples['observations']\n\tact = samples['actions']\n\tnext_obs = samples['next_observations']\n\trew = samples['rewards']\n\tdelta_obs = next_obs - obs\n\tinputs = np.concatenate((obs, act), axis=-1)\n\toutputs = np.concatenate((rew, delta_obs), axis=-1)\n\treturn inputs, outputs\n\ndef reset_model(model):\n\tmodel_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=model.name)\n\tmodel.sess.run(tf.initialize_vars(model_vars))\n\nif __name__ == '__main__':\n\tmodel = construct_model()\n" ]
[ [ "numpy.concatenate", "tensorflow.compat.v1.get_collection", "tensorflow.compat.v1.initialize_vars", "tensorflow.compat.v1.disable_eager_execution" ] ]
lnicoletti/EPA1361-G21
[ "e8dce7169101fd620a3e36f7c38df7f4db598a69" ]
[ "epa1361_open/final assignment/funs_generate_network.py" ]
[ "from __future__ import division, unicode_literals, print_function\n\nimport numpy as np\nimport networkx as nx\nimport pandas as pd\nfrom funs_dikes import Lookuplin # @UnresolvedImport\n\n\ndef to_dict_dropna(data):\n return dict((str(k), v.dropna().to_dict())\n for k, v in pd.compat.iteritems(data))\n\n\ndef get_network(plann_steps_max=10):\n ''' Build network uploading crucial parameters '''\n\n # Upload dike info\n df = pd.read_excel('./data/dikeIjssel.xlsx', dtype=object)\n df = df.set_index('NodeName')\n\n nodes = df.to_dict('index')\n\n # Create network out of dike info\n G = nx.MultiDiGraph()\n for key, attr in nodes.items():\n G.add_node(key, **attr)\n\n # Select dike type nodes\n branches = df['branch'].dropna().unique()\n dike_list = df['type'][df['type'] == 'dike'].index.values\n dike_branches = {k: df[df['branch'] == k].index.values\n for k in branches}\n\n # Upload fragility curves:\n frag_curves = pd.read_excel('./data/fragcurves/frag_curves.xlsx',\n header=None, index_col=0).transpose()\n calibration_factors = pd.read_excel('./data/fragcurves/calfactors_pf1250.xlsx',\n index_col=0)\n\n # Upload room for the river projects:\n steps = np.array(range(plann_steps_max))\n \n projects = pd.read_excel('./data/rfr_strategies.xlsx', index_col=0,\n names=['project name', 0,1,2,3,4])\n \n for n in steps:\n G.add_node('RfR_projects {}'.format(n), **to_dict_dropna(projects))\n G.node['RfR_projects {}'.format(n)]['type'] = 'measure'\n\n G.add_node('discount rate {}'.format(n), **{'value': 0})\n\n # Upload evacuation policies:\n G.add_node('EWS', **pd.read_excel('./data/EWS.xlsx').to_dict())\n G.node['EWS']['type'] = 'measure'\n\n # Upload muskingum params:\n Muskingum_params = pd.read_excel('./data/Muskingum/params.xlsx',\n index_col=0)\n\n # Fill network with crucial info:\n for dike in dike_list:\n # Assign fragility curves, assuming it's the same shape for every\n # location\n dikeid = 50001010\n G.node[dike]['f'] = np.column_stack((frag_curves.loc[:, 'wl'].values,\n frag_curves.loc[:, dikeid].values))\n # Adjust fragility curves\n G.node[dike]['f'][:, 0] += calibration_factors.loc[dike].values\n\n # Determine the level of the dike\n G.node[dike]['dikelevel'] = Lookuplin(G.node[dike]['f'], 1, 0, 0.5)\n\n # Assign stage-discharge relationships\n filename = './data/rating_curves/{}_ratingcurve_new.txt'.format(dike)\n G.node[dike]['r'] = np.loadtxt(filename)\n\n # Assign losses per location:\n name = './data/losses_tables/{}_lossestable.xlsx'.format(dike)\n G.node[dike]['table'] = pd.read_excel(name, index_col=0).values\n\n # Assign Muskingum paramters:\n G.node[dike]['C1'] = Muskingum_params.loc[G.node[dike]['prec_node'], 'C1']\n G.node[dike]['C2'] = Muskingum_params.loc[G.node[dike]['prec_node'], 'C2']\n G.node[dike]['C3'] = Muskingum_params.loc[G.node[dike]['prec_node'], 'C3']\n \n # The plausible 133 upstream wave-shapes:\n G.node['A.0']['Qevents_shape'] = pd.read_excel(\n './data/hydrology/wave_shapes.xls', index_col=0)\n\n return G, dike_list, dike_branches, steps\n" ]
[ [ "pandas.read_excel", "pandas.compat.iteritems", "numpy.loadtxt", "numpy.column_stack" ] ]
syedraza2/Variational_Quantum_Embedding
[ "780d6a976c4a3d8912df93d20335a6d4a15481ca" ]
[ "generate_data.py" ]
[ "import numpy as np\n\ndef generate_data(finename_X, finename_Y, number_of_data):\n\tX = []\n\tY = []\n\tfor i in range(40):\n\t x =[]\n\t y = 0\n\t r1 = np.random.uniform(0,1)\n\t if r1 < 0.5:\n\t x.append(np.random.uniform(-1.0,1.0))\n\t x.append(np.random.uniform(-1.0,1.0))\n\t y = -1.0\n\t else:\n\t r2 = np.random.uniform(0,1)\n\t y = 1.0\n\t if r2 < 0.5:\n\t x.append(np.random.uniform(-2.0,-1.0))\n\t x.append(np.random.uniform(-2.0,-1.0))\n\t else:\n\t x.append(np.random.uniform(1.0,2.0))\n\t x.append(np.random.uniform(1.0,2.0))\n\t X.append(x)\n\t Y.append(y)\n\t \n\t np.savetxt('./data/{}'.format(finename_X), X)\n\t np.savetxt('./data/{}'.format(finename_Y), Y)\n" ]
[ [ "numpy.random.uniform" ] ]
CodingOfZero/Detection
[ "4196d364b7ee8de928c6e6fc92473bce0f5e8628" ]
[ "demo.py" ]
[ "#!/usr/bin/env python\n\n# --------------------------------------------------------\n# Tensorflow Faster R-CNN\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Xinlei Chen, based on code from Ross Girshick\n# --------------------------------------------------------\n\n\"\"\"\nDemo script showing detections in sample images.\n\nSee README.md for installation instructions before running.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport os\n\n\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\n\nimport socket\nimport time\n\nfrom lib.utils.nms_wrapper import nms\nfrom lib.utils.test import im_detect\n#from nets.resnet_v1 import resnetv1\nfrom lib.nets.vgg16 import vgg16\nfrom lib.utils.timer import Timer\nfrom lib.config import config as cfgx\n\nCLASSES = ('__background__',\n 'aokeng', 'huaheng', 'cashang', 'quebian',\n 'tuqi','ganbu' )\n\n\nNETS = {'vgg16': ('vgg16_faster_rcnn_iter_40000.ckpt',), 'res101': ('res101_faster_rcnn_iter_110000.ckpt',)}\nDATASETS = {'pascal_voc': ('voc_2007_trainval',), 'pascal_voc_0712': ('voc_2007_trainval+voc_2012_trainval',)}\n\nsaveImgPath='result.jpg'\n\ndef vis_detections(im, class_name, dets, inds):\n \"\"\"Draw detected bounding boxes.\"\"\"\n for i in inds:\n bbox = dets[i, :4]\n score = dets[i, -1]\n cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 0, 255),1)\n cv2.putText(im,class_name+\":\"+ str('%.3f' % score), (int(bbox[0]),int(bbox[1]-10)),cv2.FONT_HERSHEY_COMPLEX,0.6 , (0, 255, 0),1)\n\ndef drawDefect(im,scores,boxes,conf_thresh=0.6,nms_thresh=0.2):\n \"\"\"\n #缺陷画框\n :param scores:\n :param boxes:\n :param conf_thresh:\n :param nms_thresh:非极大值抑制\n :return:\n \"\"\"\n # CONF_THRESH = 0.6\n # NMS_THRESH = 0.2 # 非极大值抑制\n for cls_ind, cls in enumerate(CLASSES[1:]):\n cls_ind += 1 # because we skipped background\n cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]\n cls_scores = scores[:, cls_ind]\n dets = np.hstack((cls_boxes,\n cls_scores[:, np.newaxis])).astype(np.float32) # 水平方向堆叠\n keep = nms(dets, nms_thresh)\n dets = dets[keep, :]\n # print(dets.shape)\n\n inds = np.where(dets[:, -1] >= conf_thresh)[0] # 大于阈值的缺陷索引index\n if len(inds) > 0: # 缺陷\n vis_detections(im, cls, dets, inds) # 画框\n\ndef getResult(scores, boxes,conf_thresh=0.6,nms_thresh=0.2):\n\n result = [0 for _ in range(len(CLASSES))] #初始,生成全0列表\n\n # Detect all object classes and regress object bounds\n\n # Visualize detections for each class\n for cls_ind, cls in enumerate(CLASSES[1:]):\n cls_ind += 1 # because we skipped background\n cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]\n cls_scores = scores[:, cls_ind]\n dets = np.hstack((cls_boxes,\n cls_scores[:, np.newaxis])).astype(np.float32) # 水平方向堆叠\n keep = nms(dets, nms_thresh)\n dets = dets[keep, :]\n # print(dets.shape)\n\n inds = np.where(dets[:, -1] >= conf_thresh)[0] # 大于阈值的缺陷索引index\n # if len(inds) > 0: # 缺陷\n # vis_detections(im, cls, dets,inds) #画框\n result[cls_ind] = len(inds)\n return result\n\ndef demo(sess, net):\n \"\"\"Detect object classes in an image using pre-computed object proposals.\"\"\"\n\n # Load the demo image\n # im_file = os.path.join(cfg.FLAGS2[\"data_dir\"], 'demo', image_name)\n CONF_THRESH = 0.6\n NMS_THRESH = 0.1 # 非极大值抑制\n\n server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server.bind((\"localhost\", 8888))\n server.listen(1) # tcp连接队列的大小,即连接数\n im_names = ['K:/39.jpg', 'K:/40.jpg', 'K:/41.jpg', 
'K:/42.jpg',\n 'K:/43.jpg', 'K:/44.jpg']\n im=cv2.imread(\"init.jpg\") #目的是为了初始化相关变量,避免首次检测延时过大\n scores, boxes = im_detect(sess, net, im)\n print(\"wait connecting...\")\n while True:\n connection, address = server.accept() #阻塞,等待连接\n print(connection, address)\n recv_str = connection.recv(1024)\n recv_str = recv_str.decode(\"ascii\")\n if not recv_str:\n continue\n print(recv_str)\n im_name =recv_str #im_names[int(recv_str)] #'G:/40.jpg' #\n # saveImgPath+=im_name\n print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')\n print('Demo for {}'.format(im_name))\n timer = Timer()\n timer.tic()\n im = cv2.imread(im_name)\n try:\n im.shape\n except:\n print('fail to read '+im_name)\n connection.send(bytes(\"0000000\", encoding=\"ascii\"))\n connection.close()\n continue\n scores, boxes = im_detect(sess, net, im)\n timer.toc()\n print('Detection took {:.3f}s for {:d} object proposals'.format(timer.total_time, boxes.shape[0]))\n result=getResult(scores, boxes,CONF_THRESH,NMS_THRESH)\n\n #向cpp发送结果\n strResult=map(str,result) #int列表转str列表\n back_str=\"\".join(strResult)\n print(back_str)\n connection.send(bytes(\"%s\" % back_str , encoding=\"ascii\"))\n\n #保存图像等操作\n # timer2 = Timer()\n # timer2.tic()\n\n drawDefect(im,scores, boxes, CONF_THRESH, NMS_THRESH)\n cv2.imwrite(saveImgPath, im)\n\n # timer2.toc()\n # print(\"保存图像耗时:\"+str(timer2.total_time))\n # connection.send(bytes(\"saved\" , encoding=\"ascii\")) #反馈图像保存成功\n connection.close()\n time.sleep(2)\n\n\ndef parse_args():\n \"\"\"Parse input arguments.\"\"\"\n parser = argparse.ArgumentParser(description='Tensorflow Faster R-CNN demo')\n parser.add_argument('--net', dest='demo_net', help='Network to use [vgg16 res101]',\n choices=NETS.keys(), default='vgg16')\n parser.add_argument('--dataset', dest='dataset', help='Trained dataset [pascal_voc pascal_voc_0712]',\n choices=DATASETS.keys(), default='pascal_voc')\n args = parser.parse_args()\n return args\n\n# def sktWithCpp():\n# server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n# server.bind((\"localhost\", 8888))\n# server.listen(0)\n# connection, address = server.accept()\n# print(connection, address)\n# num = 0\n# while True:\n# # connection, address = server.accept()\n# # print(connection, address)\n#\n# recv_str = connection.recv(1024)[0:5]\n# print(\"enter 1\")\n# recv_str = recv_str.decode(\"ascii\")\n# if not recv_str:\n# break\n# num = num + 1\n# print(recv_str, num)\n#\n# connection.send(bytes(\"clientRecv: %s,\" % recv_str, encoding=\"ascii\"))\n# time.sleep(0.5)\n#\n# connection.close()\n# input(\"enter end\")\n\nif __name__ == '__main__':\n args = parse_args()\n\n # model path\n demonet = args.demo_net\n dataset = args.dataset\n\n tfmodel = os.path.join('output', demonet, DATASETS[dataset][0], 'default', NETS[demonet][0])\n\n if not os.path.isfile(tfmodel + '.meta'):\n print(tfmodel)\n raise IOError(('{:s} not found.\\nDid you download the proper networks from '\n 'our server and place them properly?').format(tfmodel + '.meta'))\n\n # set config\n tfconfig = tf.ConfigProto(allow_soft_placement=True)\n tfconfig.gpu_options.allow_growth = True\n\n # init session\n sess = tf.Session(config=tfconfig)\n # load network\n\n if demonet == 'vgg16':\n net = vgg16(batch_size=1)\n # elif demonet == 'res101':\n # net = resnetv1(batch_size=1, num_layers=101)\n else:\n raise NotImplementedError\n\n net.create_architecture(sess, \"TEST\", len(CLASSES),\n tag='default', anchor_scales=[8, 16, 32])\n saver = tf.train.Saver()\n saver.restore(sess, tfmodel)\n print('Loaded network 
{:s}'.format(tfmodel))\n demo(sess, net )\n\n" ]
[ [ "numpy.hstack", "tensorflow.ConfigProto", "tensorflow.Session", "tensorflow.train.Saver", "numpy.where" ] ]
FENGShuanglang/CPFNet_Project
[ "57f7455f008841555eaffef61945ba606445fc0f" ]
[ "OCT/CPFNet/dataset/PiFu.py" ]
[ "import torch\nimport glob\nimport os\nfrom torchvision import transforms\nfrom torchvision.transforms import functional as F\n#import cv2\nfrom PIL import Image\n# import pandas as pd\nimport numpy as np\nfrom imgaug import augmenters as iaa\nimport imgaug as ia\n#from utils import get_label_info, one_hot_it\nimport random\n\n\ndef augmentation():\n # augment images with spatial transformation: Flip, Affine, Rotation, etc...\n # see https://github.com/aleju/imgaug for more details\n pass\n\ndef augmentation_pixel():\n # augment images with pixel intensity transformation: GaussianBlur, Multiply, etc...\n pass\n\nclass PiFu(torch.utils.data.Dataset):\n def __init__(self, dataset_path,scale,k_fold_test=1, mode='train'):\n super().__init__()\n self.mode = mode\n self.img_path=dataset_path+'/img'\n self.mask_path=dataset_path+'/mask'\n self.image_lists,self.label_lists=self.read_list(self.img_path,k_fold_test=k_fold_test)\n self.fliplr = iaa.Fliplr(0.5)\n # resize\n self.resize_label = transforms.Resize(scale, Image.NEAREST)\n self.resize_img = transforms.Resize(scale, Image.BILINEAR)\n # normalization\n self.to_tensor = transforms.ToTensor()\n\n def __getitem__(self, index):\n # load image and crop\n img = Image.open(self.image_lists[index])\n\n # img = self.resize_img(img)\n img = np.array(img)\n labels=self.label_lists[index]\n #load label\n if self.mode !='test':\n label = Image.open(self.label_lists[index])\n # label = self.resize_label(label)\n label = np.array(label) \n \n # augment image and label\n if self.mode == 'train':\n seq_det = self.fliplr.to_deterministic()#固定变换\n img = seq_det.augment_image(img)\n label = seq_det.augment_image(label)\n # print(np.min(label))\n\n label=np.reshape(label,label.shape+(1,))\n label=self.to_tensor(label.copy())\n\n labels=label\n # print(self.mode)\n\n img = self.to_tensor(img.copy()).float()\n return img, labels\n\n def __len__(self):\n return len(self.image_lists)\n def read_list(self,image_path,k_fold_test=1):\n fold=sorted(os.listdir(image_path))\n # print(fold)\n os.listdir()\n img_list=[]\n if self.mode=='train':\n fold_r=fold\n fold_r.remove('f'+str(k_fold_test))# remove testdata\n for item in fold_r:\n img_list+=glob.glob(os.path.join(image_path,item)+'/*.jpg')\n # print(len(img_list))\n label_list=[x.replace('img','mask').split('.')[0]+'_segmentation.png' for x in img_list]\n elif self.mode=='val' or self.mode=='test':\n fold_s=fold[k_fold_test-1]\n img_list=glob.glob(os.path.join(image_path,fold_s)+'/*.jpg')\n label_list=[x.replace('img','mask').split('.')[0]+'_segmentation.png' for x in img_list]\n return img_list,label_list\n\n \n\n\n\n\n\n# if __name__ == '__main__':\n# data = PiFu('/home/FENGsl/BiSeNet/dataset/path/to/PiFu', (512, 512),mode='train')\n# from model.build_BiSeNet import BiSeNet\n# from utils import reverse_one_hot, get_label_info, colour_code_segmentation, compute_global_accuracy\n#\n# for i, (img, label) in enumerate(data):\n#\n# print(label.shape)\n# print(img.shape)\n \n\n" ]
[ [ "numpy.reshape", "numpy.array" ] ]
jherzberg/article-tagging
[ "769b06061502af1517af359ea8adee51aede4fa5" ]
[ "lib/tagnews/crimetype/benchmark.py" ]
[ "from __future__ import division, print_function\n\nimport numpy as np\nimport pandas as pd\n\n\ndef get_kfold_split(N, k=4):\n \"\"\"\n Create groups used for k-fold cross validation.\n\n Parameters\n ----------\n N : number of samples to split\n k : number of groups used for cross validation\n\n Returns\n -------\n List of (index_train, index_test) pairs\n \"\"\"\n np.random.seed(2017)\n idx = np.random.permutation(N)\n index_pairs = [(np.ones(N).astype(np.bool),\n np.zeros(N).astype(np.bool))\n for _ in range(k)]\n\n for i, fold_idx in enumerate(np.array_split(idx, k)):\n index_pairs[i][0][fold_idx] = 0\n index_pairs[i][1][fold_idx] = 1\n\n return index_pairs\n\n\ndef benchmark(clf_factory, X, Y, clf_params_dict=None, k=4, verbose=False):\n \"\"\"\n benchmark a classifier on preprocessed data.\n\n Parameters\n ----------\n clf_factory :\n Function which returns a classifier. Classifiers implement\n a `fit` method and a `predict` method. The parameters\n clf_params will be passed to clf_factory.\n X : NxM matrix of features\n Y : NxL matrix of binary values. Y[i,j] indicates whether or\n not the j'th tag applies to the i'th article.\n clf_params_dict :\n dictionary of parameters passed to the classifier factory.\n If None, no parameters are passed.\n k : how many folds to use for cross validation\n verbose : Should status be printed?\n \"\"\"\n if clf_params_dict is None:\n clf_params_dict = {}\n\n L = Y.shape[1]\n\n fold_indexes = get_kfold_split(X.shape[0], k)\n acc = np.zeros(k)\n tpr = np.zeros((k, L))\n fpr = np.zeros((k, L))\n ppv = np.zeros((k, L))\n\n clfs = []\n for i, (idx_trn, idx_tst) in enumerate(fold_indexes):\n if verbose:\n print('step {} of {}...'.format(i, k), end='')\n\n clf = clf_factory(**clf_params_dict)\n\n x_trn = X[idx_trn, :]\n y_trn = Y[idx_trn, :]\n\n x_tst = X[idx_tst, :]\n y_tst = Y[idx_tst, :]\n\n clf.fit(x_trn, y_trn)\n y_hat = clf.predict_proba(x_tst)\n y_hat = y_hat > 0.5\n\n y_hat.dtype = np.int8\n y_tst.dtype = np.int8\n\n acc[i] = (np.sum(y_tst == y_hat)) / float(y_tst.size)\n for j in range(L):\n tpr[i, j] = np.sum(y_tst[:, j] & y_hat[:, j]) / np.sum(y_tst[:, j])\n fpr[i, j] = (np.sum(np.logical_not(y_tst[:, j]) & y_hat[:, j])\n / np.sum(np.logical_not(y_tst[:, j])))\n ppv[i, j] = np.sum(y_tst[:, j] & y_hat[:, j]) / np.sum(y_hat[:, j])\n\n clfs.append(clf)\n\n if verbose:\n print('done')\n\n return {'acc': acc, 'tpr': tpr, 'fpr': fpr, 'ppv': ppv, 'clfs': clfs}\n\n\ndef predict_articles(clf, vectorizer, df, n=100, seed=1029384756):\n np.random.seed(seed)\n\n pd.set_option('display.max_columns', 100)\n pd.set_option('display.float_format', lambda x: '%.6f' % x)\n\n random_subset = np.random.choice(np.arange(df.shape[0]),\n size=n,\n replace=False)\n\n preds = clf.predict_proba(vectorizer.transform(\n df.iloc[random_subset, 3].values\n ))\n preds = pd.DataFrame(preds)\n preds.columns = df.loc[:, 'OEMC':'TASR'].columns\n\n for i, rand_i in enumerate(random_subset):\n s = 'Article ID: ' + str(df.index[rand_i])\n s += '\\n' + df.iloc[rand_i, 3]\n s += '\\n Predicted Tags: '\n s += str(preds.iloc[i, :].index[preds.iloc[i, :] > 0.5].values)\n s += '\\n' + str(preds.iloc[i, :])\n s += '\\n'\n filename = 'test-tag-' + str(df.index[rand_i]) + '.txt'\n with open(filename, 'w', encoding='utf-8') as f:\n f.write(s)\n" ]
[ [ "numpy.logical_not", "numpy.random.seed", "numpy.arange", "pandas.DataFrame", "numpy.ones", "numpy.random.permutation", "pandas.set_option", "numpy.array_split", "numpy.zeros", "numpy.sum" ] ]
bdh-team-12/sleep-predictions-through-deep-learning
[ "7664cdffc0a0b0e732bffc95fd01e3ea27687025" ]
[ "CNN_CNN_Model/Train_Model.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Apr 24 14:11:51 2019\r\n\r\n@author: CRNZ\r\n\"\"\"\r\nimport numpy as np\r\nimport pandas as pd\r\nimport glob\r\nimport os\r\nfrom glob import glob\r\nfrom Models import get_base_model,get_model_cnn\r\nfrom keras import optimizers, losses, activations, models\r\nfrom keras.layers import Dense, Input, Dropout, Convolution1D, MaxPool1D, GlobalMaxPool1D, GlobalAveragePooling1D, \\\r\n concatenate, SpatialDropout1D, TimeDistributed, Bidirectional, LSTM\r\nfrom keras_contrib.layers import CRF\r\nfrom sklearn.model_selection import train_test_split\r\nfrom tqdm import tqdm\r\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau\r\nfrom Utilities import *\r\nfrom sklearn.metrics import *\r\nfrom plots import plot_learning_curves, plot_confusion_matrix\r\n\r\ndata_path = \"./data_npz\"\r\nfiles = sorted(glob(os.path.join(data_path, \"*.npz\")))\r\nfile_path = \"cnn_model.h5\"\r\nepochs=30\r\n\r\nids = sorted(list(set([x.split(\"\\\\\")[-1][:12] for x in files])))\r\n#split by test subject\r\ntrain_ids, test_ids = train_test_split(ids, test_size=0.15, random_state=1338)\r\n\r\ntrain_val, test = [x for x in files if x.split(\"\\\\\")[-1][:12] in train_ids],\\\r\n [x for x in files if x.split(\"\\\\\")[-1][:12] in test_ids]\r\n\r\ntrain, val = train_test_split(train_val, test_size=0.1, random_state=1337)\r\n\r\ntrain_dict = {k: np.load(k) for k in train}\r\ntest_dict = {k: np.load(k) for k in test}\r\nval_dict = {k: np.load(k) for k in val}\r\n\r\nmodel = get_model_cnn()\r\n\r\n\r\n# model.load_weights(file_path)\r\n\r\ncheckpoint = ModelCheckpoint(file_path, monitor='val_acc', verbose=1, save_best_only=True, mode='max')\r\nearly = EarlyStopping(monitor=\"val_acc\", mode=\"max\", patience=20, verbose=1)\r\nredonplat = ReduceLROnPlateau(monitor=\"val_acc\", mode=\"max\", patience=5, verbose=2)\r\ncallbacks_list = [checkpoint, early, redonplat] # early\r\n\r\nmodel.fit_generator(gen(train_dict, aug=False), validation_data=gen(val_dict), epochs=epochs, verbose=2,\r\n steps_per_epoch=1000, validation_steps=300, callbacks=callbacks_list)\r\nmodel.load_weights(file_path)\r\n\r\n'''for test model'''\r\npreds = []\r\ngt = []\r\n\r\nfor record in tqdm(test_dict):\r\n all_rows = test_dict[record]['x']\r\n for batch_hyp in chunker(range(all_rows.shape[0])):\r\n\r\n\r\n X = all_rows[min(batch_hyp):max(batch_hyp)+1, ...]\r\n Y = test_dict[record]['y'][min(batch_hyp):max(batch_hyp)+1]\r\n\r\n X = np.expand_dims(X, 0)\r\n\r\n X = rescale_array(X)\r\n\r\n Y_pred = model.predict(X)\r\n Y_pred = Y_pred.argmax(axis=-1).ravel().tolist()\r\n\r\n gt += Y.ravel().tolist()\r\n preds += Y_pred\r\n\r\n\r\nf1score = f1_score(gt, preds, average=\"weighted\")\r\nacc = accuracy_score(gt, preds)\r\nprint(\"Test f1 score : %s accuracy score : %s\"%(f1score,acc))\r\n\r\nclass_names = ['Wake', 'Stage 1', 'Stage 2', 'Stage 3', 'R']\r\nplot_confusion_matrix(list(map(lambda x,y:(x,y),gt,preds)), class_names)" ]
[ [ "numpy.load", "numpy.expand_dims", "sklearn.model_selection.train_test_split" ] ]
WoojunePark/BasicSR
[ "7a6f13d3933db6c5be3319e37815c57ff72ab374" ]
[ "test_scripts/test_esrgan.py" ]
[ "import argparse\nimport cv2\nimport glob\nimport numpy as np\nimport os\nimport torch\n\nfrom basicsr.models.archs.rrdbnet_arch import RRDBNet\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--model_path',\n type=str,\n default= # noqa: E251\n 'experiments/pretrained_models/ESRGAN/ESRGAN_SRx4_DF2KOST_official-ff704c30.pth' # noqa: E501\n )\n parser.add_argument(\n '--folder',\n type=str,\n default='datasets/Set14/LRbicx4',\n help='input test image folder')\n parser.add_argument(\n '--device', type=str, default='cuda', help='Options: cuda, cpu.')\n args = parser.parse_args()\n\n device = torch.device(args.device)\n\n # set up model\n model = RRDBNet(\n num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32)\n model.load_state_dict(torch.load(args.model_path)['params'], strict=True)\n model.eval()\n model = model.to(device)\n\n os.makedirs('results/ESRGAN', exist_ok=True)\n for idx, path in enumerate(\n sorted(glob.glob(os.path.join(args.folder, '*')))):\n imgname = os.path.splitext(os.path.basename(path))[0]\n print('Testing', idx, imgname)\n # read image\n img = cv2.imread(path, cv2.IMREAD_COLOR).astype(np.float32) / 255.\n img = torch.from_numpy(np.transpose(img[:, :, [2, 1, 0]],\n (2, 0, 1))).float()\n img = img.unsqueeze(0).to(device)\n # inference\n with torch.no_grad():\n output = model(img)\n # save image\n output = output.data.squeeze().float().cpu().clamp_(0, 1).numpy()\n output = np.transpose(output[[2, 1, 0], :, :], (1, 2, 0))\n output = (output * 255.0).round().astype(np.uint8)\n cv2.imwrite(f'results/ESRGAN/{imgname}_ESRGAN.png', output)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.device", "torch.no_grad", "torch.load", "numpy.transpose" ] ]
asitnayak/MLFlow-FashionMNIST-PyTorch
[ "bada0eeee45f91599fd135ffdb5e720770f3761c" ]
[ "src/stage_03_training_model.py" ]
[ "import argparse\nfrom calendar import EPOCH\nimport os\nimport shutil\nfrom tqdm import tqdm\nimport logging\nimport torch\nimport torch.nn as nn\nfrom src.utils.common import read_yaml, create_directories\nfrom src.stage_01_get_data import main as loader_main\nfrom src.stage_02_base_model_creation import CNN\nimport random\nimport warnings\n\n\nSTAGE = \"TRAINING_MODEL\" ## <<< change stage name \n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\nformatter = logging.Formatter(\"[%(asctime)s: %(levelname)s: %(module)s]: %(message)s\")\n\nfile_handler = logging.FileHandler(os.path.join(\"logs\", \"running_logs.log\"))\nfile_handler.setFormatter(formatter)\n\nlogger.addHandler(file_handler)\n\nwarnings.filterwarnings('ignore')\n\n\ndef main(config_path):\n ## read config files\n try:\n config = read_yaml(config_path)\n\n DEVICE = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n model_dir = config['artifacts']['model_dir']\n base_model_name = config['artifacts']['base_model_name']\n trained_model_name = config['artifacts']['trained_model_name']\n base_model_path = os.path.join(model_dir, base_model_name)\n trained_model_path = os.path.join(model_dir, trained_model_name)\n\n logger.info(f\"loading base model from {base_model_path}\")\n loaded_model = torch.load(base_model_path)\n logger.info(\"Base model loaded\")\n loaded_model.to(DEVICE)\n logger.info(f\"Base model loaded into {DEVICE}\")\n criterion = nn.CrossEntropyLoss() # Loss function\n optimizer = torch.optim.Adam(loaded_model.parameters() , lr=config['params']['LEARNING_RATE'])\n\n train_data_loader, test_data_loader, labels_dict = loader_main(config_path)\n EPOCH = config['params']['EPOCH']\n\n for epoch in range(EPOCH):\n with tqdm(train_data_loader) as tqdm_epoch:\n for images, labels in tqdm_epoch:\n tqdm_epoch.set_description(f\"Epoch : {epoch + 1} / EPOCH\")\n\n # put the images on device\n images = images.to(DEVICE)\n labels = labels.to(DEVICE)\n\n # forward pass\n output = loaded_model(images)\n loss = criterion(output, labels)\n\n # backpropagation\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n tqdm_epoch.set_postfix(loss = loss.item())\n\n logger.info(\"Model training completed successfully\")\n torch.save(loaded_model, trained_model_path)\n logger.info(f\"trained model saved at {trained_model_path}\")\n\n except Exception as e:\n logger.exception(e)\n print(e)\n\n\nif __name__ == '__main__':\n args = argparse.ArgumentParser()\n args.add_argument(\"--config\", \"-c\", default=\"configs/config.yaml\")\n parsed_args = args.parse_args()\n\n try:\n logger.info(\"\\n********************\")\n logger.info(f\">>>>> stage {STAGE} started <<<<<\")\n main(config_path=parsed_args.config)\n logger.info(f\">>>>> stage {STAGE} completed!<<<<<\\n\")\n except Exception as e:\n logger.exception(e)\n raise e" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.save", "torch.cuda.is_available", "torch.load" ] ]
dmeybohm/intellij-community
[ "7fcc441fd5902ec3d237c34ee93f5ed1faf23629" ]
[ "python/helpers/pycharm_matplotlib_backend/backend_interagg.py" ]
[ "import matplotlib\nimport os\nimport socket\nimport struct\nfrom matplotlib._pylab_helpers import Gcf\nfrom matplotlib.backend_bases import FigureManagerBase, ShowBase\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg\nfrom matplotlib.figure import Figure\n\nHOST = 'localhost'\nPORT = os.getenv(\"PYCHARM_MATPLOTLIB_PORT\")\nPORT = int(PORT) if PORT is not None else None\nPORT = PORT if PORT != -1 else None\nindex = int(os.getenv(\"PYCHARM_MATPLOTLIB_INDEX\", 0))\n\nrcParams = matplotlib.rcParams\nverbose = matplotlib.verbose\n\n\nclass Show(ShowBase):\n def __call__(self, **kwargs):\n managers = Gcf.get_all_fig_managers()\n if not managers:\n return\n\n for manager in managers:\n manager.show(**kwargs)\n\n def mainloop(self):\n pass\n\n\nshow = Show()\n\n\n# from pyplot API\ndef draw_if_interactive():\n if matplotlib.is_interactive():\n figManager = Gcf.get_active()\n if figManager is not None:\n figManager.canvas.show()\n\n\n# from pyplot API\ndef new_figure_manager(num, *args, **kwargs):\n FigureClass = kwargs.pop('FigureClass', Figure)\n figure = FigureClass(*args, **kwargs)\n return new_figure_manager_given_figure(num, figure)\n\n\n# from pyplot API\ndef new_figure_manager_given_figure(num, figure):\n canvas = FigureCanvasInterAgg(figure)\n manager = FigureManagerInterAgg(canvas, num)\n return manager\n\n\n# from pyplot API\nclass FigureCanvasInterAgg(FigureCanvasAgg):\n def __init__(self, figure):\n FigureCanvasAgg.__init__(self, figure)\n\n def show(self):\n self.figure.tight_layout()\n FigureCanvasAgg.draw(self)\n if PORT is None:\n return\n\n if matplotlib.__version__ < '1.2':\n buffer = self.tostring_rgb(0, 0)\n else:\n buffer = self.tostring_rgb()\n\n if len(set(buffer)) <= 1:\n # do not plot empty\n return\n\n render = self.get_renderer()\n width = int(render.width)\n\n plot_index = index if os.getenv(\"PYCHARM_MATPLOTLIB_INTERACTIVE\", False) else -1\n try:\n sock = socket.socket()\n sock.connect((HOST, PORT))\n sock.send(struct.pack('>i', width))\n sock.send(struct.pack('>i', plot_index))\n sock.send(struct.pack('>i', len(buffer)))\n sock.send(buffer)\n except OSError as _:\n # nothing bad. It just means, that our tool window doesn't run yet\n pass\n\n def draw(self):\n is_interactive = os.getenv(\"PYCHARM_MATPLOTLIB_INTERACTIVE\", False)\n if is_interactive and matplotlib.is_interactive():\n self.show()\n\n\nclass FigureManagerInterAgg(FigureManagerBase):\n def __init__(self, canvas, num):\n FigureManagerBase.__init__(self, canvas, num)\n global index\n index += 1\n self.canvas = canvas\n self._num = num\n self._shown = False\n\n def show(self, **kwargs):\n self.canvas.show()\n Gcf.destroy(self._num)\n" ]
[ [ "matplotlib.backend_bases.FigureManagerBase.__init__", "matplotlib.is_interactive", "matplotlib._pylab_helpers.Gcf.get_all_fig_managers", "matplotlib.backends.backend_agg.FigureCanvasAgg.draw", "matplotlib.backends.backend_agg.FigureCanvasAgg.__init__", "matplotlib._pylab_helpers.Gcf.get_active", "matplotlib._pylab_helpers.Gcf.destroy" ] ]
wjyamada/BaleIdentification
[ "42280bc70af985691ad3c1d6519b96ad6d89f464" ]
[ "yolov3/train.py" ]
[ "import argparse\n\nimport torch.distributed as dist\nimport torch.optim as optim\nimport torch.optim.lr_scheduler as lr_scheduler\nfrom torch.utils.tensorboard import SummaryWriter\n\nimport test # import test.py to get mAP after each epoch\nfrom models import *\nfrom utils.datasets import *\nfrom utils.utils import *\n\nmixed_precision = True\ntry: # Mixed precision training https://github.com/NVIDIA/apex\n from apex import amp\nexcept:\n print('Apex recommended for faster mixed precision training: https://github.com/NVIDIA/apex')\n mixed_precision = False # not installed\n\nwdir = 'weights' + os.sep # weights dir\nlast = wdir + 'last.pt'\nbest = wdir + 'best.pt'\nresults_file = 'results.txt'\n\n# Hyperparameters\nhyp = {'giou': 3.54, # giou loss gain\n 'cls': 37.4, # cls loss gain\n 'cls_pw': 1.0, # cls BCELoss positive_weight\n 'obj': 64.3, # obj loss gain (*=img_size/320 if img_size != 320)\n 'obj_pw': 1.0, # obj BCELoss positive_weight\n 'iou_t': 0.20, # iou training threshold\n 'lr0': 0.01, # initial learning rate (SGD=5E-3, Adam=5E-4)\n 'lrf': 0.0005, # final learning rate (with cos scheduler)\n 'momentum': 0.937, # SGD momentum\n 'weight_decay': 0.000484, # optimizer weight decay\n 'fl_gamma': 0.0, # focal loss gamma (efficientDet default is gamma=1.5)\n 'hsv_h': 0.0138, # image HSV-Hue augmentation (fraction)\n 'hsv_s': 0.678, # image HSV-Saturation augmentation (fraction)\n 'hsv_v': 0.36, # image HSV-Value augmentation (fraction)\n 'degrees': 1.98 * 0, # image rotation (+/- deg)\n 'translate': 0.05 * 0, # image translation (+/- fraction)\n 'scale': 0.05 * 0, # image scale (+/- gain)\n 'shear': 0.641 * 0} # image shear (+/- deg)\n\n# Overwrite hyp with hyp*.txt (optional)\nf = glob.glob('hyp*.txt')\nif f:\n print('Using %s' % f[0])\n for k, v in zip(hyp.keys(), np.loadtxt(f[0])):\n hyp[k] = v\n\n# Print focal loss if gamma > 0\nif hyp['fl_gamma']:\n print('Using FocalLoss(gamma=%g)' % hyp['fl_gamma'])\n\n\ndef train():\n cfg = opt.cfg\n data = opt.data\n epochs = opt.epochs # 500200 batches at bs 64, 117263 images = 273 epochs\n batch_size = opt.batch_size\n accumulate = max(round(64 / batch_size), 1) # accumulate n times before optimizer update (bs 64)\n weights = opt.weights # initial training weights\n imgsz_min, imgsz_max, imgsz_test = opt.img_size # img sizes (min, max, test)\n\n # Image Sizes\n gs = 64 # (pixels) grid size\n assert math.fmod(imgsz_min, gs) == 0, '--img-size %g must be a %g-multiple' % (imgsz_min, gs)\n opt.multi_scale |= imgsz_min != imgsz_max # multi if different (min, max)\n if opt.multi_scale:\n if imgsz_min == imgsz_max:\n imgsz_min //= 1.5\n imgsz_max //= 0.667\n grid_min, grid_max = imgsz_min // gs, imgsz_max // gs\n imgsz_min, imgsz_max = int(grid_min * gs), int(grid_max * gs)\n img_size = imgsz_max # initialize with max size\n\n # Configure run\n init_seeds()\n data_dict = parse_data_cfg(data)\n train_path = data_dict['train']\n test_path = data_dict['valid']\n nc = 1 if opt.single_cls else int(data_dict['classes']) # number of classes\n hyp['cls'] *= nc / 80 # update coco-tuned hyp['cls'] to current dataset\n\n # Remove previous results\n for f in glob.glob('*_batch*.jpg') + glob.glob(results_file):\n os.remove(f)\n\n # Initialize model\n model = Darknet(cfg).to(device)\n\n # Optimizer\n pg0, pg1, pg2 = [], [], [] # optimizer parameter groups\n for k, v in dict(model.named_parameters()).items():\n if '.bias' in k:\n pg2 += [v] # biases\n elif 'Conv2d.weight' in k:\n pg1 += [v] # apply weight_decay\n else:\n pg0 += [v] # all else\n\n if 
opt.adam:\n # hyp['lr0'] *= 0.1 # reduce lr (i.e. SGD=5E-3, Adam=5E-4)\n optimizer = optim.Adam(pg0, lr=hyp['lr0'])\n # optimizer = AdaBound(pg0, lr=hyp['lr0'], final_lr=0.1)\n else:\n optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)\n optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']}) # add pg1 with weight_decay\n optimizer.add_param_group({'params': pg2}) # add pg2 (biases)\n print('Optimizer groups: %g .bias, %g Conv2d.weight, %g other' % (len(pg2), len(pg1), len(pg0)))\n del pg0, pg1, pg2\n\n start_epoch = 0\n best_fitness = 0.0\n attempt_download(weights)\n if weights.endswith('.pt'): # pytorch format\n # possible weights are '*.pt', 'yolov3-spp.pt', 'yolov3-tiny.pt' etc.\n chkpt = torch.load(weights, map_location=device)\n\n # load model\n try:\n chkpt['model'] = {k: v for k, v in chkpt['model'].items() if model.state_dict()[k].numel() == v.numel()}\n model.load_state_dict(chkpt['model'], strict=False)\n except KeyError as e:\n s = \"%s is not compatible with %s. Specify --weights '' or specify a --cfg compatible with %s. \" \\\n \"See https://github.com/ultralytics/yolov3/issues/657\" % (opt.weights, opt.cfg, opt.weights)\n raise KeyError(s) from e\n\n # load optimizer\n if chkpt['optimizer'] is not None:\n optimizer.load_state_dict(chkpt['optimizer'])\n best_fitness = chkpt['best_fitness']\n\n # load results\n if chkpt.get('training_results') is not None:\n with open(results_file, 'w') as file:\n file.write(chkpt['training_results']) # write results.txt\n\n start_epoch = chkpt['epoch'] + 1\n del chkpt\n\n elif len(weights) > 0: # darknet format\n # possible weights are '*.weights', 'yolov3-tiny.conv.15', 'darknet53.conv.74' etc.\n load_darknet_weights(model, weights)\n\n # Mixed precision training https://github.com/NVIDIA/apex\n if mixed_precision:\n model, optimizer = amp.initialize(model, optimizer, opt_level='O1', verbosity=0)\n\n # Scheduler https://arxiv.org/pdf/1812.01187.pdf\n lf = lambda x: (((1 + math.cos(x * math.pi / epochs)) / 2) ** 1.0) * 0.95 + 0.05 # cosine\n scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)\n scheduler.last_epoch = start_epoch - 1 # see link below\n # https://discuss.pytorch.org/t/a-problem-occured-when-resuming-an-optimizer/28822\n\n # Plot lr schedule\n # y = []\n # for _ in range(epochs):\n # scheduler.step()\n # y.append(optimizer.param_groups[0]['lr'])\n # plt.plot(y, '.-', label='LambdaLR')\n # plt.xlabel('epoch')\n # plt.ylabel('LR')\n # plt.tight_layout()\n # plt.savefig('LR.png', dpi=300)\n\n # Initialize distributed training\n if device.type != 'cpu' and torch.cuda.device_count() > 1 and torch.distributed.is_available():\n dist.init_process_group(backend='nccl', # 'distributed backend'\n init_method='tcp://127.0.0.1:9999', # distributed training init method\n world_size=1, # number of nodes for distributed training\n rank=0) # distributed training node rank\n model = torch.nn.parallel.DistributedDataParallel(model, find_unused_parameters=True)\n model.yolo_layers = model.module.yolo_layers # move yolo layer indices to top level\n\n # Dataset\n dataset = LoadImagesAndLabels(train_path, img_size, batch_size,\n augment=True,\n hyp=hyp, # augmentation hyperparameters\n rect=opt.rect, # rectangular training\n cache_images=opt.cache_images,\n single_cls=opt.single_cls)\n\n # Dataloader\n batch_size = min(batch_size, len(dataset))\n nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers\n dataloader = torch.utils.data.DataLoader(dataset,\n 
batch_size=batch_size,\n num_workers=nw,\n shuffle=not opt.rect, # Shuffle=True unless rectangular training is used\n pin_memory=True,\n collate_fn=dataset.collate_fn)\n\n # Testloader\n testloader = torch.utils.data.DataLoader(LoadImagesAndLabels(test_path, imgsz_test, batch_size,\n hyp=hyp,\n rect=True,\n cache_images=opt.cache_images,\n single_cls=opt.single_cls),\n batch_size=batch_size,\n num_workers=nw,\n pin_memory=True,\n collate_fn=dataset.collate_fn)\n\n # Model parameters\n model.nc = nc # attach number of classes to model\n model.hyp = hyp # attach hyperparameters to model\n model.gr = 1.0 # giou loss ratio (obj_loss = 1.0 or giou)\n model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) # attach class weights\n\n # Model EMA\n ema = torch_utils.ModelEMA(model)\n\n # Start training\n nb = len(dataloader) # number of batches\n n_burn = max(3 * nb, 500) # burn-in iterations, max(3 epochs, 500 iterations)\n maps = np.zeros(nc) # mAP per class\n # torch.autograd.set_detect_anomaly(True)\n results = (0, 0, 0, 0, 0, 0, 0) # 'P', 'R', 'mAP', 'F1', 'val GIoU', 'val Objectness', 'val Classification'\n t0 = time.time()\n print('Image sizes %g - %g train, %g test' % (imgsz_min, imgsz_max, imgsz_test))\n print('Using %g dataloader workers' % nw)\n print('Starting training for %g epochs...' % epochs)\n for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------\n model.train()\n\n # Update image weights (optional)\n if dataset.image_weights:\n w = model.class_weights.cpu().numpy() * (1 - maps) ** 2 # class weights\n image_weights = labels_to_image_weights(dataset.labels, nc=nc, class_weights=w)\n dataset.indices = random.choices(range(dataset.n), weights=image_weights, k=dataset.n) # rand weighted idx\n\n mloss = torch.zeros(4).to(device) # mean losses\n print(('\\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'GIoU', 'obj', 'cls', 'total', 'targets', 'img_size'))\n pbar = tqdm(enumerate(dataloader), total=nb) # progress bar\n for i, (imgs, targets, paths, _) in pbar: # batch -------------------------------------------------------------\n ni = i + nb * epoch # number integrated batches (since train start)\n imgs = imgs.to(device).float() / 255.0 # uint8 to float32, 0 - 255 to 0.0 - 1.0\n targets = targets.to(device)\n\n # Burn-in\n if ni <= n_burn * 2:\n model.gr = np.interp(ni, [0, n_burn * 2], [0.0, 1.0]) # giou loss ratio (obj_loss = 1.0 or giou)\n if ni == n_burn: # burnin complete\n print_model_biases(model)\n\n for j, x in enumerate(optimizer.param_groups):\n # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0\n x['lr'] = np.interp(ni, [0, n_burn], [0.1 if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])\n if 'momentum' in x:\n x['momentum'] = np.interp(ni, [0, n_burn], [0.9, hyp['momentum']])\n\n # Multi-Scale\n if opt.multi_scale:\n if ni / accumulate % 1 == 0: #  adjust img_size (67% - 150%) every 1 batch\n img_size = random.randrange(grid_min, grid_max + 1) * gs\n sf = img_size / max(imgs.shape[2:]) # scale factor\n if sf != 1:\n ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to 32-multiple)\n imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)\n\n # Forward\n pred = model(imgs)\n\n # Loss\n loss, loss_items = compute_loss(pred, targets, model)\n if not torch.isfinite(loss):\n print('WARNING: non-finite loss, ending training ', loss_items)\n return results\n\n # Backward\n loss *= batch_size / 64 # scale loss\n if mixed_precision:\n with 
amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n # Optimize\n if ni % accumulate == 0:\n optimizer.step()\n optimizer.zero_grad()\n ema.update(model)\n\n # Print\n mloss = (mloss * i + loss_items) / (i + 1) # update mean losses\n mem = '%.3gG' % (torch.cuda.memory_cached() / 1E9 if torch.cuda.is_available() else 0) # (GB)\n s = ('%10s' * 2 + '%10.3g' * 6) % ('%g/%g' % (epoch, epochs - 1), mem, *mloss, len(targets), img_size)\n pbar.set_description(s)\n\n # Plot\n if ni < 1:\n f = 'train_batch%g.jpg' % i # filename\n res = plot_images(images=imgs, targets=targets, paths=paths, fname=f)\n if tb_writer:\n tb_writer.add_image(f, res, dataformats='HWC', global_step=epoch)\n # tb_writer.add_graph(model, imgs) # add model to tensorboard\n\n # end batch ------------------------------------------------------------------------------------------------\n\n # Update scheduler\n scheduler.step()\n\n # Process epoch results\n ema.update_attr(model)\n final_epoch = epoch + 1 == epochs\n if not opt.notest or final_epoch: # Calculate mAP\n is_coco = any([x in data for x in ['coco.data', 'coco2014.data', 'coco2017.data']]) and model.nc == 80\n results, maps = test.test(cfg,\n data,\n batch_size=batch_size,\n img_size=imgsz_test,\n model=ema.ema,\n save_json=final_epoch and is_coco,\n single_cls=opt.single_cls,\n dataloader=testloader)\n\n # Write\n with open(results_file, 'a') as f:\n f.write(s + '%10.3g' * 7 % results + '\\n') # P, R, mAP, F1, test_losses=(GIoU, obj, cls)\n if len(opt.name) and opt.bucket:\n os.system('gsutil cp results.txt gs://%s/results/results%s.txt' % (opt.bucket, opt.name))\n\n # Tensorboard\n if tb_writer:\n tags = ['train/giou_loss', 'train/obj_loss', 'train/cls_loss',\n 'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/F1',\n 'val/giou_loss', 'val/obj_loss', 'val/cls_loss']\n for x, tag in zip(list(mloss[:-1]) + list(results), tags):\n tb_writer.add_scalar(tag, x, epoch)\n\n # Update best mAP\n fi = fitness(np.array(results).reshape(1, -1)) # fitness_i = weighted combination of [P, R, mAP, F1]\n if fi > best_fitness:\n best_fitness = fi\n\n # Save model\n save = (not opt.nosave) or (final_epoch and not opt.evolve)\n if save:\n with open(results_file, 'r') as f: # create checkpoint\n chkpt = {'epoch': epoch,\n 'best_fitness': best_fitness,\n 'training_results': f.read(),\n 'model': ema.ema.module.state_dict() if hasattr(model, 'module') else ema.ema.state_dict(),\n 'optimizer': None if final_epoch else optimizer.state_dict()}\n\n # Save last, best and delete\n torch.save(chkpt, last)\n if (best_fitness == fi) and not final_epoch:\n torch.save(chkpt, best)\n del chkpt\n\n # end epoch ----------------------------------------------------------------------------------------------------\n # end training\n\n n = opt.name\n if len(n):\n n = '_' + n if not n.isnumeric() else n\n fresults, flast, fbest = 'results%s.txt' % n, wdir + 'last%s.pt' % n, wdir + 'best%s.pt' % n\n for f1, f2 in zip([wdir + 'last.pt', wdir + 'best.pt', 'results.txt'], [flast, fbest, fresults]):\n if os.path.exists(f1):\n os.rename(f1, f2) # rename\n ispt = f2.endswith('.pt') # is *.pt\n strip_optimizer(f2) if ispt else None # strip optimizer\n os.system('gsutil cp %s gs://%s/weights' % (f2, opt.bucket)) if opt.bucket and ispt else None # upload\n\n if not opt.evolve:\n plot_results() # save as results.png\n print('%g epochs completed in %.3f hours.\\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))\n dist.destroy_process_group() if 
torch.cuda.device_count() > 1 else None\n torch.cuda.empty_cache()\n return results\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--epochs', type=int, default=300) # 500200 batches at bs 16, 117263 COCO images = 273 epochs\n parser.add_argument('--batch-size', type=int, default=16) # effective bs = batch_size * accumulate = 16 * 4 = 64\n parser.add_argument('--cfg', type=str, default='cfg/yolov3-spp.cfg', help='*.cfg path')\n parser.add_argument('--data', type=str, default='data/coco2017.data', help='*.data path')\n parser.add_argument('--multi-scale', action='store_true', help='adjust (67%% - 150%%) img_size every 10 batches')\n parser.add_argument('--img-size', nargs='+', type=int, default=[320, 640], help='[min_train, max-train, test]')\n parser.add_argument('--rect', action='store_true', help='rectangular training')\n parser.add_argument('--resume', action='store_true', help='resume training from last.pt')\n parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')\n parser.add_argument('--notest', action='store_true', help='only test final epoch')\n parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters')\n parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')\n parser.add_argument('--cache-images', action='store_true', help='cache images for faster training')\n parser.add_argument('--weights', type=str, default='weights/yolov3-spp-ultralytics.pt', help='initial weights path')\n parser.add_argument('--name', default='', help='renames results.txt to results_name.txt if supplied')\n parser.add_argument('--device', default='', help='device id (i.e. 0 or 0,1 or cpu)')\n parser.add_argument('--adam', action='store_true', help='use adam optimizer')\n parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset')\n opt = parser.parse_args()\n opt.weights = last if opt.resume else opt.weights\n check_git_status()\n print(opt)\n opt.img_size.extend([opt.img_size[-1]] * (3 - len(opt.img_size))) # extend to 3 sizes (min, max, test)\n device = torch_utils.select_device(opt.device, apex=mixed_precision, batch_size=opt.batch_size)\n if device.type == 'cpu':\n mixed_precision = False\n\n # scale hyp['obj'] by img_size (evolved at 320)\n # hyp['obj'] *= opt.img_size[0] / 320.\n\n tb_writer = None\n if not opt.evolve: # Train normally\n print('Start Tensorboard with \"tensorboard --logdir=runs\", view at http://localhost:6006/')\n tb_writer = SummaryWriter(comment=opt.name)\n train() # train normally\n\n else: # Evolve hyperparameters (optional)\n opt.notest, opt.nosave = True, True # only test/save final epoch\n if opt.bucket:\n os.system('gsutil cp gs://%s/evolve.txt .' 
% opt.bucket) # download evolve.txt if exists\n\n for _ in range(1): # generations to evolve\n if os.path.exists('evolve.txt'): # if evolve.txt exists: select best hyps and mutate\n # Select parent(s)\n parent = 'single' # parent selection method: 'single' or 'weighted'\n x = np.loadtxt('evolve.txt', ndmin=2)\n n = min(5, len(x)) # number of previous results to consider\n x = x[np.argsort(-fitness(x))][:n] # top n mutations\n w = fitness(x) - fitness(x).min() # weights\n if parent == 'single' or len(x) == 1:\n # x = x[random.randint(0, n - 1)] # random selection\n x = x[random.choices(range(n), weights=w)[0]] # weighted selection\n elif parent == 'weighted':\n x = (x * w.reshape(n, 1)).sum(0) / w.sum() # weighted combination\n\n # Mutate\n method, mp, s = 3, 0.9, 0.2 # method, mutation probability, sigma\n npr = np.random\n npr.seed(int(time.time()))\n g = np.array([1, 1, 1, 1, 1, 1, 1, 0, .1, 1, 0, 1, 1, 1, 1, 1, 1, 1]) # gains\n ng = len(g)\n if method == 1:\n v = (npr.randn(ng) * npr.random() * g * s + 1) ** 2.0\n elif method == 2:\n v = (npr.randn(ng) * npr.random(ng) * g * s + 1) ** 2.0\n elif method == 3:\n v = np.ones(ng)\n while all(v == 1): # mutate until a change occurs (prevent duplicates)\n # v = (g * (npr.random(ng) < mp) * npr.randn(ng) * s + 1) ** 2.0\n v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)\n for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300)\n hyp[k] = x[i + 7] * v[i] # mutate\n\n # Clip to limits\n keys = ['lr0', 'iou_t', 'momentum', 'weight_decay', 'hsv_s', 'hsv_v', 'translate', 'scale', 'fl_gamma']\n limits = [(1e-5, 1e-2), (0.00, 0.70), (0.60, 0.98), (0, 0.001), (0, .9), (0, .9), (0, .9), (0, .9), (0, 3)]\n for k, v in zip(keys, limits):\n hyp[k] = np.clip(hyp[k], v[0], v[1])\n\n # Train mutation\n results = train()\n\n # Write mutation results\n print_mutation(hyp, results, opt.bucket)\n\n # Plot results\n # plot_evolution_results(hyp)\n" ]
[ [ "torch.optim.Adam", "torch.optim.lr_scheduler.LambdaLR", "torch.distributed.init_process_group", "torch.utils.tensorboard.SummaryWriter", "torch.distributed.destroy_process_group", "torch.optim.SGD" ] ]
yadala1998/Triangulation-of-convex-layers
[ "5da10fac0478e088c2725f1abe0b20c87380009b" ]
[ "input_file_generator.py" ]
[ "#!/usr/bin/env python3\n\"\"\"\nUnivertity of Wisconsin-Madison\nYaqi Zhang\n\"\"\"\nimport sys\nimport numpy as np\n\ndef main():\n if len(sys.argv) != 7:\n print(\"Usage: >> python {} <filename> <npoints> <xmin> <xmax> <ymin> <ymax>\".format(sys.argv[0]))\n sys.exit(1)\n filename = sys.argv[1]\n npoints = int(sys.argv[2])\n xmin, xmax, ymin, ymax = (float(x) for x in sys.argv[3:])\n xs = np.random.uniform(xmin, xmax, (npoints, 1))\n ys = np.random.uniform(ymin, ymax, (npoints, 1))\n with open(filename, 'w') as out_file:\n for row in range(npoints):\n out_file.write(\"%.6f\\t%.6f\\n\" % (xs[row], ys[row]))\n print(\"write {:d} points to {:s}\".format(npoints, filename))\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.random.uniform" ] ]
hhelm10/primitives-interfaces
[ "15766d77dae016fa699a46bade0fe66711b23459" ]
[ "jhu_primitives/sgm/ben_sgm/backends/fused.py" ]
[ "#!/usr/bin/env python\n\n\"\"\"\n backends/fused.py\n\"\"\"\n\nfrom time import time\nfrom ..common import _BaseSGM, _JVMixin\nfrom .. import lap_solvers\n\nimport numpy as np\nfrom scipy import sparse\n\n# --\n# SGM loop\n\nclass BaseSGMFused(_BaseSGM):\n def run(self, num_iters, tolerance, verbose=True):\n A, B, P = self.A, self.B, self.P\n if hasattr(self, '_warmup'):\n self._warmup()\n \n self._reset_timers()\n \n \n AP = A.dot(P)\n \n for i in range(num_iters):\n iter_t = time()\n \n lap_t = time()\n T = self.solve_lap_fused(AP, B)\n self.lap_times.append(time() - lap_t)\n \n AT = A.dot(T)\n \n ps_grad_P = self.compute_trace(AP, B, P)\n ps_grad_T = self.compute_trace(AP, B, T)\n ps_gradt_P = self.compute_trace(AT, B, P)\n ps_gradt_T = self.compute_trace(AT, B, T)\n \n alpha, stop = self.check_convergence(\n c=ps_grad_P,\n d=ps_gradt_P + ps_grad_T,\n e=ps_gradt_T,\n tolerance=tolerance,\n )\n \n if not stop:\n if alpha is not None:\n P = (alpha * P) + (1 - alpha) * T\n AP = (alpha * AP) + (1 - alpha) * AT\n else:\n P = T\n AP = AT\n \n self.iter_times.append(time() - iter_t)\n if verbose:\n self._log_times()\n \n if stop:\n break\n \n return self.solve_lap_final(P)\n\n# --\n\nclass _ScipyFusedSGM(_JVMixin, BaseSGMFused):\n def _warmup(self):\n x = sparse.random(100, 100, density=0.5).tocsr()\n y = sparse.random(100, 100, density=0.5).tocsr()\n _ = self.solve_lap_fused(x, y, verbose=False)\n \n def compute_trace(self, AX, B, Y):\n YBt = Y.dot(B.T)\n \n AX_sum = Y.dot(AX.sum(axis=1)).sum()\n B_sum = Y.T.dot(B.sum(axis=0).T).sum()\n \n return 4 * AX.multiply(YBt).sum() + AX.shape[0] * Y.sum() - 2 * (AX_sum + B_sum)\n \n def solve_lap_final(self, cost):\n return lap_solvers.jv(cost, jv_backend=self.jv_backend)\n\n\nclass JVFusedSGM(_ScipyFusedSGM):\n def solve_lap_fused(self, AP, B, verbose=True):\n rowcol_offsets = - 2 * AP.sum(axis=1) - 2 * B.sum(axis=0) + AP.shape[0]\n idx = lap_solvers.jv(\n AP.dot(B).toarray() + rowcol_offsets, \n jv_backend=self.jv_backend\n )\n return sparse.csr_matrix((np.ones(AP.shape[0]), (np.arange(idx.shape[0]), idx)))\n\n\nclass AuctionFusedSGM(_ScipyFusedSGM):\n def solve_lap_fused(self, AP, B, verbose=False):\n idx = lap_solvers.dot_auction(AP, B, AP.shape[0], verbose=verbose)\n return sparse.csr_matrix((np.ones(AP.shape[0]), (np.arange(idx.shape[0]), idx)))\n" ]
[ [ "scipy.sparse.random", "numpy.arange", "numpy.ones" ] ]
ekrim/latent-tracking
[ "2688cd9adad18b301552b033b20acecbcce17868" ]
[ "geometry.py" ]
[ "import os\nimport sys\nimport PIL\nimport colorsys\nfrom collections import namedtuple\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport torch\n\nimport data\n\n\nJOINTS = 'wrist index_mcp index_pip index_dip index_tip middle_mcp middle_pip middle_dip middle_tip ring_mcp ring_pip ring_dip ring_tip little_mcp little_pip little_dip little_tip thumb_mcp thumb_pip thumb_dip thumb_tip'.split(' ')\n\nMS = 15\nMS2 = 30\n\nHandPart = namedtuple('HandPart', 'connections conn_color pts pt_color pt_size')\n\nINDEX = HandPart(\n connections=np.array([[4,3], [3,2], [2,1]]),\n conn_color='g',\n pts=[1,2,3,4],\n pt_color='g',\n pt_size=MS)\n\nMIDDLE = HandPart(\n connections=np.array([[8,7], [7,6], [6,5]]),\n conn_color='m',\n pts=[5,6,7,8],\n pt_color='m',\n pt_size=MS)\n\nRING = HandPart(\n connections=np.array([[12,11], [11,10], [10,9]]),\n conn_color='r',\n pts=[9,10,11,12],\n pt_color='r',\n pt_size=MS)\n\nLITTLE = HandPart(\n connections=np.array([[16,15], [15,14], [14,13]]),\n conn_color='c',\n pts=[13,14,15,16],\n pt_color='c',\n pt_size=MS)\n\nTHUMB = HandPart(\n connections=np.array([[20,19], [19,18], [18,17]]),\n conn_color='y',\n pts=[17,18,19,20],\n pt_color='y',\n pt_size=MS)\n\nWRIST = HandPart(\n connections=np.array([[0,17], [0,1], [0,5], [0,9], [0,13]]),\n conn_color='b',\n pts=[0],\n pt_color='b',\n pt_size=MS2)\n\nHAND = [INDEX, MIDDLE, RING, LITTLE, THUMB, WRIST]\n\nlw_w = 3\nlw_t1, lw_t2, lw_t3 = 2.8, 2.2, 2.1\nlw_l1, lw_l2, lw_l3 = 2.3, 1.5, 1.2\nlw_r1, lw_r2, lw_r3 = 2.4, 1.7, 1.4\nlw_m1, lw_m2, lw_m3 = 2.7, 2.0, 1.8\nlw_i1, lw_i2, lw_i3 = 2.8, 2.0, 1.8\n\ns_w = 30\ns_t1, s_t2, s_t3, s_t4 = 18, 16, 14, 12\ns_l1, s_l2, s_l3, s_l4 = 18, 16, 14, 12\ns_r1, s_r2, s_r3, s_r4 = 18, 16, 14, 12\ns_m1, s_m2, s_m3, s_m4 = 18, 16, 14, 12\ns_i1, s_i2, s_i3, s_i4 = 18, 16, 14, 12\n\nOBJECTS = [\n ['l', 0, 17, lw_w],\n ['l', 0, 1, lw_w],\n ['l', 0, 5, lw_w],\n ['l', 0, 9, lw_w],\n ['l', 0, 13, lw_w],\n ['l', 20, 19, lw_t3],\n ['l', 19, 18, lw_t2],\n ['l', 18, 17, lw_t1],\n ['l', 16, 15, lw_l3],\n ['l', 15, 14, lw_l2],\n ['l', 14, 13, lw_l1],\n ['l', 12, 11, lw_r3],\n ['l', 11, 10, lw_r2],\n ['l', 10, 9, lw_r1],\n ['l', 8, 7, lw_m3],\n ['l', 7, 6, lw_m2],\n ['l', 6, 5, lw_m1],\n ['l', 4, 3, lw_i3],\n ['l', 3, 2, lw_i2],\n ['l', 2, 1, lw_i1],\n ['pt', 0, s_w, 2/3], \n ['pt', 17, s_t1, 0],\n ['pt', 18, s_t2, 0],\n ['pt', 19, s_t3, 0],\n ['pt', 20, s_t4, 0],\n ['pt', 13, s_l1, 0.2], \n ['pt', 14, s_l2, 0.2],\n ['pt', 15, s_l3, 0.2],\n ['pt', 16, s_l4, 0.2],\n ['pt', 9, s_r1, 0.4],\n ['pt', 10, s_r2, 0.4],\n ['pt', 11, s_r3, 0.4],\n ['pt', 12, s_r4, 0.4],\n ['pt', 5, s_m1, 0.6],\n ['pt', 6, s_m2, 0.6],\n ['pt', 7, s_m3, 0.6],\n ['pt', 8, s_m4, 0.6],\n ['pt', 1, s_i1, 0.8],\n ['pt', 2, s_i2, 0.8],\n ['pt', 3, s_i3, 0.8],\n ['pt', 4, s_i4, 0.8]]\n\n\ndef depth_to_camera(jts, azim, elev):\n L = 10\n camera = np.float32([[L*np.cos(azim), L*np.sin(azim), L*np.sin(elev)]])\n return np.sqrt(np.sum((jts - camera)**2, axis=1))\n\n\ndef depth_to_value(depth):\n depth_range = 0.4\n d = -depth\n d -= np.min(d)\n d = d/np.max(d)\n d *= depth_range\n d += (1-depth_range)\n return d\n\n\ndef plot_skeleton3d(jts, ax, azim=30, elev=60, autoscale=False, axes=False):\n if jts.shape[-1] != 3:\n jts = jts.reshape((-1, 3)) \n\n depth = depth_to_camera(jts, azim*np.pi/180, elev*np.pi/180)\n \n depth_list = []\n for obj in OBJECTS:\n if obj[0] == 'l':\n depth_list += [np.mean(depth[obj[1:3]])]\n else:\n depth_list += [depth[obj[1]]]\n \n depth_arr = 
np.array(depth_list)\n furthest_idx = np.argsort(-depth_arr)\n \n #value_arr = depth_to_value(depth_arr)\n value_arr = 0.8*np.ones(depth_arr.shape)\n\n for i in furthest_idx:\n obj = OBJECTS[i] \n depth = depth_arr[i]\n value = value_arr[i]\n \n if obj[0] == 'l':\n idx = obj[1:3]\n lw = obj[3]\n rgb = colorsys.hsv_to_rgb(2/3, 1, value)\n a = ax.plot(jts[idx,0], jts[idx,1], jts[idx,2])\n a[0].set_linewidth(lw) \n a[0].set_color(rgb)\n\n else:\n idx = [obj[1]]\n rgb = colorsys.hsv_to_rgb(obj[3], 1, value)\n a = ax.plot(jts[idx,0], jts[idx,1], jts[idx,2], '.', markeredgecolor='none')\n a[0].set_markersize(obj[2])\n a[0].set_markerfacecolor(rgb)\n\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('z')\n ax.set_aspect('equal')\n\n return ax\n\n\ndef plot3d(x, ax, col='b', ms=10):\n ax.plot(x[:,0], x[:,1], x[:,2], '.'+col, markersize=ms)\n ax.set_xlabel('y')\n ax.set_ylabel('x')\n ax.set_zlabel('z')\n return ax\n\n\ndef old_plot_skeleton3d(x, ax, autoscale=True, axes=True):\n if x.shape[-1] != 3:\n x = x.reshape((-1, 3))\n\n for obj in HAND:\n ax = plot3d(x[obj.pts], ax, col=obj.pt_color, ms=obj.pt_size)\n for idx_pair in obj.connections:\n ax.plot(x[idx_pair, 0], x[idx_pair, 1], x[idx_pair, 2], obj.conn_color)\n\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('z')\n ax.set_aspect('equal')\n\n if autoscale:\n ranges = np.concatenate([ \n np.min(x, axis=0)[None,:],\n np.max(x, axis=0)[None,:]], axis=0)\n\n max_range = np.ceil(np.max(ranges[1] - ranges[0]))\n mean_range = np.mean(ranges, axis=0)\n \n new_range = np.concatenate([\n (mean_range-max_range/2)[None,:],\n (mean_range+max_range/2)[None,:],\n ])\n \n ax.auto_scale_xyz(new_range[:,0], new_range[:,1], new_range[:,2])\n ax.get_xaxis().set_visible(axes)\n ax.get_yaxis().set_visible(axes)\n return ax\n\n\ndef plot_skeleton2d(x, ax, autoscale=True, axes=False):\n if x.shape[-1] != 3:\n x = x.reshape((-1, 3))\n \n x[:,2] = -x[:,2]\n\n z = np.array([x[obj.pts[0], 2] for obj in HAND])\n plot_order = np.argsort(-z)\n \n for idx in plot_order:\n obj = HAND[idx]\n ax.plot(x[obj.pts, 0], x[obj.pts, 1], '.'+obj.pt_color, markersize=obj.pt_size)\n for idx_pair in obj.connections:\n ax.plot(x[idx_pair, 0], x[idx_pair, 1], obj.conn_color)\n\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_aspect('equal')\n\n if autoscale:\n ranges = np.concatenate([ \n np.min(x, axis=0)[None,:],\n np.max(x, axis=0)[None,:]], axis=0)\n\n max_range = np.ceil(np.max(ranges[1] - ranges[0]))\n mean_range = np.mean(ranges, axis=0)\n \n new_range = np.concatenate([\n (mean_range-max_range/2)[None,:],\n (mean_range+max_range/2)[None,:],\n ])\n \n ax.set_xlim(new_range[:,0]) \n ax.set_ylim(new_range[:,1]) \n ax.get_xaxis().set_visible(axes)\n ax.get_yaxis().set_visible(axes)\n return ax\n\n\ndef joints_over_depth(jts, img, ax):\n #ax.imshow(img.transpose(PIL.Image.FLIP_TOP_BOTTOM))\n ax.imshow(img[::-1,:], cmap='Greys_r')\n\n n_rows, n_cols = img.shape\n \n bounds = find_bounds(img)\n\n new_jts = jts.copy().reshape((-1,3))\n\n new_jts = normalize_dim(new_jts, bounds[3], bounds[1], 1)\n x_min = np.min(new_jts[:,0])\n new_jts[:,0] += bounds[0] - x_min\n \n plot_skeleton2d(new_jts, ax, autoscale=False)\n return ax\n\n\ndef find_bounds(img, thresh=0.7):\n truth = img > thresh \n col_bounds = np.arange(img.shape[1])[np.any(truth, axis=0)][[0,-1]]\n row_bounds = np.arange(img.shape[0])[np.any(truth, axis=1)][[0,-1]]\n return col_bounds[0], img.shape[0]-row_bounds[0], col_bounds[1], img.shape[0]-row_bounds[1]\n\n\ndef normalize_dim(x, goal_min, goal_max, dim):\n # put 
in [0,1]\n min_val = np.min(x[:,dim])\n x -= min_val\n max_val = np.max(x[:,dim])\n x = x/max_val\n\n mag = goal_max - goal_min\n x *= mag\n x[:,dim] += goal_min\n \n return x\n \n \ndef get_quaternion(jts):\n v1 = jts[1] - jts[0]\n v2 = jts[13] - jts[0]\n \n out = np.float32([\n v1[1]*v2[2]-v2[1]*v1[2], \n v1[0]*v2[2]-v2[0]*v1[2], \n v1[0]*v2[1]-v2[0]*v1[1]])\n\n heading = np.arctan2(out[[1]], out[[0]])[0]\n attitude = np.arctan2(out[[2]], out[[0]])[0] - np.pi/2\n\n c1 = np.cos(heading/2)\n c2 = np.cos(attitude/2)\n c3 = 1\n s1 = np.sin(heading/2)\n s2 = np.sin(attitude/2)\n s3 = 0\n \n q = np.float32([\n c1*c2*c3 - s1*s2*s3,\n s1*s2*c3 + c1*c2*s3,\n s1*c2*c3 + c1*s2*s3,\n c1*s2*c3 - s1*c2*s3])\n \n return q \n\n\ndef get_angles(jts):\n v1 = jts[1] - jts[0]\n v2 = jts[13] - jts[0]\n \n out = np.float32([\n v1[1]*v2[2]-v2[1]*v1[2], \n v1[0]*v2[2]-v2[0]*v1[2], \n v1[0]*v2[1]-v2[0]*v1[1]])\n\n azim = np.arctan2(out[[1]], out[[0]])\n elev = np.arctan2(out[[2]], out[[0]])\n return azim, elev \n\n\ndef fix_2pi(z):\n new_z = z.copy()\n new_z[1,0] = closer_angle(z[0,0], z[1,0])\n new_z[1,1] = closer_angle(z[0,1], z[1,1])\n return new_z\n\n\ndef closer_angle(ang1, ang2, k_max=30):\n ang_candidates = 2*np.pi*np.arange(-k_max, k_max+1) + ang2\n return ang_candidates[np.argmin(np.abs(ang1 - ang_candidates))]\n\n\ndef hamilton_product(q1, q2):\n q = np.concatenate([\n q1[:,[0]]*q2[:,[0]] - q1[:,[1]]*q2[:,[1]] - q1[:,[2]]*q2[:,[2]] - q1[:,[3]]*q2[:,[3]], \n q1[:,[0]]*q2[:,[1]] + q1[:,[1]]*q2[:,[0]] + q1[:,[2]]*q2[:,[3]] - q1[:,[3]]*q2[:,[2]], \n q1[:,[0]]*q2[:,[2]] - q1[:,[1]]*q2[:,[3]] + q1[:,[2]]*q2[:,[0]] + q1[:,[3]]*q2[:,[1]], \n q1[:,[0]]*q2[:,[3]] + q1[:,[1]]*q2[:,[2]] - q1[:,[2]]*q2[:,[1]] + q1[:,[3]]*q2[:,[0]]], axis=1)\n return q\n\n\ndef quaternion_rotation(q, v):\n q = q.reshape((1, -1))\n u = np.concatenate([np.zeros((v.shape[0], 1)), v], axis=1).astype(np.float32)\n mask = np.float32([[1, -1, -1, -1]])\n u_new = hamilton_product(u, mask*q)\n return hamilton_product(q, u_new)[:, 1:]\n\n\ndef random_quaternion():\n \"\"\"Taken from 'Planning Algorithms', Stven LaValle, Eq. 
5.15\"\"\"\n u1, u2, u3 = np.random.rand(3)\n u1sq1 = np.sqrt(1-u1)\n u1sq2 = np.sqrt(u1)\n\n q = np.float32([\n u1sq1*np.sin(2*np.pi*u2), \n u1sq1*np.cos(2*np.pi*u2),\n u1sq2*np.sin(2*np.pi*u3),\n u1sq2*np.cos(2*np.pi*u3)]).astype(np.float32)\n \n return q\n\n\ndef rand_rotate(x, theta, az, el):\n \"\"\"axis angle rotation\"\"\"\n x = np.cos(az) * np.cos(el)\n y = np.sin(az) * np.cos(el)\n z = np.sin(el)\n\n R = np.float32([\n [np.cos(theta)+x*x*(1-np.cos(theta)), x*y*(1-np.cos(theta))-z*np.sin(theta), x*z*(1-np.cos(theta))+y*np.sin(theta)],\n [y*x*(1-np.cos(theta))+z*np.sin(theta), np.cos(theta)+y*y*(1-np.cos(theta)), y*z*(1-np.cos(theta))-x*np.sin(theta)],\n [z*x*(1-np.cos(theta))-y*np.sin(theta), z*y*(1-np.cos(theta))+x*np.sin(theta), np.cos(theta)+z*z*(1-np.cos(theta))]])\n\n return np.dot(R, x.transpose()).transpose().astype(np.float32)\n\n\ndef rotate(x, theta, axis='x'):\n if axis == 'x':\n R = np.float32(\n [[1, 0, 0], \n [0, np.cos(theta), -np.sin(theta)],\n [0, np.sin(theta), np.cos(theta)]])\n elif axis == 'y':\n R = np.float32(\n [[np.cos(theta), 0, np.sin(theta)],\n [0, 1, 0],\n [-np.sin(theta), 0, np.cos(theta)]])\n elif axis == 'z':\n R = np.float32(\n [[np.cos(theta), -np.sin(theta), 0],\n [np.sin(theta), np.cos(theta), 0],\n [0, 0, 1]])\n\n return np.dot(R, x.transpose()).transpose()\n\n\ndef load_model(mod, mod_file, device):\n if device.type == 'cpu':\n mod.load_state_dict(torch.load(mod_file, map_location='cpu'))\n else:\n mod.load_state_dict(torch.load(mod_file))\n\n mod.to(device)\n mod.eval()\n return mod\n\n\ndef lim_axes(ax, lim=[-1, 1]):\n ax.set_xlim(lim)\n ax.set_ylim(lim)\n\n\ndef lim_axes3d(ax, lim=[-0.8, 0.8]):\n ax.set_xlim(lim)\n ax.set_ylim(lim)\n ax.set_zlim(lim)\n \n\ndef stack(lst):\n lst = [jts.reshape((1,-1)) for jts in lst]\n return np.concatenate(lst, axis=0)\n\n\ndef interpolate(x, n):\n out = np.zeros((n, x.shape[1])).astype(np.float32)\n wts = np.linspace(0, 1, n)\n for i, wt in enumerate(wts):\n out[i] = x[0] + wt*(x[1] - x[0])\n return out\n\n\nif __name__ == '__main__':\n \n img, jts = data.get_hand('P1', '5')\n \n q = get_quaternion(jts.reshape((-1, 3)))\n \n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n plot_skeleton3d(jts, ax)\n\n plt.show()\n" ]
[ [ "numpy.sqrt", "numpy.linspace", "torch.load", "numpy.arctan2", "numpy.max", "numpy.concatenate", "numpy.mean", "numpy.any", "numpy.arange", "numpy.sin", "numpy.float32", "numpy.zeros", "matplotlib.pyplot.figure", "numpy.min", "numpy.random.rand", "numpy.argsort", "numpy.array", "matplotlib.pyplot.show", "numpy.sum", "numpy.abs", "numpy.cos", "numpy.ones" ] ]
dat-boris/pandas
[ "cc429cf41502ffca677bab2af2386f3387710ed4" ]
[ "pandas/plotting/_core.py" ]
[ "# being a bit too dynamic\n# pylint: disable=E1101\nfrom __future__ import division\n\nimport warnings\nimport re\nfrom collections import namedtuple\nfrom distutils.version import LooseVersion\n\nimport numpy as np\n\nfrom pandas.util._decorators import cache_readonly, Appender\nfrom pandas.compat import range, lrange, map, zip, string_types\nimport pandas.compat as compat\nfrom pandas.errors import AbstractMethodError\n\nimport pandas.core.common as com\nfrom pandas.core.base import PandasObject\nfrom pandas.core.config import get_option\nfrom pandas.core.generic import _shared_docs, _shared_doc_kwargs\n\nfrom pandas.core.dtypes.missing import isna, notna, remove_na_arraylike\nfrom pandas.core.dtypes.common import (\n is_list_like,\n is_integer,\n is_number,\n is_hashable,\n is_iterator)\nfrom pandas.core.dtypes.generic import (\n ABCSeries, ABCDataFrame, ABCPeriodIndex, ABCMultiIndex, ABCIndexClass)\n\nfrom pandas.io.formats.printing import pprint_thing\n\nfrom pandas.plotting._compat import _mpl_ge_3_0_0\nfrom pandas.plotting._style import (plot_params,\n _get_standard_colors)\nfrom pandas.plotting._tools import (_subplots, _flatten, table,\n _handle_shared_axes, _get_all_lines,\n _get_xlim, _set_ticks_props,\n format_date_labels)\n\ntry:\n from pandas.plotting import _converter\nexcept ImportError:\n _HAS_MPL = False\nelse:\n _HAS_MPL = True\n if get_option('plotting.matplotlib.register_converters'):\n _converter.register(explicit=True)\n\n\ndef _raise_if_no_mpl():\n # TODO(mpl_converter): remove once converter is explicit\n if not _HAS_MPL:\n raise ImportError(\"matplotlib is required for plotting.\")\n\n\ndef _get_standard_kind(kind):\n return {'density': 'kde'}.get(kind, kind)\n\n\ndef _gca(rc=None):\n import matplotlib.pyplot as plt\n with plt.rc_context(rc):\n return plt.gca()\n\n\ndef _gcf():\n import matplotlib.pyplot as plt\n return plt.gcf()\n\n\nclass MPLPlot(object):\n \"\"\"\n Base class for assembling a pandas plot using matplotlib\n\n Parameters\n ----------\n data :\n\n \"\"\"\n\n @property\n def _kind(self):\n \"\"\"Specify kind str. 
Must be overridden in child class\"\"\"\n raise NotImplementedError\n\n _layout_type = 'vertical'\n _default_rot = 0\n orientation = None\n _pop_attributes = ['label', 'style', 'logy', 'logx', 'loglog',\n 'mark_right', 'stacked']\n _attr_defaults = {'logy': False, 'logx': False, 'loglog': False,\n 'mark_right': True, 'stacked': False}\n\n def __init__(self, data, kind=None, by=None, subplots=False, sharex=None,\n sharey=False, use_index=True,\n figsize=None, grid=None, legend=True, rot=None,\n ax=None, fig=None, title=None, xlim=None, ylim=None,\n xticks=None, yticks=None,\n sort_columns=False, fontsize=None,\n secondary_y=False, colormap=None,\n table=False, layout=None, **kwds):\n\n _raise_if_no_mpl()\n _converter._WARN = False\n self.data = data\n self.by = by\n\n self.kind = kind\n\n self.sort_columns = sort_columns\n\n self.subplots = subplots\n\n if sharex is None:\n if ax is None:\n self.sharex = True\n else:\n # if we get an axis, the users should do the visibility\n # setting...\n self.sharex = False\n else:\n self.sharex = sharex\n\n self.sharey = sharey\n self.figsize = figsize\n self.layout = layout\n\n self.xticks = xticks\n self.yticks = yticks\n self.xlim = xlim\n self.ylim = ylim\n self.title = title\n self.use_index = use_index\n\n self.fontsize = fontsize\n\n if rot is not None:\n self.rot = rot\n # need to know for format_date_labels since it's rotated to 30 by\n # default\n self._rot_set = True\n else:\n self._rot_set = False\n self.rot = self._default_rot\n\n if grid is None:\n grid = False if secondary_y else self.plt.rcParams['axes.grid']\n\n self.grid = grid\n self.legend = legend\n self.legend_handles = []\n self.legend_labels = []\n\n for attr in self._pop_attributes:\n value = kwds.pop(attr, self._attr_defaults.get(attr, None))\n setattr(self, attr, value)\n\n self.ax = ax\n self.fig = fig\n self.axes = None\n\n # parse errorbar input if given\n xerr = kwds.pop('xerr', None)\n yerr = kwds.pop('yerr', None)\n self.errors = {}\n for kw, err in zip(['xerr', 'yerr'], [xerr, yerr]):\n self.errors[kw] = self._parse_errorbars(kw, err)\n\n if not isinstance(secondary_y, (bool, tuple, list,\n np.ndarray, ABCIndexClass)):\n secondary_y = [secondary_y]\n self.secondary_y = secondary_y\n\n # ugly TypeError if user passes matplotlib's `cmap` name.\n # Probably better to accept either.\n if 'cmap' in kwds and colormap:\n raise TypeError(\"Only specify one of `cmap` and `colormap`.\")\n elif 'cmap' in kwds:\n self.colormap = kwds.pop('cmap')\n else:\n self.colormap = colormap\n\n self.table = table\n\n self.kwds = kwds\n\n self._validate_color_args()\n\n def _validate_color_args(self):\n if 'color' not in self.kwds and 'colors' in self.kwds:\n warnings.warn((\"'colors' is being deprecated. Please use 'color'\"\n \"instead of 'colors'\"))\n colors = self.kwds.pop('colors')\n self.kwds['color'] = colors\n\n if ('color' in self.kwds and self.nseries == 1 and\n not is_list_like(self.kwds['color'])):\n # support series.plot(color='green')\n self.kwds['color'] = [self.kwds['color']]\n\n if ('color' in self.kwds and isinstance(self.kwds['color'], tuple) and\n self.nseries == 1 and len(self.kwds['color']) in (3, 4)):\n # support RGB and RGBA tuples in series plot\n self.kwds['color'] = [self.kwds['color']]\n\n if ('color' in self.kwds or 'colors' in self.kwds) and \\\n self.colormap is not None:\n warnings.warn(\"'color' and 'colormap' cannot be used \"\n \"simultaneously. 
Using 'color'\")\n\n if 'color' in self.kwds and self.style is not None:\n if is_list_like(self.style):\n styles = self.style\n else:\n styles = [self.style]\n # need only a single match\n for s in styles:\n if re.match('^[a-z]+?', s) is not None:\n raise ValueError(\n \"Cannot pass 'style' string with a color \"\n \"symbol and 'color' keyword argument. Please\"\n \" use one or the other or pass 'style' \"\n \"without a color symbol\")\n\n def _iter_data(self, data=None, keep_index=False, fillna=None):\n if data is None:\n data = self.data\n if fillna is not None:\n data = data.fillna(fillna)\n\n # TODO: unused?\n # if self.sort_columns:\n # columns = com.try_sort(data.columns)\n # else:\n # columns = data.columns\n\n for col, values in data.iteritems():\n if keep_index is True:\n yield col, values\n else:\n yield col, values.values\n\n @property\n def nseries(self):\n if self.data.ndim == 1:\n return 1\n else:\n return self.data.shape[1]\n\n def draw(self):\n self.plt.draw_if_interactive()\n\n def generate(self):\n self._args_adjust()\n self._compute_plot_data()\n self._setup_subplots()\n self._make_plot()\n self._add_table()\n self._make_legend()\n self._adorn_subplots()\n\n for ax in self.axes:\n self._post_plot_logic_common(ax, self.data)\n self._post_plot_logic(ax, self.data)\n\n def _args_adjust(self):\n pass\n\n def _has_plotted_object(self, ax):\n \"\"\"check whether ax has data\"\"\"\n return (len(ax.lines) != 0 or\n len(ax.artists) != 0 or\n len(ax.containers) != 0)\n\n def _maybe_right_yaxis(self, ax, axes_num):\n if not self.on_right(axes_num):\n # secondary axes may be passed via ax kw\n return self._get_ax_layer(ax)\n\n if hasattr(ax, 'right_ax'):\n # if it has right_ax proparty, ``ax`` must be left axes\n return ax.right_ax\n elif hasattr(ax, 'left_ax'):\n # if it has left_ax proparty, ``ax`` must be right axes\n return ax\n else:\n # otherwise, create twin axes\n orig_ax, new_ax = ax, ax.twinx()\n # TODO: use Matplotlib public API when available\n new_ax._get_lines = orig_ax._get_lines\n new_ax._get_patches_for_fill = orig_ax._get_patches_for_fill\n orig_ax.right_ax, new_ax.left_ax = new_ax, orig_ax\n\n if not self._has_plotted_object(orig_ax): # no data on left y\n orig_ax.get_yaxis().set_visible(False)\n return new_ax\n\n def _setup_subplots(self):\n if self.subplots:\n fig, axes = _subplots(naxes=self.nseries,\n sharex=self.sharex, sharey=self.sharey,\n figsize=self.figsize, ax=self.ax,\n layout=self.layout,\n layout_type=self._layout_type)\n else:\n if self.ax is None:\n fig = self.plt.figure(figsize=self.figsize)\n axes = fig.add_subplot(111)\n else:\n fig = self.ax.get_figure()\n if self.figsize is not None:\n fig.set_size_inches(self.figsize)\n axes = self.ax\n\n axes = _flatten(axes)\n\n if self.logx or self.loglog:\n [a.set_xscale('log') for a in axes]\n if self.logy or self.loglog:\n [a.set_yscale('log') for a in axes]\n\n self.fig = fig\n self.axes = axes\n\n @property\n def result(self):\n \"\"\"\n Return result axes\n \"\"\"\n if self.subplots:\n if self.layout is not None and not is_list_like(self.ax):\n return self.axes.reshape(*self.layout)\n else:\n return self.axes\n else:\n sec_true = isinstance(self.secondary_y, bool) and self.secondary_y\n all_sec = (is_list_like(self.secondary_y) and\n len(self.secondary_y) == self.nseries)\n if (sec_true or all_sec):\n # if all data is plotted on secondary, return right axes\n return self._get_ax_layer(self.axes[0], primary=False)\n else:\n return self.axes[0]\n\n def _compute_plot_data(self):\n data = self.data\n\n 
if isinstance(data, ABCSeries):\n label = self.label\n if label is None and data.name is None:\n label = 'None'\n data = data.to_frame(name=label)\n\n # GH16953, _convert is needed as fallback, for ``Series``\n # with ``dtype == object``\n data = data._convert(datetime=True, timedelta=True)\n numeric_data = data.select_dtypes(include=[np.number,\n \"datetime\",\n \"datetimetz\",\n \"timedelta\"])\n\n try:\n is_empty = numeric_data.empty\n except AttributeError:\n is_empty = not len(numeric_data)\n\n # no empty frames or series allowed\n if is_empty:\n raise TypeError('Empty {0!r}: no numeric data to '\n 'plot'.format(numeric_data.__class__.__name__))\n\n self.data = numeric_data\n\n def _make_plot(self):\n raise AbstractMethodError(self)\n\n def _add_table(self):\n if self.table is False:\n return\n elif self.table is True:\n data = self.data.transpose()\n else:\n data = self.table\n ax = self._get_ax(0)\n table(ax, data)\n\n def _post_plot_logic_common(self, ax, data):\n \"\"\"Common post process for each axes\"\"\"\n\n def get_label(i):\n try:\n return pprint_thing(data.index[i])\n except Exception:\n return ''\n\n if self.orientation == 'vertical' or self.orientation is None:\n if self._need_to_set_index:\n xticklabels = [get_label(x) for x in ax.get_xticks()]\n ax.set_xticklabels(xticklabels)\n self._apply_axis_properties(ax.xaxis, rot=self.rot,\n fontsize=self.fontsize)\n self._apply_axis_properties(ax.yaxis, fontsize=self.fontsize)\n\n if hasattr(ax, 'right_ax'):\n self._apply_axis_properties(ax.right_ax.yaxis,\n fontsize=self.fontsize)\n\n elif self.orientation == 'horizontal':\n if self._need_to_set_index:\n yticklabels = [get_label(y) for y in ax.get_yticks()]\n ax.set_yticklabels(yticklabels)\n self._apply_axis_properties(ax.yaxis, rot=self.rot,\n fontsize=self.fontsize)\n self._apply_axis_properties(ax.xaxis, fontsize=self.fontsize)\n\n if hasattr(ax, 'right_ax'):\n self._apply_axis_properties(ax.right_ax.yaxis,\n fontsize=self.fontsize)\n else: # pragma no cover\n raise ValueError\n\n def _post_plot_logic(self, ax, data):\n \"\"\"Post process for each axes. 
Overridden in child classes\"\"\"\n pass\n\n def _adorn_subplots(self):\n \"\"\"Common post process unrelated to data\"\"\"\n if len(self.axes) > 0:\n all_axes = self._get_subplots()\n nrows, ncols = self._get_axes_layout()\n _handle_shared_axes(axarr=all_axes, nplots=len(all_axes),\n naxes=nrows * ncols, nrows=nrows,\n ncols=ncols, sharex=self.sharex,\n sharey=self.sharey)\n\n for ax in self.axes:\n if self.yticks is not None:\n ax.set_yticks(self.yticks)\n\n if self.xticks is not None:\n ax.set_xticks(self.xticks)\n\n if self.ylim is not None:\n ax.set_ylim(self.ylim)\n\n if self.xlim is not None:\n ax.set_xlim(self.xlim)\n\n ax.grid(self.grid)\n\n if self.title:\n if self.subplots:\n if is_list_like(self.title):\n if len(self.title) != self.nseries:\n msg = ('The length of `title` must equal the number '\n 'of columns if using `title` of type `list` '\n 'and `subplots=True`.\\n'\n 'length of title = {}\\n'\n 'number of columns = {}').format(\n len(self.title), self.nseries)\n raise ValueError(msg)\n\n for (ax, title) in zip(self.axes, self.title):\n ax.set_title(title)\n else:\n self.fig.suptitle(self.title)\n else:\n if is_list_like(self.title):\n msg = ('Using `title` of type `list` is not supported '\n 'unless `subplots=True` is passed')\n raise ValueError(msg)\n self.axes[0].set_title(self.title)\n\n def _apply_axis_properties(self, axis, rot=None, fontsize=None):\n labels = axis.get_majorticklabels() + axis.get_minorticklabels()\n for label in labels:\n if rot is not None:\n label.set_rotation(rot)\n if fontsize is not None:\n label.set_fontsize(fontsize)\n\n @property\n def legend_title(self):\n if not isinstance(self.data.columns, ABCMultiIndex):\n name = self.data.columns.name\n if name is not None:\n name = pprint_thing(name)\n return name\n else:\n stringified = map(pprint_thing,\n self.data.columns.names)\n return ','.join(stringified)\n\n def _add_legend_handle(self, handle, label, index=None):\n if label is not None:\n if self.mark_right and index is not None:\n if self.on_right(index):\n label = label + ' (right)'\n self.legend_handles.append(handle)\n self.legend_labels.append(label)\n\n def _make_legend(self):\n ax, leg = self._get_ax_legend(self.axes[0])\n\n handles = []\n labels = []\n title = ''\n\n if not self.subplots:\n if leg is not None:\n title = leg.get_title().get_text()\n handles = leg.legendHandles\n labels = [x.get_text() for x in leg.get_texts()]\n\n if self.legend:\n if self.legend == 'reverse':\n self.legend_handles = reversed(self.legend_handles)\n self.legend_labels = reversed(self.legend_labels)\n\n handles += self.legend_handles\n labels += self.legend_labels\n if self.legend_title is not None:\n title = self.legend_title\n\n if len(handles) > 0:\n ax.legend(handles, labels, loc='best', title=title)\n\n elif self.subplots and self.legend:\n for ax in self.axes:\n if ax.get_visible():\n ax.legend(loc='best')\n\n def _get_ax_legend(self, ax):\n leg = ax.get_legend()\n other_ax = (getattr(ax, 'left_ax', None) or\n getattr(ax, 'right_ax', None))\n other_leg = None\n if other_ax is not None:\n other_leg = other_ax.get_legend()\n if leg is None and other_leg is not None:\n leg = other_leg\n ax = other_ax\n return ax, leg\n\n @cache_readonly\n def plt(self):\n import matplotlib.pyplot as plt\n return plt\n\n _need_to_set_index = False\n\n def _get_xticks(self, convert_period=False):\n index = self.data.index\n is_datetype = index.inferred_type in ('datetime', 'date',\n 'datetime64', 'time')\n\n if self.use_index:\n if convert_period and isinstance(index, 
ABCPeriodIndex):\n self.data = self.data.reindex(index=index.sort_values())\n x = self.data.index.to_timestamp()._mpl_repr()\n elif index.is_numeric():\n \"\"\"\n Matplotlib supports numeric values or datetime objects as\n xaxis values. Taking LBYL approach here, by the time\n matplotlib raises exception when using non numeric/datetime\n values for xaxis, several actions are already taken by plt.\n \"\"\"\n x = index._mpl_repr()\n elif is_datetype:\n self.data = self.data[notna(self.data.index)]\n self.data = self.data.sort_index()\n x = self.data.index._mpl_repr()\n else:\n self._need_to_set_index = True\n x = lrange(len(index))\n else:\n x = lrange(len(index))\n\n return x\n\n @classmethod\n def _plot(cls, ax, x, y, style=None, is_errorbar=False, **kwds):\n mask = isna(y)\n if mask.any():\n y = np.ma.array(y)\n y = np.ma.masked_where(mask, y)\n\n if isinstance(x, ABCIndexClass):\n x = x._mpl_repr()\n\n if is_errorbar:\n if 'xerr' in kwds:\n kwds['xerr'] = np.array(kwds.get('xerr'))\n if 'yerr' in kwds:\n kwds['yerr'] = np.array(kwds.get('yerr'))\n return ax.errorbar(x, y, **kwds)\n else:\n # prevent style kwarg from going to errorbar, where it is\n # unsupported\n if style is not None:\n args = (x, y, style)\n else:\n args = (x, y)\n return ax.plot(*args, **kwds)\n\n def _get_index_name(self):\n if isinstance(self.data.index, ABCMultiIndex):\n name = self.data.index.names\n if com._any_not_none(*name):\n name = ','.join(pprint_thing(x) for x in name)\n else:\n name = None\n else:\n name = self.data.index.name\n if name is not None:\n name = pprint_thing(name)\n\n return name\n\n @classmethod\n def _get_ax_layer(cls, ax, primary=True):\n \"\"\"get left (primary) or right (secondary) axes\"\"\"\n if primary:\n return getattr(ax, 'left_ax', ax)\n else:\n return getattr(ax, 'right_ax', ax)\n\n def _get_ax(self, i):\n # get the twinx ax if appropriate\n if self.subplots:\n ax = self.axes[i]\n ax = self._maybe_right_yaxis(ax, i)\n self.axes[i] = ax\n else:\n ax = self.axes[0]\n ax = self._maybe_right_yaxis(ax, i)\n\n ax.get_yaxis().set_visible(True)\n return ax\n\n def on_right(self, i):\n if isinstance(self.secondary_y, bool):\n return self.secondary_y\n\n if isinstance(self.secondary_y, (tuple, list,\n np.ndarray, ABCIndexClass)):\n return self.data.columns[i] in self.secondary_y\n\n def _apply_style_colors(self, colors, kwds, col_num, label):\n \"\"\"\n Manage style and color based on column number and its label.\n Returns tuple of appropriate style and kwds which \"color\" may be added.\n \"\"\"\n style = None\n if self.style is not None:\n if isinstance(self.style, list):\n try:\n style = self.style[col_num]\n except IndexError:\n pass\n elif isinstance(self.style, dict):\n style = self.style.get(label, style)\n else:\n style = self.style\n\n has_color = 'color' in kwds or self.colormap is not None\n nocolor_style = style is None or re.match('[a-z]+', style) is None\n if (has_color or self.subplots) and nocolor_style:\n kwds['color'] = colors[col_num % len(colors)]\n return style, kwds\n\n def _get_colors(self, num_colors=None, color_kwds='color'):\n if num_colors is None:\n num_colors = self.nseries\n\n return _get_standard_colors(num_colors=num_colors,\n colormap=self.colormap,\n color=self.kwds.get(color_kwds))\n\n def _parse_errorbars(self, label, err):\n \"\"\"\n Look for error keyword arguments and return the actual errorbar data\n or return the error DataFrame/dict\n\n Error bars can be specified in several ways:\n Series: the user provides a pandas.Series object of the same\n 
length as the data\n ndarray: provides a np.ndarray of the same length as the data\n DataFrame/dict: error values are paired with keys matching the\n key in the plotted DataFrame\n str: the name of the column within the plotted DataFrame\n \"\"\"\n\n if err is None:\n return None\n\n def match_labels(data, e):\n e = e.reindex(data.index)\n return e\n\n # key-matched DataFrame\n if isinstance(err, ABCDataFrame):\n\n err = match_labels(self.data, err)\n # key-matched dict\n elif isinstance(err, dict):\n pass\n\n # Series of error values\n elif isinstance(err, ABCSeries):\n # broadcast error series across data\n err = match_labels(self.data, err)\n err = np.atleast_2d(err)\n err = np.tile(err, (self.nseries, 1))\n\n # errors are a column in the dataframe\n elif isinstance(err, string_types):\n evalues = self.data[err].values\n self.data = self.data[self.data.columns.drop(err)]\n err = np.atleast_2d(evalues)\n err = np.tile(err, (self.nseries, 1))\n\n elif is_list_like(err):\n if is_iterator(err):\n err = np.atleast_2d(list(err))\n else:\n # raw error values\n err = np.atleast_2d(err)\n\n err_shape = err.shape\n\n # asymmetrical error bars\n if err.ndim == 3:\n if (err_shape[0] != self.nseries) or \\\n (err_shape[1] != 2) or \\\n (err_shape[2] != len(self.data)):\n msg = \"Asymmetrical error bars should be provided \" + \\\n \"with the shape (%u, 2, %u)\" % \\\n (self.nseries, len(self.data))\n raise ValueError(msg)\n\n # broadcast errors to each data series\n if len(err) == 1:\n err = np.tile(err, (self.nseries, 1))\n\n elif is_number(err):\n err = np.tile([err], (self.nseries, len(self.data)))\n\n else:\n msg = \"No valid {label} detected\".format(label=label)\n raise ValueError(msg)\n\n return err\n\n def _get_errorbars(self, label=None, index=None, xerr=True, yerr=True):\n errors = {}\n\n for kw, flag in zip(['xerr', 'yerr'], [xerr, yerr]):\n if flag:\n err = self.errors[kw]\n # user provided label-matched dataframe of errors\n if isinstance(err, (ABCDataFrame, dict)):\n if label is not None and label in err.keys():\n err = err[label]\n else:\n err = None\n elif index is not None and err is not None:\n err = err[index]\n\n if err is not None:\n errors[kw] = err\n return errors\n\n def _get_subplots(self):\n from matplotlib.axes import Subplot\n return [ax for ax in self.axes[0].get_figure().get_axes()\n if isinstance(ax, Subplot)]\n\n def _get_axes_layout(self):\n axes = self._get_subplots()\n x_set = set()\n y_set = set()\n for ax in axes:\n # check axes coordinates to estimate layout\n points = ax.get_position().get_points()\n x_set.add(points[0][0])\n y_set.add(points[0][1])\n return (len(y_set), len(x_set))\n\n\nclass PlanePlot(MPLPlot):\n \"\"\"\n Abstract class for plotting on plane, currently scatter and hexbin.\n \"\"\"\n\n _layout_type = 'single'\n\n def __init__(self, data, x, y, **kwargs):\n MPLPlot.__init__(self, data, **kwargs)\n if x is None or y is None:\n raise ValueError(self._kind + ' requires an x and y column')\n if is_integer(x) and not self.data.columns.holds_integer():\n x = self.data.columns[x]\n if is_integer(y) and not self.data.columns.holds_integer():\n y = self.data.columns[y]\n if len(self.data[x]._get_numeric_data()) == 0:\n raise ValueError(self._kind + ' requires x column to be numeric')\n if len(self.data[y]._get_numeric_data()) == 0:\n raise ValueError(self._kind + ' requires y column to be numeric')\n\n self.x = x\n self.y = y\n\n @property\n def nseries(self):\n return 1\n\n def _post_plot_logic(self, ax, data):\n x, y = self.x, self.y\n 
ax.set_ylabel(pprint_thing(y))\n ax.set_xlabel(pprint_thing(x))\n\n def _plot_colorbar(self, ax, **kwds):\n # Addresses issues #10611 and #10678:\n # When plotting scatterplots and hexbinplots in IPython\n # inline backend the colorbar axis height tends not to\n # exactly match the parent axis height.\n # The difference is due to small fractional differences\n # in floating points with similar representation.\n # To deal with this, this method forces the colorbar\n # height to take the height of the parent axes.\n # For a more detailed description of the issue\n # see the following link:\n # https://github.com/ipython/ipython/issues/11215\n img = ax.collections[0]\n cbar = self.fig.colorbar(img, ax=ax, **kwds)\n\n if _mpl_ge_3_0_0():\n # The workaround below is no longer necessary.\n return\n\n points = ax.get_position().get_points()\n cbar_points = cbar.ax.get_position().get_points()\n\n cbar.ax.set_position([cbar_points[0, 0],\n points[0, 1],\n cbar_points[1, 0] - cbar_points[0, 0],\n points[1, 1] - points[0, 1]])\n # To see the discrepancy in axis heights uncomment\n # the following two lines:\n # print(points[1, 1] - points[0, 1])\n # print(cbar_points[1, 1] - cbar_points[0, 1])\n\n\nclass ScatterPlot(PlanePlot):\n _kind = 'scatter'\n\n def __init__(self, data, x, y, s=None, c=None, **kwargs):\n if s is None:\n # hide the matplotlib default for size, in case we want to change\n # the handling of this argument later\n s = 20\n super(ScatterPlot, self).__init__(data, x, y, s=s, **kwargs)\n if is_integer(c) and not self.data.columns.holds_integer():\n c = self.data.columns[c]\n self.c = c\n\n def _make_plot(self):\n x, y, c, data = self.x, self.y, self.c, self.data\n ax = self.axes[0]\n\n c_is_column = is_hashable(c) and c in self.data.columns\n\n # plot a colorbar only if a colormap is provided or necessary\n cb = self.kwds.pop('colorbar', self.colormap or c_is_column)\n\n # pandas uses colormap, matplotlib uses cmap.\n cmap = self.colormap or 'Greys'\n cmap = self.plt.cm.get_cmap(cmap)\n color = self.kwds.pop(\"color\", None)\n if c is not None and color is not None:\n raise TypeError('Specify exactly one of `c` and `color`')\n elif c is None and color is None:\n c_values = self.plt.rcParams['patch.facecolor']\n elif color is not None:\n c_values = color\n elif c_is_column:\n c_values = self.data[c].values\n else:\n c_values = c\n\n if self.legend and hasattr(self, 'label'):\n label = self.label\n else:\n label = None\n scatter = ax.scatter(data[x].values, data[y].values, c=c_values,\n label=label, cmap=cmap, **self.kwds)\n if cb:\n cbar_label = c if c_is_column else ''\n self._plot_colorbar(ax, label=cbar_label)\n\n if label is not None:\n self._add_legend_handle(scatter, label)\n else:\n self.legend = False\n\n errors_x = self._get_errorbars(label=x, index=0, yerr=False)\n errors_y = self._get_errorbars(label=y, index=0, xerr=False)\n if len(errors_x) > 0 or len(errors_y) > 0:\n err_kwds = dict(errors_x, **errors_y)\n err_kwds['ecolor'] = scatter.get_facecolor()[0]\n ax.errorbar(data[x].values, data[y].values,\n linestyle='none', **err_kwds)\n\n\nclass HexBinPlot(PlanePlot):\n _kind = 'hexbin'\n\n def __init__(self, data, x, y, C=None, **kwargs):\n super(HexBinPlot, self).__init__(data, x, y, **kwargs)\n if is_integer(C) and not self.data.columns.holds_integer():\n C = self.data.columns[C]\n self.C = C\n\n def _make_plot(self):\n x, y, data, C = self.x, self.y, self.data, self.C\n ax = self.axes[0]\n # pandas uses colormap, matplotlib uses cmap.\n cmap = self.colormap or 'BuGn'\n cmap 
= self.plt.cm.get_cmap(cmap)\n cb = self.kwds.pop('colorbar', True)\n\n if C is None:\n c_values = None\n else:\n c_values = data[C].values\n\n ax.hexbin(data[x].values, data[y].values, C=c_values, cmap=cmap,\n **self.kwds)\n if cb:\n self._plot_colorbar(ax)\n\n def _make_legend(self):\n pass\n\n\nclass LinePlot(MPLPlot):\n _kind = 'line'\n _default_rot = 0\n orientation = 'vertical'\n\n def __init__(self, data, **kwargs):\n MPLPlot.__init__(self, data, **kwargs)\n if self.stacked:\n self.data = self.data.fillna(value=0)\n self.x_compat = plot_params['x_compat']\n if 'x_compat' in self.kwds:\n self.x_compat = bool(self.kwds.pop('x_compat'))\n\n def _is_ts_plot(self):\n # this is slightly deceptive\n return not self.x_compat and self.use_index and self._use_dynamic_x()\n\n def _use_dynamic_x(self):\n from pandas.plotting._timeseries import _use_dynamic_x\n return _use_dynamic_x(self._get_ax(0), self.data)\n\n def _make_plot(self):\n if self._is_ts_plot():\n from pandas.plotting._timeseries import _maybe_convert_index\n data = _maybe_convert_index(self._get_ax(0), self.data)\n\n x = data.index # dummy, not used\n plotf = self._ts_plot\n it = self._iter_data(data=data, keep_index=True)\n else:\n x = self._get_xticks(convert_period=True)\n plotf = self._plot\n it = self._iter_data()\n\n stacking_id = self._get_stacking_id()\n is_errorbar = com._any_not_none(*self.errors.values())\n\n colors = self._get_colors()\n for i, (label, y) in enumerate(it):\n ax = self._get_ax(i)\n kwds = self.kwds.copy()\n style, kwds = self._apply_style_colors(colors, kwds, i, label)\n\n errors = self._get_errorbars(label=label, index=i)\n kwds = dict(kwds, **errors)\n\n label = pprint_thing(label) # .encode('utf-8')\n kwds['label'] = label\n\n newlines = plotf(ax, x, y, style=style, column_num=i,\n stacking_id=stacking_id,\n is_errorbar=is_errorbar,\n **kwds)\n self._add_legend_handle(newlines[0], label, index=i)\n\n lines = _get_all_lines(ax)\n left, right = _get_xlim(lines)\n ax.set_xlim(left, right)\n\n @classmethod\n def _plot(cls, ax, x, y, style=None, column_num=None,\n stacking_id=None, **kwds):\n # column_num is used to get the target column from protf in line and\n # area plots\n if column_num == 0:\n cls._initialize_stacker(ax, stacking_id, len(y))\n y_values = cls._get_stacked_values(ax, stacking_id, y, kwds['label'])\n lines = MPLPlot._plot(ax, x, y_values, style=style, **kwds)\n cls._update_stacker(ax, stacking_id, y)\n return lines\n\n @classmethod\n def _ts_plot(cls, ax, x, data, style=None, **kwds):\n from pandas.plotting._timeseries import (_maybe_resample,\n _decorate_axes,\n format_dateaxis)\n # accept x to be consistent with normal plot func,\n # x is not passed to tsplot as it uses data.index as x coordinate\n # column_num must be in kwds for stacking purpose\n freq, data = _maybe_resample(data, ax, kwds)\n\n # Set ax with freq info\n _decorate_axes(ax, freq, kwds)\n # digging deeper\n if hasattr(ax, 'left_ax'):\n _decorate_axes(ax.left_ax, freq, kwds)\n if hasattr(ax, 'right_ax'):\n _decorate_axes(ax.right_ax, freq, kwds)\n ax._plot_data.append((data, cls._kind, kwds))\n\n lines = cls._plot(ax, data.index, data.values, style=style, **kwds)\n # set date formatter, locators and rescale limits\n format_dateaxis(ax, ax.freq, data.index)\n return lines\n\n def _get_stacking_id(self):\n if self.stacked:\n return id(self.data)\n else:\n return None\n\n @classmethod\n def _initialize_stacker(cls, ax, stacking_id, n):\n if stacking_id is None:\n return\n if not hasattr(ax, '_stacker_pos_prior'):\n 
ax._stacker_pos_prior = {}\n if not hasattr(ax, '_stacker_neg_prior'):\n ax._stacker_neg_prior = {}\n ax._stacker_pos_prior[stacking_id] = np.zeros(n)\n ax._stacker_neg_prior[stacking_id] = np.zeros(n)\n\n @classmethod\n def _get_stacked_values(cls, ax, stacking_id, values, label):\n if stacking_id is None:\n return values\n if not hasattr(ax, '_stacker_pos_prior'):\n # stacker may not be initialized for subplots\n cls._initialize_stacker(ax, stacking_id, len(values))\n\n if (values >= 0).all():\n return ax._stacker_pos_prior[stacking_id] + values\n elif (values <= 0).all():\n return ax._stacker_neg_prior[stacking_id] + values\n\n raise ValueError('When stacked is True, each column must be either '\n 'all positive or negative.'\n '{0} contains both positive and negative values'\n .format(label))\n\n @classmethod\n def _update_stacker(cls, ax, stacking_id, values):\n if stacking_id is None:\n return\n if (values >= 0).all():\n ax._stacker_pos_prior[stacking_id] += values\n elif (values <= 0).all():\n ax._stacker_neg_prior[stacking_id] += values\n\n def _post_plot_logic(self, ax, data):\n condition = (not self._use_dynamic_x() and\n data.index.is_all_dates and\n not self.subplots or\n (self.subplots and self.sharex))\n\n index_name = self._get_index_name()\n\n if condition:\n # irregular TS rotated 30 deg. by default\n # probably a better place to check / set this.\n if not self._rot_set:\n self.rot = 30\n format_date_labels(ax, rot=self.rot)\n\n if index_name is not None and self.use_index:\n ax.set_xlabel(index_name)\n\n\nclass AreaPlot(LinePlot):\n _kind = 'area'\n\n def __init__(self, data, **kwargs):\n kwargs.setdefault('stacked', True)\n data = data.fillna(value=0)\n LinePlot.__init__(self, data, **kwargs)\n\n if not self.stacked:\n # use smaller alpha to distinguish overlap\n self.kwds.setdefault('alpha', 0.5)\n\n if self.logy or self.loglog:\n raise ValueError(\"Log-y scales are not supported in area plot\")\n\n @classmethod\n def _plot(cls, ax, x, y, style=None, column_num=None,\n stacking_id=None, is_errorbar=False, **kwds):\n\n if column_num == 0:\n cls._initialize_stacker(ax, stacking_id, len(y))\n y_values = cls._get_stacked_values(ax, stacking_id, y, kwds['label'])\n\n # need to remove label, because subplots uses mpl legend as it is\n line_kwds = kwds.copy()\n line_kwds.pop('label')\n lines = MPLPlot._plot(ax, x, y_values, style=style, **line_kwds)\n\n # get data from the line to get coordinates for fill_between\n xdata, y_values = lines[0].get_data(orig=False)\n\n # unable to use ``_get_stacked_values`` here to get starting point\n if stacking_id is None:\n start = np.zeros(len(y))\n elif (y >= 0).all():\n start = ax._stacker_pos_prior[stacking_id]\n elif (y <= 0).all():\n start = ax._stacker_neg_prior[stacking_id]\n else:\n start = np.zeros(len(y))\n\n if 'color' not in kwds:\n kwds['color'] = lines[0].get_color()\n\n rect = ax.fill_between(xdata, start, y_values, **kwds)\n cls._update_stacker(ax, stacking_id, y)\n\n # LinePlot expects list of artists\n res = [rect]\n return res\n\n def _post_plot_logic(self, ax, data):\n LinePlot._post_plot_logic(self, ax, data)\n\n if self.ylim is None:\n if (data >= 0).all().all():\n ax.set_ylim(0, None)\n elif (data <= 0).all().all():\n ax.set_ylim(None, 0)\n\n\nclass BarPlot(MPLPlot):\n _kind = 'bar'\n _default_rot = 90\n orientation = 'vertical'\n\n def __init__(self, data, **kwargs):\n # we have to treat a series differently than a\n # 1-column DataFrame w.r.t. 
color handling\n self._is_series = isinstance(data, ABCSeries)\n self.bar_width = kwargs.pop('width', 0.5)\n pos = kwargs.pop('position', 0.5)\n kwargs.setdefault('align', 'center')\n self.tick_pos = np.arange(len(data))\n\n self.bottom = kwargs.pop('bottom', 0)\n self.left = kwargs.pop('left', 0)\n\n self.log = kwargs.pop('log', False)\n MPLPlot.__init__(self, data, **kwargs)\n\n if self.stacked or self.subplots:\n self.tickoffset = self.bar_width * pos\n if kwargs['align'] == 'edge':\n self.lim_offset = self.bar_width / 2\n else:\n self.lim_offset = 0\n else:\n if kwargs['align'] == 'edge':\n w = self.bar_width / self.nseries\n self.tickoffset = self.bar_width * (pos - 0.5) + w * 0.5\n self.lim_offset = w * 0.5\n else:\n self.tickoffset = self.bar_width * pos\n self.lim_offset = 0\n\n self.ax_pos = self.tick_pos - self.tickoffset\n\n def _args_adjust(self):\n if is_list_like(self.bottom):\n self.bottom = np.array(self.bottom)\n if is_list_like(self.left):\n self.left = np.array(self.left)\n\n @classmethod\n def _plot(cls, ax, x, y, w, start=0, log=False, **kwds):\n return ax.bar(x, y, w, bottom=start, log=log, **kwds)\n\n @property\n def _start_base(self):\n return self.bottom\n\n def _make_plot(self):\n import matplotlib as mpl\n\n colors = self._get_colors()\n ncolors = len(colors)\n\n pos_prior = neg_prior = np.zeros(len(self.data))\n K = self.nseries\n\n for i, (label, y) in enumerate(self._iter_data(fillna=0)):\n ax = self._get_ax(i)\n kwds = self.kwds.copy()\n if self._is_series:\n kwds['color'] = colors\n else:\n kwds['color'] = colors[i % ncolors]\n\n errors = self._get_errorbars(label=label, index=i)\n kwds = dict(kwds, **errors)\n\n label = pprint_thing(label)\n\n if (('yerr' in kwds) or ('xerr' in kwds)) \\\n and (kwds.get('ecolor') is None):\n kwds['ecolor'] = mpl.rcParams['xtick.color']\n\n start = 0\n if self.log and (y >= 1).all():\n start = 1\n start = start + self._start_base\n\n if self.subplots:\n w = self.bar_width / 2\n rect = self._plot(ax, self.ax_pos + w, y, self.bar_width,\n start=start, label=label,\n log=self.log, **kwds)\n ax.set_title(label)\n elif self.stacked:\n mask = y > 0\n start = np.where(mask, pos_prior, neg_prior) + self._start_base\n w = self.bar_width / 2\n rect = self._plot(ax, self.ax_pos + w, y, self.bar_width,\n start=start, label=label,\n log=self.log, **kwds)\n pos_prior = pos_prior + np.where(mask, y, 0)\n neg_prior = neg_prior + np.where(mask, 0, y)\n else:\n w = self.bar_width / K\n rect = self._plot(ax, self.ax_pos + (i + 0.5) * w, y, w,\n start=start, label=label,\n log=self.log, **kwds)\n self._add_legend_handle(rect, label, index=i)\n\n def _post_plot_logic(self, ax, data):\n if self.use_index:\n str_index = [pprint_thing(key) for key in data.index]\n else:\n str_index = [pprint_thing(key) for key in range(data.shape[0])]\n name = self._get_index_name()\n\n s_edge = self.ax_pos[0] - 0.25 + self.lim_offset\n e_edge = self.ax_pos[-1] + 0.25 + self.bar_width + self.lim_offset\n\n self._decorate_ticks(ax, name, str_index, s_edge, e_edge)\n\n def _decorate_ticks(self, ax, name, ticklabels, start_edge, end_edge):\n ax.set_xlim((start_edge, end_edge))\n ax.set_xticks(self.tick_pos)\n ax.set_xticklabels(ticklabels)\n if name is not None and self.use_index:\n ax.set_xlabel(name)\n\n\nclass BarhPlot(BarPlot):\n _kind = 'barh'\n _default_rot = 0\n orientation = 'horizontal'\n\n @property\n def _start_base(self):\n return self.left\n\n @classmethod\n def _plot(cls, ax, x, y, w, start=0, log=False, **kwds):\n return ax.barh(x, y, w, left=start, 
log=log, **kwds)\n\n def _decorate_ticks(self, ax, name, ticklabels, start_edge, end_edge):\n # horizontal bars\n ax.set_ylim((start_edge, end_edge))\n ax.set_yticks(self.tick_pos)\n ax.set_yticklabels(ticklabels)\n if name is not None and self.use_index:\n ax.set_ylabel(name)\n\n\nclass HistPlot(LinePlot):\n _kind = 'hist'\n\n def __init__(self, data, bins=10, bottom=0, **kwargs):\n self.bins = bins # use mpl default\n self.bottom = bottom\n # Do not call LinePlot.__init__ which may fill nan\n MPLPlot.__init__(self, data, **kwargs)\n\n def _args_adjust(self):\n if is_integer(self.bins):\n # create common bin edge\n values = (self.data._convert(datetime=True)._get_numeric_data())\n values = np.ravel(values)\n values = values[~isna(values)]\n\n hist, self.bins = np.histogram(\n values, bins=self.bins,\n range=self.kwds.get('range', None),\n weights=self.kwds.get('weights', None))\n\n if is_list_like(self.bottom):\n self.bottom = np.array(self.bottom)\n\n @classmethod\n def _plot(cls, ax, y, style=None, bins=None, bottom=0, column_num=0,\n stacking_id=None, **kwds):\n if column_num == 0:\n cls._initialize_stacker(ax, stacking_id, len(bins) - 1)\n y = y[~isna(y)]\n\n base = np.zeros(len(bins) - 1)\n bottom = bottom + \\\n cls._get_stacked_values(ax, stacking_id, base, kwds['label'])\n # ignore style\n n, bins, patches = ax.hist(y, bins=bins, bottom=bottom, **kwds)\n cls._update_stacker(ax, stacking_id, n)\n return patches\n\n def _make_plot(self):\n colors = self._get_colors()\n stacking_id = self._get_stacking_id()\n\n for i, (label, y) in enumerate(self._iter_data()):\n ax = self._get_ax(i)\n\n kwds = self.kwds.copy()\n\n label = pprint_thing(label)\n kwds['label'] = label\n\n style, kwds = self._apply_style_colors(colors, kwds, i, label)\n if style is not None:\n kwds['style'] = style\n\n kwds = self._make_plot_keywords(kwds, y)\n artists = self._plot(ax, y, column_num=i,\n stacking_id=stacking_id, **kwds)\n self._add_legend_handle(artists[0], label, index=i)\n\n def _make_plot_keywords(self, kwds, y):\n \"\"\"merge BoxPlot/KdePlot properties to passed kwds\"\"\"\n # y is required for KdePlot\n kwds['bottom'] = self.bottom\n kwds['bins'] = self.bins\n return kwds\n\n def _post_plot_logic(self, ax, data):\n if self.orientation == 'horizontal':\n ax.set_xlabel('Frequency')\n else:\n ax.set_ylabel('Frequency')\n\n @property\n def orientation(self):\n if self.kwds.get('orientation', None) == 'horizontal':\n return 'horizontal'\n else:\n return 'vertical'\n\n\n_kde_docstring = \"\"\"\n Generate Kernel Density Estimate plot using Gaussian kernels.\n\n In statistics, `kernel density estimation`_ (KDE) is a non-parametric\n way to estimate the probability density function (PDF) of a random\n variable. This function uses Gaussian kernels and includes automatic\n bandwidth determination.\n\n .. _kernel density estimation:\n https://en.wikipedia.org/wiki/Kernel_density_estimation\n\n Parameters\n ----------\n bw_method : str, scalar or callable, optional\n The method used to calculate the estimator bandwidth. This can be\n 'scott', 'silverman', a scalar constant or a callable.\n If None (default), 'scott' is used.\n See :class:`scipy.stats.gaussian_kde` for more information.\n ind : NumPy array or integer, optional\n Evaluation points for the estimated PDF. If None (default),\n 1000 equally spaced points are used. If `ind` is a NumPy array, the\n KDE is evaluated at the points passed. 
If `ind` is an integer,\n `ind` number of equally spaced points are used.\n **kwds : optional\n Additional keyword arguments are documented in\n :meth:`pandas.%(this-datatype)s.plot`.\n\n Returns\n -------\n axes : matplotlib.axes.Axes or numpy.ndarray of them\n\n See Also\n --------\n scipy.stats.gaussian_kde : Representation of a kernel-density\n estimate using Gaussian kernels. This is the function used\n internally to estimate the PDF.\n %(sibling-datatype)s.plot.kde : Generate a KDE plot for a\n %(sibling-datatype)s.\n\n Examples\n --------\n %(examples)s\n \"\"\"\n\n\nclass KdePlot(HistPlot):\n _kind = 'kde'\n orientation = 'vertical'\n\n def __init__(self, data, bw_method=None, ind=None, **kwargs):\n MPLPlot.__init__(self, data, **kwargs)\n self.bw_method = bw_method\n self.ind = ind\n\n def _args_adjust(self):\n pass\n\n def _get_ind(self, y):\n if self.ind is None:\n # np.nanmax() and np.nanmin() ignores the missing values\n sample_range = np.nanmax(y) - np.nanmin(y)\n ind = np.linspace(np.nanmin(y) - 0.5 * sample_range,\n np.nanmax(y) + 0.5 * sample_range, 1000)\n elif is_integer(self.ind):\n sample_range = np.nanmax(y) - np.nanmin(y)\n ind = np.linspace(np.nanmin(y) - 0.5 * sample_range,\n np.nanmax(y) + 0.5 * sample_range, self.ind)\n else:\n ind = self.ind\n return ind\n\n @classmethod\n def _plot(cls, ax, y, style=None, bw_method=None, ind=None,\n column_num=None, stacking_id=None, **kwds):\n from scipy.stats import gaussian_kde\n from scipy import __version__ as spv\n\n y = remove_na_arraylike(y)\n\n if LooseVersion(spv) >= '0.11.0':\n gkde = gaussian_kde(y, bw_method=bw_method)\n else:\n gkde = gaussian_kde(y)\n if bw_method is not None:\n msg = ('bw_method was added in Scipy 0.11.0.' +\n ' Scipy version in use is {spv}.'.format(spv=spv))\n warnings.warn(msg)\n\n y = gkde.evaluate(ind)\n lines = MPLPlot._plot(ax, ind, y, style=style, **kwds)\n return lines\n\n def _make_plot_keywords(self, kwds, y):\n kwds['bw_method'] = self.bw_method\n kwds['ind'] = self._get_ind(y)\n return kwds\n\n def _post_plot_logic(self, ax, data):\n ax.set_ylabel('Density')\n\n\nclass PiePlot(MPLPlot):\n _kind = 'pie'\n _layout_type = 'horizontal'\n\n def __init__(self, data, kind=None, **kwargs):\n data = data.fillna(value=0)\n if (data < 0).any().any():\n raise ValueError(\"{0} doesn't allow negative values\".format(kind))\n MPLPlot.__init__(self, data, kind=kind, **kwargs)\n\n def _args_adjust(self):\n self.grid = False\n self.logy = False\n self.logx = False\n self.loglog = False\n\n def _validate_color_args(self):\n pass\n\n def _make_plot(self):\n colors = self._get_colors(\n num_colors=len(self.data), color_kwds='colors')\n self.kwds.setdefault('colors', colors)\n\n for i, (label, y) in enumerate(self._iter_data()):\n ax = self._get_ax(i)\n if label is not None:\n label = pprint_thing(label)\n ax.set_ylabel(label)\n\n kwds = self.kwds.copy()\n\n def blank_labeler(label, value):\n if value == 0:\n return ''\n else:\n return label\n\n idx = [pprint_thing(v) for v in self.data.index]\n labels = kwds.pop('labels', idx)\n # labels is used for each wedge's labels\n # Blank out labels for values of 0 so they don't overlap\n # with nonzero wedges\n if labels is not None:\n blabels = [blank_labeler(l, value) for\n l, value in zip(labels, y)]\n else:\n blabels = None\n results = ax.pie(y, labels=blabels, **kwds)\n\n if kwds.get('autopct', None) is not None:\n patches, texts, autotexts = results\n else:\n patches, texts = results\n autotexts = []\n\n if self.fontsize is not None:\n for t in texts + 
autotexts:\n t.set_fontsize(self.fontsize)\n\n # leglabels is used for legend labels\n leglabels = labels if labels is not None else idx\n for p, l in zip(patches, leglabels):\n self._add_legend_handle(p, l)\n\n\nclass BoxPlot(LinePlot):\n _kind = 'box'\n _layout_type = 'horizontal'\n\n _valid_return_types = (None, 'axes', 'dict', 'both')\n # namedtuple to hold results\n BP = namedtuple(\"Boxplot\", ['ax', 'lines'])\n\n def __init__(self, data, return_type='axes', **kwargs):\n # Do not call LinePlot.__init__ which may fill nan\n if return_type not in self._valid_return_types:\n raise ValueError(\n \"return_type must be {None, 'axes', 'dict', 'both'}\")\n\n self.return_type = return_type\n MPLPlot.__init__(self, data, **kwargs)\n\n def _args_adjust(self):\n if self.subplots:\n # Disable label ax sharing. Otherwise, all subplots shows last\n # column label\n if self.orientation == 'vertical':\n self.sharex = False\n else:\n self.sharey = False\n\n @classmethod\n def _plot(cls, ax, y, column_num=None, return_type='axes', **kwds):\n if y.ndim == 2:\n y = [remove_na_arraylike(v) for v in y]\n # Boxplot fails with empty arrays, so need to add a NaN\n # if any cols are empty\n # GH 8181\n y = [v if v.size > 0 else np.array([np.nan]) for v in y]\n else:\n y = remove_na_arraylike(y)\n bp = ax.boxplot(y, **kwds)\n\n if return_type == 'dict':\n return bp, bp\n elif return_type == 'both':\n return cls.BP(ax=ax, lines=bp), bp\n else:\n return ax, bp\n\n def _validate_color_args(self):\n if 'color' in self.kwds:\n if self.colormap is not None:\n warnings.warn(\"'color' and 'colormap' cannot be used \"\n \"simultaneously. Using 'color'\")\n self.color = self.kwds.pop('color')\n\n if isinstance(self.color, dict):\n valid_keys = ['boxes', 'whiskers', 'medians', 'caps']\n for key, values in compat.iteritems(self.color):\n if key not in valid_keys:\n raise ValueError(\"color dict contains invalid \"\n \"key '{0}' \"\n \"The key must be either {1}\"\n .format(key, valid_keys))\n else:\n self.color = None\n\n # get standard colors for default\n colors = _get_standard_colors(num_colors=3,\n colormap=self.colormap,\n color=None)\n # use 2 colors by default, for box/whisker and median\n # flier colors isn't needed here\n # because it can be specified by ``sym`` kw\n self._boxes_c = colors[0]\n self._whiskers_c = colors[0]\n self._medians_c = colors[2]\n self._caps_c = 'k' # mpl default\n\n def _get_colors(self, num_colors=None, color_kwds='color'):\n pass\n\n def maybe_color_bp(self, bp):\n if isinstance(self.color, dict):\n boxes = self.color.get('boxes', self._boxes_c)\n whiskers = self.color.get('whiskers', self._whiskers_c)\n medians = self.color.get('medians', self._medians_c)\n caps = self.color.get('caps', self._caps_c)\n else:\n # Other types are forwarded to matplotlib\n # If None, use default colors\n boxes = self.color or self._boxes_c\n whiskers = self.color or self._whiskers_c\n medians = self.color or self._medians_c\n caps = self.color or self._caps_c\n\n from matplotlib.artist import setp\n setp(bp['boxes'], color=boxes, alpha=1)\n setp(bp['whiskers'], color=whiskers, alpha=1)\n setp(bp['medians'], color=medians, alpha=1)\n setp(bp['caps'], color=caps, alpha=1)\n\n def _make_plot(self):\n if self.subplots:\n from pandas.core.series import Series\n self._return_obj = Series()\n\n for i, (label, y) in enumerate(self._iter_data()):\n ax = self._get_ax(i)\n kwds = self.kwds.copy()\n\n ret, bp = self._plot(ax, y, column_num=i,\n return_type=self.return_type, **kwds)\n self.maybe_color_bp(bp)\n 
self._return_obj[label] = ret\n\n label = [pprint_thing(label)]\n self._set_ticklabels(ax, label)\n else:\n y = self.data.values.T\n ax = self._get_ax(0)\n kwds = self.kwds.copy()\n\n ret, bp = self._plot(ax, y, column_num=0,\n return_type=self.return_type, **kwds)\n self.maybe_color_bp(bp)\n self._return_obj = ret\n\n labels = [l for l, _ in self._iter_data()]\n labels = [pprint_thing(l) for l in labels]\n if not self.use_index:\n labels = [pprint_thing(key) for key in range(len(labels))]\n self._set_ticklabels(ax, labels)\n\n def _set_ticklabels(self, ax, labels):\n if self.orientation == 'vertical':\n ax.set_xticklabels(labels)\n else:\n ax.set_yticklabels(labels)\n\n def _make_legend(self):\n pass\n\n def _post_plot_logic(self, ax, data):\n pass\n\n @property\n def orientation(self):\n if self.kwds.get('vert', True):\n return 'vertical'\n else:\n return 'horizontal'\n\n @property\n def result(self):\n if self.return_type is None:\n return super(BoxPlot, self).result\n else:\n return self._return_obj\n\n\n# kinds supported by both dataframe and series\n_common_kinds = ['line', 'bar', 'barh',\n 'kde', 'density', 'area', 'hist', 'box']\n# kinds supported by dataframe\n_dataframe_kinds = ['scatter', 'hexbin']\n# kinds supported only by series or dataframe single column\n_series_kinds = ['pie']\n_all_kinds = _common_kinds + _dataframe_kinds + _series_kinds\n\n_klasses = [LinePlot, BarPlot, BarhPlot, KdePlot, HistPlot, BoxPlot,\n ScatterPlot, HexBinPlot, AreaPlot, PiePlot]\n\n_plot_klass = {}\nfor klass in _klasses:\n _plot_klass[klass._kind] = klass\n\n\ndef _plot(data, x=None, y=None, subplots=False,\n ax=None, kind='line', **kwds):\n kind = _get_standard_kind(kind.lower().strip())\n if kind in _all_kinds:\n klass = _plot_klass[kind]\n else:\n raise ValueError(\"%r is not a valid plot kind\" % kind)\n\n if kind in _dataframe_kinds:\n if isinstance(data, ABCDataFrame):\n plot_obj = klass(data, x=x, y=y, subplots=subplots, ax=ax,\n kind=kind, **kwds)\n else:\n raise ValueError(\"plot kind %r can only be used for data frames\"\n % kind)\n\n elif kind in _series_kinds:\n if isinstance(data, ABCDataFrame):\n if y is None and subplots is False:\n msg = \"{0} requires either y column or 'subplots=True'\"\n raise ValueError(msg.format(kind))\n elif y is not None:\n if is_integer(y) and not data.columns.holds_integer():\n y = data.columns[y]\n # converted to series actually. 
copy to not modify\n data = data[y].copy()\n data.index.name = y\n plot_obj = klass(data, subplots=subplots, ax=ax, kind=kind, **kwds)\n else:\n if isinstance(data, ABCDataFrame):\n data_cols = data.columns\n if x is not None:\n if is_integer(x) and not data.columns.holds_integer():\n x = data_cols[x]\n elif not isinstance(data[x], ABCSeries):\n raise ValueError(\"x must be a label or position\")\n data = data.set_index(x)\n\n if y is not None:\n # check if we have y as int or list of ints\n int_ylist = is_list_like(y) and all(is_integer(c) for c in y)\n int_y_arg = is_integer(y) or int_ylist\n if int_y_arg and not data.columns.holds_integer():\n y = data_cols[y]\n\n label_kw = kwds['label'] if 'label' in kwds else False\n for kw in ['xerr', 'yerr']:\n if (kw in kwds) and \\\n (isinstance(kwds[kw], string_types) or\n is_integer(kwds[kw])):\n try:\n kwds[kw] = data[kwds[kw]]\n except (IndexError, KeyError, TypeError):\n pass\n\n # don't overwrite\n data = data[y].copy()\n\n if isinstance(data, ABCSeries):\n label_name = label_kw or y\n data.name = label_name\n else:\n match = is_list_like(label_kw) and len(label_kw) == len(y)\n if label_kw and not match:\n raise ValueError(\n \"label should be list-like and same length as y\"\n )\n label_name = label_kw or data.columns\n data.columns = label_name\n\n plot_obj = klass(data, subplots=subplots, ax=ax, kind=kind, **kwds)\n\n plot_obj.generate()\n plot_obj.draw()\n return plot_obj.result\n\n\ndf_kind = \"\"\"- 'scatter' : scatter plot\n - 'hexbin' : hexbin plot\"\"\"\nseries_kind = \"\"\n\ndf_coord = \"\"\"x : label or position, default None\n y : label, position or list of label, positions, default None\n Allows plotting of one column versus another\"\"\"\nseries_coord = \"\"\n\ndf_unique = \"\"\"stacked : boolean, default False in line and\n bar plots, and True in area plot. If True, create stacked plot.\n sort_columns : boolean, default False\n Sort column names to determine plot ordering\n secondary_y : boolean or sequence, default False\n Whether to plot on the secondary y-axis\n If a list/tuple, which columns to plot on secondary y-axis\"\"\"\nseries_unique = \"\"\"label : label argument to provide to plot\n secondary_y : boolean or sequence of ints, default False\n If True then y-axis will be on the right\"\"\"\n\ndf_ax = \"\"\"ax : matplotlib axes object, default None\n subplots : boolean, default False\n Make separate subplots for each column\n sharex : boolean, default True if ax is None else False\n In case subplots=True, share x axis and set some x axis labels to\n invisible; defaults to True if ax is None otherwise False if an ax\n is passed in; Be aware, that passing in both an ax and sharex=True\n will alter all x axis labels for all axis in a figure!\n sharey : boolean, default False\n In case subplots=True, share y axis and set some y axis labels to\n invisible\n layout : tuple (optional)\n (rows, columns) for the layout of subplots\"\"\"\nseries_ax = \"\"\"ax : matplotlib axes object\n If not passed, uses gca()\"\"\"\n\ndf_note = \"\"\"- If `kind` = 'scatter' and the argument `c` is the name of a dataframe\n column, the values of that column are used to color each point.\n - If `kind` = 'hexbin', you can control the size of the bins with the\n `gridsize` argument. By default, a histogram of the counts around each\n `(x, y)` point is computed. 
You can specify alternative aggregations\n by passing values to the `C` and `reduce_C_function` arguments.\n `C` specifies the value at each `(x, y)` point and `reduce_C_function`\n is a function of one argument that reduces all the values in a bin to\n a single number (e.g. `mean`, `max`, `sum`, `std`).\"\"\"\nseries_note = \"\"\n\n_shared_doc_df_kwargs = dict(klass='DataFrame', klass_obj='df',\n klass_kind=df_kind, klass_coord=df_coord,\n klass_ax=df_ax, klass_unique=df_unique,\n klass_note=df_note)\n_shared_doc_series_kwargs = dict(klass='Series', klass_obj='s',\n klass_kind=series_kind,\n klass_coord=series_coord, klass_ax=series_ax,\n klass_unique=series_unique,\n klass_note=series_note)\n\n_shared_docs['plot'] = \"\"\"\n Make plots of %(klass)s using matplotlib / pylab.\n\n *New in version 0.17.0:* Each plot kind has a corresponding method on the\n ``%(klass)s.plot`` accessor:\n ``%(klass_obj)s.plot(kind='line')`` is equivalent to\n ``%(klass_obj)s.plot.line()``.\n\n Parameters\n ----------\n data : %(klass)s\n %(klass_coord)s\n kind : str\n - 'line' : line plot (default)\n - 'bar' : vertical bar plot\n - 'barh' : horizontal bar plot\n - 'hist' : histogram\n - 'box' : boxplot\n - 'kde' : Kernel Density Estimation plot\n - 'density' : same as 'kde'\n - 'area' : area plot\n - 'pie' : pie plot\n %(klass_kind)s\n %(klass_ax)s\n figsize : a tuple (width, height) in inches\n use_index : boolean, default True\n Use index as ticks for x axis\n title : string or list\n Title to use for the plot. If a string is passed, print the string at\n the top of the figure. If a list is passed and `subplots` is True,\n print each item in the list above the corresponding subplot.\n grid : boolean, default None (matlab style default)\n Axis grid lines\n legend : False/True/'reverse'\n Place legend on axis subplots\n style : list or dict\n matplotlib line style per column\n logx : boolean, default False\n Use log scaling on x axis\n logy : boolean, default False\n Use log scaling on y axis\n loglog : boolean, default False\n Use log scaling on both x and y axes\n xticks : sequence\n Values to use for the xticks\n yticks : sequence\n Values to use for the yticks\n xlim : 2-tuple/list\n ylim : 2-tuple/list\n rot : int, default None\n Rotation for ticks (xticks for vertical, yticks for horizontal plots)\n fontsize : int, default None\n Font size for xticks and yticks\n colormap : str or matplotlib colormap object, default None\n Colormap to select colors from. If string, load colormap with that name\n from matplotlib.\n colorbar : boolean, optional\n If True, plot colorbar (only relevant for 'scatter' and 'hexbin' plots)\n position : float\n Specify relative alignments for bar plot layout.\n From 0 (left/bottom-end) to 1 (right/top-end). 
Default is 0.5 (center)\n table : boolean, Series or DataFrame, default False\n If True, draw a table using the data in the DataFrame and the data will\n be transposed to meet matplotlib's default layout.\n If a Series or DataFrame is passed, use passed data to draw a table.\n yerr : DataFrame, Series, array-like, dict and str\n See :ref:`Plotting with Error Bars <visualization.errorbars>` for\n detail.\n xerr : same types as yerr.\n %(klass_unique)s\n mark_right : boolean, default True\n When using a secondary_y axis, automatically mark the column\n labels with \"(right)\" in the legend\n `**kwds` : keywords\n Options to pass to matplotlib plotting method\n\n Returns\n -------\n axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them\n\n Notes\n -----\n\n - See matplotlib documentation online for more on this subject\n - If `kind` = 'bar' or 'barh', you can specify relative alignments\n for bar plot layout by `position` keyword.\n From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)\n %(klass_note)s\n\n \"\"\"\n\n\n@Appender(_shared_docs['plot'] % _shared_doc_df_kwargs)\ndef plot_frame(data, x=None, y=None, kind='line', ax=None,\n subplots=False, sharex=None, sharey=False, layout=None,\n figsize=None, use_index=True, title=None, grid=None,\n legend=True, style=None, logx=False, logy=False, loglog=False,\n xticks=None, yticks=None, xlim=None, ylim=None,\n rot=None, fontsize=None, colormap=None, table=False,\n yerr=None, xerr=None,\n secondary_y=False, sort_columns=False,\n **kwds):\n return _plot(data, kind=kind, x=x, y=y, ax=ax,\n subplots=subplots, sharex=sharex, sharey=sharey,\n layout=layout, figsize=figsize, use_index=use_index,\n title=title, grid=grid, legend=legend,\n style=style, logx=logx, logy=logy, loglog=loglog,\n xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim,\n rot=rot, fontsize=fontsize, colormap=colormap, table=table,\n yerr=yerr, xerr=xerr,\n secondary_y=secondary_y, sort_columns=sort_columns,\n **kwds)\n\n\n@Appender(_shared_docs['plot'] % _shared_doc_series_kwargs)\ndef plot_series(data, kind='line', ax=None, # Series unique\n figsize=None, use_index=True, title=None, grid=None,\n legend=False, style=None, logx=False, logy=False, loglog=False,\n xticks=None, yticks=None, xlim=None, ylim=None,\n rot=None, fontsize=None, colormap=None, table=False,\n yerr=None, xerr=None,\n label=None, secondary_y=False, # Series unique\n **kwds):\n\n import matplotlib.pyplot as plt\n if ax is None and len(plt.get_fignums()) > 0:\n ax = _gca()\n ax = MPLPlot._get_ax_layer(ax)\n return _plot(data, kind=kind, ax=ax,\n figsize=figsize, use_index=use_index, title=title,\n grid=grid, legend=legend,\n style=style, logx=logx, logy=logy, loglog=loglog,\n xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim,\n rot=rot, fontsize=fontsize, colormap=colormap, table=table,\n yerr=yerr, xerr=xerr,\n label=label, secondary_y=secondary_y,\n **kwds)\n\n\n_shared_docs['boxplot'] = \"\"\"\n Make a box plot from DataFrame columns.\n\n Make a box-and-whisker plot from DataFrame columns, optionally grouped\n by some other columns. A box plot is a method for graphically depicting\n groups of numerical data through their quartiles.\n The box extends from the Q1 to Q3 quartile values of the data,\n with a line at the median (Q2). The whiskers extend from the edges\n of box to show the range of the data. 
The position of the whiskers\n is set by default to `1.5 * IQR (IQR = Q3 - Q1)` from the edges of the box.\n Outlier points are those past the end of the whiskers.\n\n For further details see\n Wikipedia's entry for `boxplot <https://en.wikipedia.org/wiki/Box_plot>`_.\n\n Parameters\n ----------\n column : str or list of str, optional\n Column name or list of names, or vector.\n Can be any valid input to :meth:`pandas.DataFrame.groupby`.\n by : str or array-like, optional\n Column in the DataFrame to :meth:`pandas.DataFrame.groupby`.\n One box-plot will be done per value of columns in `by`.\n ax : object of class matplotlib.axes.Axes, optional\n The matplotlib axes to be used by boxplot.\n fontsize : float or str\n Tick label font size in points or as a string (e.g., `large`).\n rot : int or float, default 0\n The rotation angle of labels (in degrees)\n with respect to the screen coordinate system.\n grid : boolean, default True\n Setting this to True will show the grid.\n figsize : A tuple (width, height) in inches\n The size of the figure to create in matplotlib.\n layout : tuple (rows, columns), optional\n For example, (3, 5) will display the subplots\n using 3 columns and 5 rows, starting from the top-left.\n return_type : {'axes', 'dict', 'both'} or None, default 'axes'\n The kind of object to return. The default is ``axes``.\n\n * 'axes' returns the matplotlib axes the boxplot is drawn on.\n * 'dict' returns a dictionary whose values are the matplotlib\n Lines of the boxplot.\n * 'both' returns a namedtuple with the axes and dict.\n * when grouping with ``by``, a Series mapping columns to\n ``return_type`` is returned.\n\n If ``return_type`` is `None`, a NumPy array\n of axes with the same shape as ``layout`` is returned.\n **kwds\n All other plotting keyword arguments to be passed to\n :func:`matplotlib.pyplot.boxplot`.\n\n Returns\n -------\n result :\n\n The return type depends on the `return_type` parameter:\n\n * 'axes' : object of class matplotlib.axes.Axes\n * 'dict' : dict of matplotlib.lines.Line2D objects\n * 'both' : a namedtuple with structure (ax, lines)\n\n For data grouped with ``by``:\n\n * :class:`~pandas.Series`\n * :class:`~numpy.array` (for ``return_type = None``)\n\n See Also\n --------\n Series.plot.hist: Make a histogram.\n matplotlib.pyplot.boxplot : Matplotlib equivalent plot.\n\n Notes\n -----\n Use ``return_type='dict'`` when you want to tweak the appearance\n of the lines after plotting. In this case a dict containing the Lines\n making up the boxes, caps, fliers, medians, and whiskers is returned.\n\n Examples\n --------\n\n Boxplots can be created for every column in the dataframe\n by ``df.boxplot()`` or indicating the columns to be used:\n\n .. plot::\n :context: close-figs\n\n >>> np.random.seed(1234)\n >>> df = pd.DataFrame(np.random.randn(10,4),\n ... columns=['Col1', 'Col2', 'Col3', 'Col4'])\n >>> boxplot = df.boxplot(column=['Col1', 'Col2', 'Col3'])\n\n Boxplots of variables distributions grouped by the values of a third\n variable can be created using the option ``by``. For instance:\n\n .. plot::\n :context: close-figs\n\n >>> df = pd.DataFrame(np.random.randn(10, 2),\n ... columns=['Col1', 'Col2'])\n >>> df['X'] = pd.Series(['A', 'A', 'A', 'A', 'A',\n ... 'B', 'B', 'B', 'B', 'B'])\n >>> boxplot = df.boxplot(by='X')\n\n A list of strings (i.e. ``['X', 'Y']``) can be passed to boxplot\n in order to group the data by combination of the variables in the x-axis:\n\n .. 
plot::\n :context: close-figs\n\n >>> df = pd.DataFrame(np.random.randn(10,3),\n ... columns=['Col1', 'Col2', 'Col3'])\n >>> df['X'] = pd.Series(['A', 'A', 'A', 'A', 'A',\n ... 'B', 'B', 'B', 'B', 'B'])\n >>> df['Y'] = pd.Series(['A', 'B', 'A', 'B', 'A',\n ... 'B', 'A', 'B', 'A', 'B'])\n >>> boxplot = df.boxplot(column=['Col1', 'Col2'], by=['X', 'Y'])\n\n The layout of boxplot can be adjusted giving a tuple to ``layout``:\n\n .. plot::\n :context: close-figs\n\n >>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X',\n ... layout=(2, 1))\n\n Additional formatting can be done to the boxplot, like suppressing the grid\n (``grid=False``), rotating the labels in the x-axis (i.e. ``rot=45``)\n or changing the fontsize (i.e. ``fontsize=15``):\n\n .. plot::\n :context: close-figs\n\n >>> boxplot = df.boxplot(grid=False, rot=45, fontsize=15)\n\n The parameter ``return_type`` can be used to select the type of element\n returned by `boxplot`. When ``return_type='axes'`` is selected,\n the matplotlib axes on which the boxplot is drawn are returned:\n\n >>> boxplot = df.boxplot(column=['Col1','Col2'], return_type='axes')\n >>> type(boxplot)\n <class 'matplotlib.axes._subplots.AxesSubplot'>\n\n When grouping with ``by``, a Series mapping columns to ``return_type``\n is returned:\n\n >>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X',\n ... return_type='axes')\n >>> type(boxplot)\n <class 'pandas.core.series.Series'>\n\n If ``return_type`` is `None`, a NumPy array of axes with the same shape\n as ``layout`` is returned:\n\n >>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X',\n ... return_type=None)\n >>> type(boxplot)\n <class 'numpy.ndarray'>\n \"\"\"\n\n\n@Appender(_shared_docs['boxplot'] % _shared_doc_kwargs)\ndef boxplot(data, column=None, by=None, ax=None, fontsize=None,\n rot=0, grid=True, figsize=None, layout=None, return_type=None,\n **kwds):\n\n # validate return_type:\n if return_type not in BoxPlot._valid_return_types:\n raise ValueError(\"return_type must be {'axes', 'dict', 'both'}\")\n\n if isinstance(data, ABCSeries):\n data = data.to_frame('x')\n column = 'x'\n\n def _get_colors():\n return _get_standard_colors(color=kwds.get('color'), num_colors=1)\n\n def maybe_color_bp(bp):\n if 'color' not in kwds:\n from matplotlib.artist import setp\n setp(bp['boxes'], color=colors[0], alpha=1)\n setp(bp['whiskers'], color=colors[0], alpha=1)\n setp(bp['medians'], color=colors[2], alpha=1)\n\n def plot_group(keys, values, ax):\n keys = [pprint_thing(x) for x in keys]\n values = [np.asarray(remove_na_arraylike(v)) for v in values]\n bp = ax.boxplot(values, **kwds)\n if fontsize is not None:\n ax.tick_params(axis='both', labelsize=fontsize)\n if kwds.get('vert', 1):\n ax.set_xticklabels(keys, rotation=rot)\n else:\n ax.set_yticklabels(keys, rotation=rot)\n maybe_color_bp(bp)\n\n # Return axes in multiplot case, maybe revisit later # 985\n if return_type == 'dict':\n return bp\n elif return_type == 'both':\n return BoxPlot.BP(ax=ax, lines=bp)\n else:\n return ax\n\n colors = _get_colors()\n if column is None:\n columns = None\n else:\n if isinstance(column, (list, tuple)):\n columns = column\n else:\n columns = [column]\n\n if by is not None:\n # Prefer array return type for 2-D plots to match the subplot layout\n # https://github.com/pandas-dev/pandas/pull/12216#issuecomment-241175580\n result = _grouped_plot_by_column(plot_group, data, columns=columns,\n by=by, grid=grid, figsize=figsize,\n ax=ax, layout=layout,\n return_type=return_type)\n else:\n if return_type is None:\n return_type 
= 'axes'\n if layout is not None:\n raise ValueError(\"The 'layout' keyword is not supported when \"\n \"'by' is None\")\n\n if ax is None:\n rc = {'figure.figsize': figsize} if figsize is not None else {}\n ax = _gca(rc)\n data = data._get_numeric_data()\n if columns is None:\n columns = data.columns\n else:\n data = data[columns]\n\n result = plot_group(columns, data.values.T, ax)\n ax.grid(grid)\n\n return result\n\n\n@Appender(_shared_docs['boxplot'] % _shared_doc_kwargs)\ndef boxplot_frame(self, column=None, by=None, ax=None, fontsize=None, rot=0,\n grid=True, figsize=None, layout=None,\n return_type=None, **kwds):\n import matplotlib.pyplot as plt\n _converter._WARN = False\n ax = boxplot(self, column=column, by=by, ax=ax, fontsize=fontsize,\n grid=grid, rot=rot, figsize=figsize, layout=layout,\n return_type=return_type, **kwds)\n plt.draw_if_interactive()\n return ax\n\n\ndef scatter_plot(data, x, y, by=None, ax=None, figsize=None, grid=False,\n **kwargs):\n \"\"\"\n Make a scatter plot from two DataFrame columns\n\n Parameters\n ----------\n data : DataFrame\n x : Column name for the x-axis values\n y : Column name for the y-axis values\n ax : Matplotlib axis object\n figsize : A tuple (width, height) in inches\n grid : Setting this to True will show the grid\n kwargs : other plotting keyword arguments\n To be passed to scatter function\n\n Returns\n -------\n fig : matplotlib.Figure\n \"\"\"\n import matplotlib.pyplot as plt\n\n kwargs.setdefault('edgecolors', 'none')\n\n def plot_group(group, ax):\n xvals = group[x].values\n yvals = group[y].values\n ax.scatter(xvals, yvals, **kwargs)\n ax.grid(grid)\n\n if by is not None:\n fig = _grouped_plot(plot_group, data, by=by, figsize=figsize, ax=ax)\n else:\n if ax is None:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n else:\n fig = ax.get_figure()\n plot_group(data, ax)\n ax.set_ylabel(pprint_thing(y))\n ax.set_xlabel(pprint_thing(x))\n\n ax.grid(grid)\n\n return fig\n\n\ndef hist_frame(data, column=None, by=None, grid=True, xlabelsize=None,\n xrot=None, ylabelsize=None, yrot=None, ax=None, sharex=False,\n sharey=False, figsize=None, layout=None, bins=10, **kwds):\n \"\"\"\n Make a histogram of the DataFrame's.\n\n A `histogram`_ is a representation of the distribution of data.\n This function calls :meth:`matplotlib.pyplot.hist`, on each series in\n the DataFrame, resulting in one histogram per column.\n\n .. _histogram: https://en.wikipedia.org/wiki/Histogram\n\n Parameters\n ----------\n data : DataFrame\n The pandas object holding the data.\n column : string or sequence\n If passed, will be used to limit data to a subset of columns.\n by : object, optional\n If passed, then used to form histograms for separate groups.\n grid : boolean, default True\n Whether to show axis grid lines.\n xlabelsize : int, default None\n If specified changes the x-axis label size.\n xrot : float, default None\n Rotation of x axis labels. For example, a value of 90 displays the\n x labels rotated 90 degrees clockwise.\n ylabelsize : int, default None\n If specified changes the y-axis label size.\n yrot : float, default None\n Rotation of y axis labels. 
For example, a value of 90 displays the\n y labels rotated 90 degrees clockwise.\n ax : Matplotlib axes object, default None\n The axes to plot the histogram on.\n sharex : boolean, default True if ax is None else False\n In case subplots=True, share x axis and set some x axis labels to\n invisible; defaults to True if ax is None otherwise False if an ax\n is passed in.\n Note that passing in both an ax and sharex=True will alter all x axis\n labels for all subplots in a figure.\n sharey : boolean, default False\n In case subplots=True, share y axis and set some y axis labels to\n invisible.\n figsize : tuple\n The size in inches of the figure to create. Uses the value in\n `matplotlib.rcParams` by default.\n layout : tuple, optional\n Tuple of (rows, columns) for the layout of the histograms.\n bins : integer or sequence, default 10\n Number of histogram bins to be used. If an integer is given, bins + 1\n bin edges are calculated and returned. If bins is a sequence, gives\n bin edges, including left edge of first bin and right edge of last\n bin. In this case, bins is returned unmodified.\n **kwds\n All other plotting keyword arguments to be passed to\n :meth:`matplotlib.pyplot.hist`.\n\n Returns\n -------\n axes : matplotlib.AxesSubplot or numpy.ndarray of them\n\n See Also\n --------\n matplotlib.pyplot.hist : Plot a histogram using matplotlib.\n\n Examples\n --------\n\n .. plot::\n :context: close-figs\n\n This example draws a histogram based on the length and width of\n some animals, displayed in three bins\n\n >>> df = pd.DataFrame({\n ... 'length': [1.5, 0.5, 1.2, 0.9, 3],\n ... 'width': [0.7, 0.2, 0.15, 0.2, 1.1]\n ... }, index= ['pig', 'rabbit', 'duck', 'chicken', 'horse'])\n >>> hist = df.hist(bins=3)\n \"\"\"\n _raise_if_no_mpl()\n _converter._WARN = False\n if by is not None:\n axes = grouped_hist(data, column=column, by=by, ax=ax, grid=grid,\n figsize=figsize, sharex=sharex, sharey=sharey,\n layout=layout, bins=bins, xlabelsize=xlabelsize,\n xrot=xrot, ylabelsize=ylabelsize,\n yrot=yrot, **kwds)\n return axes\n\n if column is not None:\n if not isinstance(column, (list, np.ndarray, ABCIndexClass)):\n column = [column]\n data = data[column]\n data = data._get_numeric_data()\n naxes = len(data.columns)\n\n fig, axes = _subplots(naxes=naxes, ax=ax, squeeze=False,\n sharex=sharex, sharey=sharey, figsize=figsize,\n layout=layout)\n _axes = _flatten(axes)\n\n for i, col in enumerate(com.try_sort(data.columns)):\n ax = _axes[i]\n ax.hist(data[col].dropna().values, bins=bins, **kwds)\n ax.set_title(col)\n ax.grid(grid)\n\n _set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot,\n ylabelsize=ylabelsize, yrot=yrot)\n fig.subplots_adjust(wspace=0.3, hspace=0.3)\n\n return axes\n\n\ndef hist_series(self, by=None, ax=None, grid=True, xlabelsize=None,\n xrot=None, ylabelsize=None, yrot=None, figsize=None,\n bins=10, **kwds):\n \"\"\"\n Draw histogram of the input series using matplotlib\n\n Parameters\n ----------\n by : object, optional\n If passed, then used to form histograms for separate groups\n ax : matplotlib axis object\n If not passed, uses gca()\n grid : boolean, default True\n Whether to show axis grid lines\n xlabelsize : int, default None\n If specified changes the x-axis label size\n xrot : float, default None\n rotation of x axis labels\n ylabelsize : int, default None\n If specified changes the y-axis label size\n yrot : float, default None\n rotation of y axis labels\n figsize : tuple, default None\n figure size in inches by default\n bins : integer or sequence, default 
10\n Number of histogram bins to be used. If an integer is given, bins + 1\n bin edges are calculated and returned. If bins is a sequence, gives\n bin edges, including left edge of first bin and right edge of last\n bin. In this case, bins is returned unmodified.\n bins: integer, default 10\n Number of histogram bins to be used\n `**kwds` : keywords\n To be passed to the actual plotting function\n\n See Also\n --------\n matplotlib.axes.Axes.hist : Plot a histogram using matplotlib.\n\n \"\"\"\n import matplotlib.pyplot as plt\n\n if by is None:\n if kwds.get('layout', None) is not None:\n raise ValueError(\"The 'layout' keyword is not supported when \"\n \"'by' is None\")\n # hack until the plotting interface is a bit more unified\n fig = kwds.pop('figure', plt.gcf() if plt.get_fignums() else\n plt.figure(figsize=figsize))\n if (figsize is not None and tuple(figsize) !=\n tuple(fig.get_size_inches())):\n fig.set_size_inches(*figsize, forward=True)\n if ax is None:\n ax = fig.gca()\n elif ax.get_figure() != fig:\n raise AssertionError('passed axis not bound to passed figure')\n values = self.dropna().values\n\n ax.hist(values, bins=bins, **kwds)\n ax.grid(grid)\n axes = np.array([ax])\n\n _set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot,\n ylabelsize=ylabelsize, yrot=yrot)\n\n else:\n if 'figure' in kwds:\n raise ValueError(\"Cannot pass 'figure' when using the \"\n \"'by' argument, since a new 'Figure' instance \"\n \"will be created\")\n axes = grouped_hist(self, by=by, ax=ax, grid=grid, figsize=figsize,\n bins=bins, xlabelsize=xlabelsize, xrot=xrot,\n ylabelsize=ylabelsize, yrot=yrot, **kwds)\n\n if hasattr(axes, 'ndim'):\n if axes.ndim == 1 and len(axes) == 1:\n return axes[0]\n return axes\n\n\ndef grouped_hist(data, column=None, by=None, ax=None, bins=50, figsize=None,\n layout=None, sharex=False, sharey=False, rot=90, grid=True,\n xlabelsize=None, xrot=None, ylabelsize=None, yrot=None,\n **kwargs):\n \"\"\"\n Grouped histogram\n\n Parameters\n ----------\n data: Series/DataFrame\n column: object, optional\n by: object, optional\n ax: axes, optional\n bins: int, default 50\n figsize: tuple, optional\n layout: optional\n sharex: boolean, default False\n sharey: boolean, default False\n rot: int, default 90\n grid: bool, default True\n kwargs: dict, keyword arguments passed to matplotlib.Axes.hist\n\n Returns\n -------\n axes: collection of Matplotlib Axes\n \"\"\"\n _raise_if_no_mpl()\n _converter._WARN = False\n\n def plot_group(group, ax):\n ax.hist(group.dropna().values, bins=bins, **kwargs)\n\n xrot = xrot or rot\n\n fig, axes = _grouped_plot(plot_group, data, column=column,\n by=by, sharex=sharex, sharey=sharey, ax=ax,\n figsize=figsize, layout=layout, rot=rot)\n\n _set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot,\n ylabelsize=ylabelsize, yrot=yrot)\n\n fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, right=0.9,\n hspace=0.5, wspace=0.3)\n return axes\n\n\ndef boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None,\n rot=0, grid=True, ax=None, figsize=None,\n layout=None, sharex=False, sharey=True, **kwds):\n \"\"\"\n Make box plots from DataFrameGroupBy data.\n\n Parameters\n ----------\n grouped : Grouped DataFrame\n subplots :\n * ``False`` - no subplots will be used\n * ``True`` - create a subplot for each group\n column : column name or list of names, or vector\n Can be any valid input to groupby\n fontsize : int or string\n rot : label rotation angle\n grid : Setting this to True will show the grid\n ax : Matplotlib axis object, default 
None\n figsize : A tuple (width, height) in inches\n layout : tuple (optional)\n (rows, columns) for the layout of the plot\n sharex : bool, default False\n Whether x-axes will be shared among subplots\n\n .. versionadded:: 0.23.1\n sharey : bool, default True\n Whether y-axes will be shared among subplots\n\n .. versionadded:: 0.23.1\n `**kwds` : Keyword Arguments\n All other plotting keyword arguments to be passed to\n matplotlib's boxplot function\n\n Returns\n -------\n dict of key/value = group key/DataFrame.boxplot return value\n or DataFrame.boxplot return value in case subplots=figures=False\n\n Examples\n --------\n >>> import itertools\n >>> tuples = [t for t in itertools.product(range(1000), range(4))]\n >>> index = pd.MultiIndex.from_tuples(tuples, names=['lvl0', 'lvl1'])\n >>> data = np.random.randn(len(index),4)\n >>> df = pd.DataFrame(data, columns=list('ABCD'), index=index)\n >>>\n >>> grouped = df.groupby(level='lvl1')\n >>> boxplot_frame_groupby(grouped)\n >>>\n >>> grouped = df.unstack(level='lvl1').groupby(level=0, axis=1)\n >>> boxplot_frame_groupby(grouped, subplots=False)\n \"\"\"\n _raise_if_no_mpl()\n _converter._WARN = False\n if subplots is True:\n naxes = len(grouped)\n fig, axes = _subplots(naxes=naxes, squeeze=False,\n ax=ax, sharex=sharex, sharey=sharey,\n figsize=figsize, layout=layout)\n axes = _flatten(axes)\n\n from pandas.core.series import Series\n ret = Series()\n for (key, group), ax in zip(grouped, axes):\n d = group.boxplot(ax=ax, column=column, fontsize=fontsize,\n rot=rot, grid=grid, **kwds)\n ax.set_title(pprint_thing(key))\n ret.loc[key] = d\n fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1,\n right=0.9, wspace=0.2)\n else:\n from pandas.core.reshape.concat import concat\n keys, frames = zip(*grouped)\n if grouped.axis == 0:\n df = concat(frames, keys=keys, axis=1)\n else:\n if len(frames) > 1:\n df = frames[0].join(frames[1::])\n else:\n df = frames[0]\n ret = df.boxplot(column=column, fontsize=fontsize, rot=rot,\n grid=grid, ax=ax, figsize=figsize,\n layout=layout, **kwds)\n return ret\n\n\ndef _grouped_plot(plotf, data, column=None, by=None, numeric_only=True,\n figsize=None, sharex=True, sharey=True, layout=None,\n rot=0, ax=None, **kwargs):\n\n if figsize == 'default':\n # allowed to specify mpl default with 'default'\n warnings.warn(\"figsize='default' is deprecated. 
Specify figure\"\n \"size by tuple instead\", FutureWarning, stacklevel=4)\n figsize = None\n\n grouped = data.groupby(by)\n if column is not None:\n grouped = grouped[column]\n\n naxes = len(grouped)\n fig, axes = _subplots(naxes=naxes, figsize=figsize,\n sharex=sharex, sharey=sharey, ax=ax,\n layout=layout)\n\n _axes = _flatten(axes)\n\n for i, (key, group) in enumerate(grouped):\n ax = _axes[i]\n if numeric_only and isinstance(group, ABCDataFrame):\n group = group._get_numeric_data()\n plotf(group, ax, **kwargs)\n ax.set_title(pprint_thing(key))\n\n return fig, axes\n\n\ndef _grouped_plot_by_column(plotf, data, columns=None, by=None,\n numeric_only=True, grid=False,\n figsize=None, ax=None, layout=None,\n return_type=None, **kwargs):\n grouped = data.groupby(by)\n if columns is None:\n if not isinstance(by, (list, tuple)):\n by = [by]\n columns = data._get_numeric_data().columns.difference(by)\n naxes = len(columns)\n fig, axes = _subplots(naxes=naxes, sharex=True, sharey=True,\n figsize=figsize, ax=ax, layout=layout)\n\n _axes = _flatten(axes)\n\n ax_values = []\n\n for i, col in enumerate(columns):\n ax = _axes[i]\n gp_col = grouped[col]\n keys, values = zip(*gp_col)\n re_plotf = plotf(keys, values, ax, **kwargs)\n ax.set_title(col)\n ax.set_xlabel(pprint_thing(by))\n ax_values.append(re_plotf)\n ax.grid(grid)\n\n from pandas.core.series import Series\n result = Series(ax_values, index=columns)\n\n # Return axes in multiplot case, maybe revisit later # 985\n if return_type is None:\n result = axes\n\n byline = by[0] if len(by) == 1 else by\n fig.suptitle('Boxplot grouped by {byline}'.format(byline=byline))\n fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, right=0.9, wspace=0.2)\n\n return result\n\n\nclass BasePlotMethods(PandasObject):\n\n def __init__(self, data):\n self._parent = data # can be Series or DataFrame\n\n def __call__(self, *args, **kwargs):\n raise NotImplementedError\n\n\nclass SeriesPlotMethods(BasePlotMethods):\n \"\"\"Series plotting accessor and method\n\n Examples\n --------\n >>> s.plot.line()\n >>> s.plot.bar()\n >>> s.plot.hist()\n\n Plotting methods can also be accessed by calling the accessor as a method\n with the ``kind`` argument:\n ``s.plot(kind='line')`` is equivalent to ``s.plot.line()``\n \"\"\"\n\n def __call__(self, kind='line', ax=None,\n figsize=None, use_index=True, title=None, grid=None,\n legend=False, style=None, logx=False, logy=False,\n loglog=False, xticks=None, yticks=None,\n xlim=None, ylim=None,\n rot=None, fontsize=None, colormap=None, table=False,\n yerr=None, xerr=None,\n label=None, secondary_y=False, **kwds):\n return plot_series(self._parent, kind=kind, ax=ax, figsize=figsize,\n use_index=use_index, title=title, grid=grid,\n legend=legend, style=style, logx=logx, logy=logy,\n loglog=loglog, xticks=xticks, yticks=yticks,\n xlim=xlim, ylim=ylim, rot=rot, fontsize=fontsize,\n colormap=colormap, table=table, yerr=yerr,\n xerr=xerr, label=label, secondary_y=secondary_y,\n **kwds)\n __call__.__doc__ = plot_series.__doc__\n\n def line(self, **kwds):\n \"\"\"\n Line plot\n\n Parameters\n ----------\n `**kwds` : optional\n Additional keyword arguments are documented in\n :meth:`pandas.Series.plot`.\n\n Returns\n -------\n axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them\n\n Examples\n --------\n\n .. 
plot::\n :context: close-figs\n\n >>> s = pd.Series([1, 3, 2])\n >>> s.plot.line()\n \"\"\"\n return self(kind='line', **kwds)\n\n def bar(self, **kwds):\n \"\"\"\n Vertical bar plot\n\n Parameters\n ----------\n `**kwds` : optional\n Additional keyword arguments are documented in\n :meth:`pandas.Series.plot`.\n\n Returns\n -------\n axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them\n \"\"\"\n return self(kind='bar', **kwds)\n\n def barh(self, **kwds):\n \"\"\"\n Horizontal bar plot\n\n Parameters\n ----------\n `**kwds` : optional\n Additional keyword arguments are documented in\n :meth:`pandas.Series.plot`.\n\n Returns\n -------\n axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them\n \"\"\"\n return self(kind='barh', **kwds)\n\n def box(self, **kwds):\n \"\"\"\n Boxplot\n\n Parameters\n ----------\n `**kwds` : optional\n Additional keyword arguments are documented in\n :meth:`pandas.Series.plot`.\n\n Returns\n -------\n axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them\n \"\"\"\n return self(kind='box', **kwds)\n\n def hist(self, bins=10, **kwds):\n \"\"\"\n Histogram\n\n Parameters\n ----------\n bins: integer, default 10\n Number of histogram bins to be used\n `**kwds` : optional\n Additional keyword arguments are documented in\n :meth:`pandas.Series.plot`.\n\n Returns\n -------\n axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them\n \"\"\"\n return self(kind='hist', bins=bins, **kwds)\n\n @Appender(_kde_docstring % {\n 'this-datatype': 'Series',\n 'sibling-datatype': 'DataFrame',\n 'examples': \"\"\"\n Given a Series of points randomly sampled from an unknown\n distribution, estimate its PDF using KDE with automatic\n bandwidth determination and plot the results, evaluating them at\n 1000 equally spaced points (default):\n\n .. plot::\n :context: close-figs\n\n >>> s = pd.Series([1, 2, 2.5, 3, 3.5, 4, 5])\n >>> ax = s.plot.kde()\n\n A scalar bandwidth can be specified. Using a small bandwidth value can\n lead to over-fitting, while using a large bandwidth value may result\n in under-fitting:\n\n .. plot::\n :context: close-figs\n\n >>> ax = s.plot.kde(bw_method=0.3)\n\n .. plot::\n :context: close-figs\n\n >>> ax = s.plot.kde(bw_method=3)\n\n Finally, the `ind` parameter determines the evaluation points for the\n plot of the estimated PDF:\n\n .. 
plot::\n :context: close-figs\n\n >>> ax = s.plot.kde(ind=[1, 2, 3, 4, 5])\n \"\"\".strip()\n })\n def kde(self, bw_method=None, ind=None, **kwds):\n return self(kind='kde', bw_method=bw_method, ind=ind, **kwds)\n\n density = kde\n\n def area(self, **kwds):\n \"\"\"\n Area plot\n\n Parameters\n ----------\n `**kwds` : optional\n Additional keyword arguments are documented in\n :meth:`pandas.Series.plot`.\n\n Returns\n -------\n axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them\n \"\"\"\n return self(kind='area', **kwds)\n\n def pie(self, **kwds):\n \"\"\"\n Pie chart\n\n Parameters\n ----------\n `**kwds` : optional\n Additional keyword arguments are documented in\n :meth:`pandas.Series.plot`.\n\n Returns\n -------\n axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them\n \"\"\"\n return self(kind='pie', **kwds)\n\n\nclass FramePlotMethods(BasePlotMethods):\n \"\"\"DataFrame plotting accessor and method\n\n Examples\n --------\n >>> df.plot.line()\n >>> df.plot.scatter('x', 'y')\n >>> df.plot.hexbin()\n\n These plotting methods can also be accessed by calling the accessor as a\n method with the ``kind`` argument:\n ``df.plot(kind='line')`` is equivalent to ``df.plot.line()``\n \"\"\"\n\n def __call__(self, x=None, y=None, kind='line', ax=None,\n subplots=False, sharex=None, sharey=False, layout=None,\n figsize=None, use_index=True, title=None, grid=None,\n legend=True, style=None, logx=False, logy=False, loglog=False,\n xticks=None, yticks=None, xlim=None, ylim=None,\n rot=None, fontsize=None, colormap=None, table=False,\n yerr=None, xerr=None,\n secondary_y=False, sort_columns=False, **kwds):\n return plot_frame(self._parent, kind=kind, x=x, y=y, ax=ax,\n subplots=subplots, sharex=sharex, sharey=sharey,\n layout=layout, figsize=figsize, use_index=use_index,\n title=title, grid=grid, legend=legend, style=style,\n logx=logx, logy=logy, loglog=loglog, xticks=xticks,\n yticks=yticks, xlim=xlim, ylim=ylim, rot=rot,\n fontsize=fontsize, colormap=colormap, table=table,\n yerr=yerr, xerr=xerr, secondary_y=secondary_y,\n sort_columns=sort_columns, **kwds)\n __call__.__doc__ = plot_frame.__doc__\n\n def line(self, x=None, y=None, **kwds):\n \"\"\"\n Plot DataFrame columns as lines.\n\n This function is useful to plot lines using DataFrame's values\n as coordinates.\n\n Parameters\n ----------\n x : int or str, optional\n Columns to use for the horizontal axis.\n Either the location or the label of the columns to be used.\n By default, it will use the DataFrame indices.\n y : int, str, or list of them, optional\n The values to be plotted.\n Either the location or the label of the columns to be used.\n By default, it will use the remaining DataFrame numeric columns.\n **kwds\n Keyword arguments to pass on to :meth:`pandas.DataFrame.plot`.\n\n Returns\n -------\n axes : :class:`matplotlib.axes.Axes` or :class:`numpy.ndarray`\n Returns an ndarray when ``subplots=True``.\n\n See Also\n --------\n matplotlib.pyplot.plot : Plot y versus x as lines and/or markers.\n\n Examples\n --------\n\n .. plot::\n :context: close-figs\n\n The following example shows the populations for some animals\n over the years.\n\n >>> df = pd.DataFrame({\n ... 'pig': [20, 18, 489, 675, 1776],\n ... 'horse': [4, 25, 281, 600, 1900]\n ... }, index=[1990, 1997, 2003, 2009, 2014])\n >>> lines = df.plot.line()\n\n .. plot::\n :context: close-figs\n\n An example with subplots, so an array of axes is returned.\n\n >>> axes = df.plot.line(subplots=True)\n >>> type(axes)\n <class 'numpy.ndarray'>\n\n .. 
plot::\n :context: close-figs\n\n The following example shows the relationship between both\n populations.\n\n >>> lines = df.plot.line(x='pig', y='horse')\n \"\"\"\n return self(kind='line', x=x, y=y, **kwds)\n\n def bar(self, x=None, y=None, **kwds):\n \"\"\"\n Vertical bar plot.\n\n A bar plot is a plot that presents categorical data with\n rectangular bars with lengths proportional to the values that they\n represent. A bar plot shows comparisons among discrete categories. One\n axis of the plot shows the specific categories being compared, and the\n other axis represents a measured value.\n\n Parameters\n ----------\n x : label or position, optional\n Allows plotting of one column versus another. If not specified,\n the index of the DataFrame is used.\n y : label or position, optional\n Allows plotting of one column versus another. If not specified,\n all numerical columns are used.\n **kwds\n Additional keyword arguments are documented in\n :meth:`pandas.DataFrame.plot`.\n\n Returns\n -------\n axes : matplotlib.axes.Axes or np.ndarray of them\n An ndarray is returned with one :class:`matplotlib.axes.Axes`\n per column when ``subplots=True``.\n\n See Also\n --------\n pandas.DataFrame.plot.barh : Horizontal bar plot.\n pandas.DataFrame.plot : Make plots of a DataFrame.\n matplotlib.pyplot.bar : Make a bar plot with matplotlib.\n\n Examples\n --------\n Basic plot.\n\n .. plot::\n :context: close-figs\n\n >>> df = pd.DataFrame({'lab':['A', 'B', 'C'], 'val':[10, 30, 20]})\n >>> ax = df.plot.bar(x='lab', y='val', rot=0)\n\n Plot a whole dataframe to a bar plot. Each column is assigned a\n distinct color, and each row is nested in a group along the\n horizontal axis.\n\n .. plot::\n :context: close-figs\n\n >>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]\n >>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]\n >>> index = ['snail', 'pig', 'elephant',\n ... 'rabbit', 'giraffe', 'coyote', 'horse']\n >>> df = pd.DataFrame({'speed': speed,\n ... 'lifespan': lifespan}, index=index)\n >>> ax = df.plot.bar(rot=0)\n\n Instead of nesting, the figure can be split by column with\n ``subplots=True``. In this case, a :class:`numpy.ndarray` of\n :class:`matplotlib.axes.Axes` are returned.\n\n .. plot::\n :context: close-figs\n\n >>> axes = df.plot.bar(rot=0, subplots=True)\n >>> axes[1].legend(loc=2) # doctest: +SKIP\n\n Plot a single column.\n\n .. plot::\n :context: close-figs\n\n >>> ax = df.plot.bar(y='speed', rot=0)\n\n Plot only selected categories for the DataFrame.\n\n .. plot::\n :context: close-figs\n\n >>> ax = df.plot.bar(x='lifespan', rot=0)\n \"\"\"\n return self(kind='bar', x=x, y=y, **kwds)\n\n def barh(self, x=None, y=None, **kwds):\n \"\"\"\n Make a horizontal bar plot.\n\n A horizontal bar plot is a plot that presents quantitative data with\n rectangular bars with lengths proportional to the values that they\n represent. A bar plot shows comparisons among discrete categories. 
One\n axis of the plot shows the specific categories being compared, and the\n other axis represents a measured value.\n\n Parameters\n ----------\n x : label or position, default DataFrame.index\n Column to be used for categories.\n y : label or position, default All numeric columns in dataframe\n Columns to be plotted from the DataFrame.\n **kwds\n Keyword arguments to pass on to :meth:`pandas.DataFrame.plot`.\n\n Returns\n -------\n axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them.\n\n See Also\n --------\n pandas.DataFrame.plot.bar: Vertical bar plot.\n pandas.DataFrame.plot : Make plots of DataFrame using matplotlib.\n matplotlib.axes.Axes.bar : Plot a vertical bar plot using matplotlib.\n\n Examples\n --------\n Basic example\n\n .. plot::\n :context: close-figs\n\n >>> df = pd.DataFrame({'lab':['A', 'B', 'C'], 'val':[10, 30, 20]})\n >>> ax = df.plot.barh(x='lab', y='val')\n\n Plot a whole DataFrame to a horizontal bar plot\n\n .. plot::\n :context: close-figs\n\n >>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]\n >>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]\n >>> index = ['snail', 'pig', 'elephant',\n ... 'rabbit', 'giraffe', 'coyote', 'horse']\n >>> df = pd.DataFrame({'speed': speed,\n ... 'lifespan': lifespan}, index=index)\n >>> ax = df.plot.barh()\n\n Plot a column of the DataFrame to a horizontal bar plot\n\n .. plot::\n :context: close-figs\n\n >>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]\n >>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]\n >>> index = ['snail', 'pig', 'elephant',\n ... 'rabbit', 'giraffe', 'coyote', 'horse']\n >>> df = pd.DataFrame({'speed': speed,\n ... 'lifespan': lifespan}, index=index)\n >>> ax = df.plot.barh(y='speed')\n\n Plot DataFrame versus the desired column\n\n .. plot::\n :context: close-figs\n\n >>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]\n >>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]\n >>> index = ['snail', 'pig', 'elephant',\n ... 'rabbit', 'giraffe', 'coyote', 'horse']\n >>> df = pd.DataFrame({'speed': speed,\n ... 'lifespan': lifespan}, index=index)\n >>> ax = df.plot.barh(x='lifespan')\n \"\"\"\n return self(kind='barh', x=x, y=y, **kwds)\n\n def box(self, by=None, **kwds):\n r\"\"\"\n Make a box plot of the DataFrame columns.\n\n A box plot is a method for graphically depicting groups of numerical\n data through their quartiles.\n The box extends from the Q1 to Q3 quartile values of the data,\n with a line at the median (Q2). The whiskers extend from the edges\n of box to show the range of the data. The position of the whiskers\n is set by default to 1.5*IQR (IQR = Q3 - Q1) from the edges of the\n box. Outlier points are those past the end of the whiskers.\n\n For further details see Wikipedia's\n entry for `boxplot <https://en.wikipedia.org/wiki/Box_plot>`__.\n\n A consideration when using this chart is that the box and the whiskers\n can overlap, which is very common when plotting small sets of data.\n\n Parameters\n ----------\n by : string or sequence\n Column in the DataFrame to group by.\n **kwds : optional\n Additional keywords are documented in\n :meth:`pandas.DataFrame.plot`.\n\n Returns\n -------\n axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them\n\n See Also\n --------\n pandas.DataFrame.boxplot: Another method to draw a box plot.\n pandas.Series.plot.box: Draw a box plot from a Series object.\n matplotlib.pyplot.boxplot: Draw a box plot in matplotlib.\n\n Examples\n --------\n Draw a box plot from a DataFrame with four columns of randomly\n generated data.\n\n .. 
plot::\n :context: close-figs\n\n >>> data = np.random.randn(25, 4)\n >>> df = pd.DataFrame(data, columns=list('ABCD'))\n >>> ax = df.plot.box()\n \"\"\"\n return self(kind='box', by=by, **kwds)\n\n def hist(self, by=None, bins=10, **kwds):\n \"\"\"\n Draw one histogram of the DataFrame's columns.\n\n A histogram is a representation of the distribution of data.\n This function groups the values of all given Series in the DataFrame\n into bins and draws all bins in one :class:`matplotlib.axes.Axes`.\n This is useful when the DataFrame's Series are in a similar scale.\n\n Parameters\n ----------\n by : str or sequence, optional\n Column in the DataFrame to group by.\n bins : int, default 10\n Number of histogram bins to be used.\n **kwds\n Additional keyword arguments are documented in\n :meth:`pandas.DataFrame.plot`.\n\n Returns\n -------\n axes : matplotlib.AxesSubplot histogram.\n\n See Also\n --------\n DataFrame.hist : Draw histograms per DataFrame's Series.\n Series.hist : Draw a histogram with Series' data.\n\n Examples\n --------\n When we draw a dice 6000 times, we expect to get each value around 1000\n times. But when we draw two dices and sum the result, the distribution\n is going to be quite different. A histogram illustrates those\n distributions.\n\n .. plot::\n :context: close-figs\n\n >>> df = pd.DataFrame(\n ... np.random.randint(1, 7, 6000),\n ... columns = ['one'])\n >>> df['two'] = df['one'] + np.random.randint(1, 7, 6000)\n >>> ax = df.plot.hist(bins=12, alpha=0.5)\n \"\"\"\n return self(kind='hist', by=by, bins=bins, **kwds)\n\n @Appender(_kde_docstring % {\n 'this-datatype': 'DataFrame',\n 'sibling-datatype': 'Series',\n 'examples': \"\"\"\n Given several Series of points randomly sampled from unknown\n distributions, estimate their PDFs using KDE with automatic\n bandwidth determination and plot the results, evaluating them at\n 1000 equally spaced points (default):\n\n .. plot::\n :context: close-figs\n\n >>> df = pd.DataFrame({\n ... 'x': [1, 2, 2.5, 3, 3.5, 4, 5],\n ... 'y': [4, 4, 4.5, 5, 5.5, 6, 6],\n ... })\n >>> ax = df.plot.kde()\n\n A scalar bandwidth can be specified. Using a small bandwidth value can\n lead to over-fitting, while using a large bandwidth value may result\n in under-fitting:\n\n .. plot::\n :context: close-figs\n\n >>> ax = df.plot.kde(bw_method=0.3)\n\n .. plot::\n :context: close-figs\n\n >>> ax = df.plot.kde(bw_method=3)\n\n Finally, the `ind` parameter determines the evaluation points for the\n plot of the estimated PDF:\n\n .. plot::\n :context: close-figs\n\n >>> ax = df.plot.kde(ind=[1, 2, 3, 4, 5, 6])\n \"\"\".strip()\n })\n def kde(self, bw_method=None, ind=None, **kwds):\n return self(kind='kde', bw_method=bw_method, ind=ind, **kwds)\n\n density = kde\n\n def area(self, x=None, y=None, **kwds):\n \"\"\"\n Draw a stacked area plot.\n\n An area plot displays quantitative data visually.\n This function wraps the matplotlib area function.\n\n Parameters\n ----------\n x : label or position, optional\n Coordinates for the X axis. By default uses the index.\n y : label or position, optional\n Column to plot. By default uses all columns.\n stacked : bool, default True\n Area plots are stacked by default. 
Set to False to create a\n unstacked plot.\n **kwds : optional\n Additional keyword arguments are documented in\n :meth:`pandas.DataFrame.plot`.\n\n Returns\n -------\n matplotlib.axes.Axes or numpy.ndarray\n Area plot, or array of area plots if subplots is True\n\n See Also\n --------\n DataFrame.plot : Make plots of DataFrame using matplotlib / pylab.\n\n Examples\n --------\n Draw an area plot based on basic business metrics:\n\n .. plot::\n :context: close-figs\n\n >>> df = pd.DataFrame({\n ... 'sales': [3, 2, 3, 9, 10, 6],\n ... 'signups': [5, 5, 6, 12, 14, 13],\n ... 'visits': [20, 42, 28, 62, 81, 50],\n ... }, index=pd.date_range(start='2018/01/01', end='2018/07/01',\n ... freq='M'))\n >>> ax = df.plot.area()\n\n Area plots are stacked by default. To produce an unstacked plot,\n pass ``stacked=False``:\n\n .. plot::\n :context: close-figs\n\n >>> ax = df.plot.area(stacked=False)\n\n Draw an area plot for a single column:\n\n .. plot::\n :context: close-figs\n\n >>> ax = df.plot.area(y='sales')\n\n Draw with a different `x`:\n\n .. plot::\n :context: close-figs\n\n >>> df = pd.DataFrame({\n ... 'sales': [3, 2, 3],\n ... 'visits': [20, 42, 28],\n ... 'day': [1, 2, 3],\n ... })\n >>> ax = df.plot.area(x='day')\n \"\"\"\n return self(kind='area', x=x, y=y, **kwds)\n\n def pie(self, y=None, **kwds):\n \"\"\"\n Generate a pie plot.\n\n A pie plot is a proportional representation of the numerical data in a\n column. This function wraps :meth:`matplotlib.pyplot.pie` for the\n specified column. If no column reference is passed and\n ``subplots=True`` a pie plot is drawn for each numerical column\n independently.\n\n Parameters\n ----------\n y : int or label, optional\n Label or position of the column to plot.\n If not provided, ``subplots=True`` argument must be passed.\n **kwds\n Keyword arguments to pass on to :meth:`pandas.DataFrame.plot`.\n\n Returns\n -------\n axes : matplotlib.axes.Axes or np.ndarray of them.\n A NumPy array is returned when `subplots` is True.\n\n See Also\n --------\n Series.plot.pie : Generate a pie plot for a Series.\n DataFrame.plot : Make plots of a DataFrame.\n\n Examples\n --------\n In the example below we have a DataFrame with the information about\n planet's mass and radius. We pass the the 'mass' column to the\n pie function to get a pie plot.\n\n .. plot::\n :context: close-figs\n\n >>> df = pd.DataFrame({'mass': [0.330, 4.87 , 5.97],\n ... 'radius': [2439.7, 6051.8, 6378.1]},\n ... index=['Mercury', 'Venus', 'Earth'])\n >>> plot = df.plot.pie(y='mass', figsize=(5, 5))\n\n .. plot::\n :context: close-figs\n\n >>> plot = df.plot.pie(subplots=True, figsize=(6, 3))\n\n \"\"\"\n return self(kind='pie', y=y, **kwds)\n\n def scatter(self, x, y, s=None, c=None, **kwds):\n \"\"\"\n Create a scatter plot with varying marker point size and color.\n\n The coordinates of each point are defined by two dataframe columns and\n filled circles are used to represent each point. This kind of plot is\n useful to see complex correlations between two variables. Points could\n be for instance natural 2D coordinates like longitude and latitude in\n a map or, in general, any pair of metrics that can be plotted against\n each other.\n\n Parameters\n ----------\n x : int or str\n The column name or column position to be used as horizontal\n coordinates for each point.\n y : int or str\n The column name or column position to be used as vertical\n coordinates for each point.\n s : scalar or array_like, optional\n The size of each point. 
Possible values are:\n\n - A single scalar so all points have the same size.\n\n - A sequence of scalars, which will be used for each point's size\n recursively. For instance, when passing [2,14] all points size\n will be either 2 or 14, alternatively.\n\n c : str, int or array_like, optional\n The color of each point. Possible values are:\n\n - A single color string referred to by name, RGB or RGBA code,\n for instance 'red' or '#a98d19'.\n\n - A sequence of color strings referred to by name, RGB or RGBA\n code, which will be used for each point's color recursively. For\n instance ['green','yellow'] all points will be filled in green or\n yellow, alternatively.\n\n - A column name or position whose values will be used to color the\n marker points according to a colormap.\n\n **kwds\n Keyword arguments to pass on to :meth:`pandas.DataFrame.plot`.\n\n Returns\n -------\n axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them\n\n See Also\n --------\n matplotlib.pyplot.scatter : scatter plot using multiple input data\n formats.\n\n Examples\n --------\n Let's see how to draw a scatter plot using coordinates from the values\n in a DataFrame's columns.\n\n .. plot::\n :context: close-figs\n\n >>> df = pd.DataFrame([[5.1, 3.5, 0], [4.9, 3.0, 0], [7.0, 3.2, 1],\n ... [6.4, 3.2, 1], [5.9, 3.0, 2]],\n ... columns=['length', 'width', 'species'])\n >>> ax1 = df.plot.scatter(x='length',\n ... y='width',\n ... c='DarkBlue')\n\n And now with the color determined by a column as well.\n\n .. plot::\n :context: close-figs\n\n >>> ax2 = df.plot.scatter(x='length',\n ... y='width',\n ... c='species',\n ... colormap='viridis')\n \"\"\"\n return self(kind='scatter', x=x, y=y, c=c, s=s, **kwds)\n\n def hexbin(self, x, y, C=None, reduce_C_function=None, gridsize=None,\n **kwds):\n \"\"\"\n Generate a hexagonal binning plot.\n\n Generate a hexagonal binning plot of `x` versus `y`. If `C` is `None`\n (the default), this is a histogram of the number of occurrences\n of the observations at ``(x[i], y[i])``.\n\n If `C` is specified, specifies values at given coordinates\n ``(x[i], y[i])``. These values are accumulated for each hexagonal\n bin and then reduced according to `reduce_C_function`,\n having as default the NumPy's mean function (:meth:`numpy.mean`).\n (If `C` is specified, it must also be a 1-D sequence\n of the same length as `x` and `y`, or a column label.)\n\n Parameters\n ----------\n x : int or str\n The column label or position for x points.\n y : int or str\n The column label or position for y points.\n C : int or str, optional\n The column label or position for the value of `(x, y)` point.\n reduce_C_function : callable, default `np.mean`\n Function of one argument that reduces all the values in a bin to\n a single number (e.g. 
`np.mean`, `np.max`, `np.sum`, `np.std`).\n gridsize : int or tuple of (int, int), default 100\n The number of hexagons in the x-direction.\n The corresponding number of hexagons in the y-direction is\n chosen in a way that the hexagons are approximately regular.\n Alternatively, gridsize can be a tuple with two elements\n specifying the number of hexagons in the x-direction and the\n y-direction.\n **kwds\n Additional keyword arguments are documented in\n :meth:`pandas.DataFrame.plot`.\n\n Returns\n -------\n matplotlib.AxesSubplot\n The matplotlib ``Axes`` on which the hexbin is plotted.\n\n See Also\n --------\n DataFrame.plot : Make plots of a DataFrame.\n matplotlib.pyplot.hexbin : hexagonal binning plot using matplotlib,\n the matplotlib function that is used under the hood.\n\n Examples\n --------\n The following examples are generated with random data from\n a normal distribution.\n\n .. plot::\n :context: close-figs\n\n >>> n = 10000\n >>> df = pd.DataFrame({'x': np.random.randn(n),\n ... 'y': np.random.randn(n)})\n >>> ax = df.plot.hexbin(x='x', y='y', gridsize=20)\n\n The next example uses `C` and `np.sum` as `reduce_C_function`.\n Note that `'observations'` values ranges from 1 to 5 but the result\n plot shows values up to more than 25. This is because of the\n `reduce_C_function`.\n\n .. plot::\n :context: close-figs\n\n >>> n = 500\n >>> df = pd.DataFrame({\n ... 'coord_x': np.random.uniform(-3, 3, size=n),\n ... 'coord_y': np.random.uniform(30, 50, size=n),\n ... 'observations': np.random.randint(1,5, size=n)\n ... })\n >>> ax = df.plot.hexbin(x='coord_x',\n ... y='coord_y',\n ... C='observations',\n ... reduce_C_function=np.sum,\n ... gridsize=10,\n ... cmap=\"viridis\")\n \"\"\"\n if reduce_C_function is not None:\n kwds['reduce_C_function'] = reduce_C_function\n if gridsize is not None:\n kwds['gridsize'] = gridsize\n return self(kind='hexbin', x=x, y=y, C=C, **kwds)\n" ]
[ [ "numpy.nanmax", "pandas.core.common.try_sort", "pandas.plotting._tools._flatten", "numpy.nanmin", "pandas.plotting._timeseries._decorate_axes", "pandas.core.dtypes.missing.notna", "pandas.compat.map", "matplotlib.artist.setp", "pandas.plotting._compat._mpl_ge_3_0_0", "scipy.stats.gaussian_kde", "pandas.compat.iteritems", "pandas.core.dtypes.missing.remove_na_arraylike", "numpy.ma.array", "numpy.ma.masked_where", "numpy.where", "pandas.core.common._any_not_none", "matplotlib.pyplot.gca", "pandas.core.config.get_option", "pandas.plotting._converter.register", "pandas.core.series.Series", "pandas.errors.AbstractMethodError", "pandas.plotting._tools._subplots", "matplotlib.pyplot.gcf", "pandas.core.dtypes.common.is_number", "pandas.plotting._timeseries.format_dateaxis", "pandas.core.dtypes.common.is_iterator", "pandas.plotting._timeseries._maybe_resample", "numpy.ravel", "numpy.zeros", "pandas.plotting._tools.format_date_labels", "matplotlib.pyplot.figure", "pandas.core.dtypes.common.is_list_like", "pandas.plotting._tools._get_xlim", "pandas.util._decorators.Appender", "pandas.plotting._tools._set_ticks_props", "pandas.core.dtypes.common.is_hashable", "numpy.atleast_2d", "pandas.core.reshape.concat.concat", "matplotlib.pyplot.get_fignums", "numpy.array", "matplotlib.pyplot.rc_context", "pandas.core.dtypes.common.is_integer", "numpy.tile", "matplotlib.pyplot.draw_if_interactive", "pandas.compat.zip", "pandas.core.dtypes.missing.isna", "pandas.plotting._style._get_standard_colors", "pandas.plotting._tools.table", "pandas.plotting._tools._get_all_lines", "pandas.io.formats.printing.pprint_thing", "pandas.compat.range" ] ]
hrosc/TrainYourOwnYOLO
[ "bcffa3f6b1acd37bd003635b471239fbb804a19f" ]
[ "Utils/Convert_Format.py" ]
[ "from os import path, makedirs\nimport pandas as pd\nimport numpy as np\nimport re\nimport os\nfrom PIL import Image\nfrom Get_File_Paths import GetFileList, ChangeToOtherMachine\n\n\ndef convert_vott_csv_to_yolo(\n vott_df,\n labeldict=dict(\n zip(\n [\"Cat_Face\"],\n [\n 0,\n ],\n )\n ),\n path=\"\",\n target_name=\"data_train.txt\",\n abs_path=False,\n):\n\n # Encode labels according to labeldict if code's don't exist\n if not \"code\" in vott_df.columns:\n vott_df[\"code\"] = vott_df[\"label\"].apply(lambda x: labeldict[x])\n # Round float to ints\n for col in vott_df[[\"xmin\", \"ymin\", \"xmax\", \"ymax\"]]:\n vott_df[col] = (vott_df[col]).apply(lambda x: round(x))\n\n # Create Yolo Text file\n last_image = \"\"\n txt_file = \"\"\n\n for index, row in vott_df.iterrows():\n if not last_image == row[\"image\"]:\n if abs_path:\n txt_file += \"\\n\" + row[\"image_path\"] + \" \"\n else:\n txt_file += \"\\n\" + os.path.join(path, row[\"image\"]) + \" \"\n txt_file += \",\".join(\n [\n str(x)\n for x in (row[[\"xmin\", \"ymin\", \"xmax\", \"ymax\", \"code\"]].tolist())\n ]\n )\n else:\n txt_file += \" \"\n txt_file += \",\".join(\n [\n str(x)\n for x in (row[[\"xmin\", \"ymin\", \"xmax\", \"ymax\", \"code\"]].tolist())\n ]\n )\n last_image = row[\"image\"]\n file = open(target_name, \"w\")\n file.write(txt_file[1:])\n file.close()\n return True\n\n\ndef csv_from_xml(directory, path_name=\"\"):\n # First get all images and xml files from path and its subfolders\n image_paths = GetFileList(directory, \".jpg\")\n xml_paths = GetFileList(directory, \".xml\")\n result_df = pd.DataFrame()\n if not len(image_paths) == len(xml_paths):\n print(\"number of annotations doesnt match number of images\")\n return False\n for image in image_paths:\n target_filename = os.path.join(path_name, image) if path_name else image\n source_filename = os.path.join(directory, image)\n y_size, x_size, _ = np.array(Image.open(source_filename)).shape\n source_xml = image.replace(\".jpg\", \".xml\")\n txt = open(source_xml, \"r\").read()\n y_vals = re.findall(r\"(?:x>\\n)(.*)(?:\\n</)\", txt)\n ymin_vals = y_vals[::2]\n ymax_vals = y_vals[1::2]\n x_vals = re.findall(r\"(?:y>\\n)(.*)(?:\\n</)\", txt)\n xmin_vals = x_vals[::2]\n xmax_vals = x_vals[1::2]\n label_vals = re.findall(r\"(?:label>\\n)(.*)(?:\\n</)\", txt)\n label_name_vals = re.findall(r\"(?:labelname>\\n)(.*)(?:\\n</)\", txt)\n df = pd.DataFrame()\n df[\"xmin\"] = xmin_vals\n df[\"xmin\"] = df[\"xmin\"].astype(float) * x_size\n df[\"ymin\"] = ymin_vals\n df[\"ymin\"] = df[\"ymin\"].astype(float) * y_size\n df[\"xmax\"] = xmax_vals\n df[\"xmax\"] = df[\"xmax\"].astype(float) * x_size\n df[\"ymax\"] = ymax_vals\n df[\"ymax\"] = df[\"ymax\"].astype(float) * y_size\n df[\"label\"] = label_name_vals\n df[\"code\"] = label_vals\n df[\"image_path\"] = target_filename\n df[\"image\"] = os.path.basename(target_filename)\n result_df = result_df.append(df)\n # Bring image column first\n cols = list(df.columns)\n cols = [cols[-1]] + cols[:-1]\n result_df = result_df[cols]\n return result_df\n\n\ndef crop_and_save(\n image_df,\n target_path,\n target_file,\n one=True,\n label_dict={0: \"house\"},\n postfix=\"cropped\",\n):\n \"\"\"Takes a vott_csv file with image names, labels and crop_boxes\n and crops the images accordingly\n\n Input csv file format:\n\n image xmin ymin xmax ymax label\n im.jpg 0 10 100 500 house\n\n\n Parameters\n ----------\n df : pd.Dataframe\n The input dataframe with file_names, bounding box info\n and label\n source_path : str\n Path of source 
images\n target_path : str, optional\n Path to save cropped images\n one : boolean, optional\n if True, only the most central house will be returned\n\n Returns\n -------\n True if completed succesfully\n \"\"\"\n if not path.isdir(target_path):\n makedirs(target_path)\n\n previous_name = \"\"\n counter = 0\n image_df.dropna(inplace=True)\n image_df[\"image_path\"] = ChangeToOtherMachine(image_df[\"image_path\"].values)\n\n def find_rel_position(row):\n current_name = row[\"image_path\"]\n x_size, _ = Image.open(current_name).size\n x_centrality = abs((row[\"xmin\"] + row[\"xmax\"]) / 2 / x_size - 0.5)\n return x_centrality\n\n if one:\n centrality = []\n for index, row in image_df.iterrows():\n centrality.append(find_rel_position(row))\n image_df[\"x_centrality\"] = pd.Series(centrality)\n image_df.sort_values([\"image\", \"x_centrality\"], inplace=True)\n image_df.drop_duplicates(subset=\"image\", keep=\"first\", inplace=True)\n new_paths = []\n for index, row in image_df.iterrows():\n current_name = row[\"image_path\"]\n if current_name == previous_name:\n counter += 1\n else:\n counter = 0\n imageObject = Image.open(current_name)\n cropped = imageObject.crop((row[\"xmin\"], row[\"ymin\"], row[\"xmax\"], row[\"ymax\"]))\n label = row[\"label\"]\n if type(label) == int:\n label = label_dict[label]\n image_name_cropped = (\n \"_\".join([row[\"image\"][:-4], postfix, label, str(counter)]) + \".jpg\"\n )\n new_path = os.path.join(target_path, image_name_cropped)\n cropped.save(new_path)\n new_paths.append(new_path.replace(\"\\\\\", \"/\"))\n previous_name = current_name\n pd.DataFrame(new_paths, columns=[\"image_path\"]).to_csv(target_file)\n return True\n\n\nif __name__ == \"__main__\":\n # Prepare the houses dataset for YOLO\n labeldict = dict(\n zip(\n [\"house\"],\n [\n 0,\n ],\n )\n )\n multi_df = pd.read_csv(\n \"C:/Users/Anton/Documents/Insight/eq/EQ_new/Train_Housing_detector/2/annotations/Housing_cropping-export.csv\"\n )\n multi_df.drop_duplicates(subset=None, keep=\"first\", inplace=True)\n convert_vott_csv_to_yolo(\n multi_df,\n labeldict,\n path=\"/home/ubuntu/logohunter/data/houses/\",\n target_name=\"data_train.txt\",\n )\n\n # Prepare the windows dataset for YOLO\n path = \"C:/Users/Anton/Documents/Insight/eq/EQ_new/Train_Window_Detector/base\"\n csv_from_xml(path, \"/home/ubuntu/logohunter/data/windows\").to_csv(\n \"C:/Users/Anton/Documents/Insight/eq/EQ_new/Train_Window_Detector/base/annotations.csv\"\n )\n\n label_names = [\n \"background\",\n \"facade\",\n \"molding\",\n \"cornice\",\n \"pillar\",\n \"window\",\n \"door\",\n \"sill\",\n \"blind\",\n \"balcony\",\n \"shop\",\n \"deco\",\n ]\n labeldict = dict(zip(label_names, list(range(12))))\n convert_vott_csv_to_yolo(\n csv_from_xml(path, \"/home/ubuntu/logohunter/data/windows\"), labeldict\n )\n" ]
[ [ "pandas.read_csv", "pandas.Series", "pandas.DataFrame" ] ]
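The convert_vott_csv_to_yolo code in the record above builds its training file by iterating rows and comparing each row's image against the previous one (last_image), so it relies on annotations for the same image being adjacent. A minimal sketch of the same per-image grouping done with a pandas groupby instead, on a hypothetical three-row DataFrame that is not part of the record:

import pandas as pd

df = pd.DataFrame({
    "image": ["a.jpg", "a.jpg", "b.jpg"],
    "xmin": [10, 40, 5], "ymin": [10, 40, 5],
    "xmax": [30, 80, 25], "ymax": [30, 80, 25],
    "code": [0, 0, 1],
})

lines = []
for image, rows in df.groupby("image", sort=False):
    # one line per image: "<image> xmin,ymin,xmax,ymax,code xmin,ymin,xmax,ymax,code ..."
    boxes = " ".join(
        ",".join(str(int(v)) for v in box)
        for box in rows[["xmin", "ymin", "xmax", "ymax", "code"]].values
    )
    lines.append(image + " " + boxes)

print("\n".join(lines))
# a.jpg 10,10,30,30,0 40,40,80,80,0
# b.jpg 5,5,25,25,1

Grouping first removes the dependence on row order that the last_image bookkeeping has.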
yu9824/yikit
[ "d2a0732f543e70b8be985b22847504a06c9837fc" ]
[ "yikit/models/models.py" ]
[ "\n'''\nCopyright (c) 2021 yu9824\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n'''\n\nfrom sklearn.base import BaseEstimator, RegressorMixin\nfrom sklearn.base import is_classifier, is_regressor\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.utils import check_array, check_X_y, check_random_state\nfrom sklearn.utils.validation import check_is_fitted\nfrom sklearn.utils import Bunch\nfrom sklearn.model_selection import train_test_split, cross_val_predict, cross_val_score\nfrom sklearn.model_selection import check_cv\nfrom sklearn.model_selection._validation import _fit_and_score, _aggregate_score_dicts, _score\nfrom sklearn.metrics import check_scoring\nfrom sklearn.metrics._scorer import _check_multimetric_scoring\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.neural_network import MLPRegressor\nfrom sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score\nfrom sklearn.linear_model import Ridge, Lasso, LinearRegression\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.base import clone\nfrom sklearn.svm import SVR\nfrom sklearn.cross_decomposition import PLSRegression\nfrom sklearn.inspection import permutation_importance\nfrom ngboost import NGBRegressor\nfrom lightgbm import LGBMRegressor\nimport sys\n\nfrom yikit.feature_selection import BorutaPy\nfrom yikit.tools import is_notebook\nif is_notebook():\n from tqdm.notebook import tqdm\nelse:\n from tqdm import tqdm\n\nimport optuna\nfrom joblib import Parallel, delayed\n\nimport numpy as np\nimport pandas as pd\n\n__all__ = [\n 'NNRegressor',\n 'GBDTRegressor',\n 'LinearModelRegressor',\n 'SupportVectorRegressor',\n 'EnsembleRegressor',\n 'Objective'\n]\n\n\nclass NNRegressor(BaseEstimator, RegressorMixin):\n def __init__(self, input_dropout = 0.0, hidden_layers = 3, hidden_units = 96, hidden_activation = 'relu', hidden_dropout = 0.2, batch_norm = 'before_act', optimizer_type = 'adam', lr = 0.001, batch_size = 64, l = 0.01, random_state = None, epochs = 200, patience = 20, progress_bar = True, scale = True):\n try:\n import keras\n except ModuleNotFoundError as e:\n sys.stdout.write(e)\n raise ModuleNotFoundError('If you want to use this module, please install keras.')\n\n self.input_dropout = input_dropout\n self.hidden_layers = hidden_layers\n self.hidden_units = hidden_units\n self.hidden_activation = hidden_activation\n self.hidden_dropout = hidden_dropout\n self.batch_norm = batch_norm\n self.optimizer_type = optimizer_type\n self.lr = lr\n self.batch_size = batch_size\n self.l = l\n self.random_state = random_state\n self.epochs = epochs\n self.patience = patience\n self.progress_bar = progress_bar\n self.scale = scale\n\n def fit(self, X, y):\n from keras.callbacks import EarlyStopping\n from keras.layers.advanced_activations import ReLU, PReLU\n from keras.layers import Dense, Dropout\n from keras.layers.normalization import BatchNormalization\n from keras.models import Sequential\n from keras.optimizers import SGD, Adam\n from keras.regularizers import l2\n from keras.backend 
import clear_session\n \n # 入力されたXとyが良い感じか判定(サイズが適切かetc)\n X, y = check_X_y(X, y)\n\n '''\n sklearn/utils/estimator_checks.py:3063:\n FutureWarning: As of scikit-learn 0.23, estimators should expose a n_features_in_ attribute, \n unless the 'no_validation' tag is True.\n This attribute should be equal to the number of features passed to the fit method.\n An error will be raised from version 1.0 (renaming of 0.25) when calling check_estimator().\n See SLEP010: https://scikit-learn-enhancement-proposals.readthedocs.io/en/latest/slep010/proposal.html\n '''\n self.n_features_in_ = X.shape[1] # check_X_yのあとでないとエラーになりうる.\n\n # check_random_state\n self.rng_ = check_random_state(self.random_state)\n\n # 標準化\n if self.scale:\n self.scaler_X_ = StandardScaler()\n X_ = self.scaler_X_.fit_transform(X)\n\n self.scaler_y_ = StandardScaler()\n y_ = self.scaler_y_.fit_transform(np.array(y).reshape(-1, 1)).flatten()\n else:\n X_ = X\n y_ = y\n \n\n # なぜか本当によくわかんないけど名前が一緒だと怒られるのでそれをずらすためのやつをつくる\n j = int(10E5)\n\n # プログレスバー\n if self.progress_bar:\n pbar = tqdm(total = 1 + self.hidden_layers + 6)\n self.estimator_ = Sequential()\n\n # 入力層\n self.estimator_.add(Dropout(self.input_dropout, input_shape = (self.n_features_in_,), seed = self.rng_.randint(2 ** 32), name = 'Dropout_' + str(j)))\n if self.progress_bar:\n pbar.update(1)\n\n\n # 中間層\n for i in range(self.hidden_layers):\n self.estimator_.add(Dense(units = self.hidden_units, kernel_regularizer = l2(l = self.l), name = 'Dense_' + str(j+i+1))) # kernel_regularizer: 過学習対策\n if self.batch_norm == 'before_act':\n self.estimator_.add(BatchNormalization(name = 'BatchNormalization' + str(j+i+1)))\n \n # 活性化関数\n if self.hidden_activation == 'relu':\n self.estimator_.add(ReLU(name = 'Re_Lu_' + str(j+i+1)))\n elif self.hidden_activation == 'prelu':\n self.estimator_.add(PReLU(name = 'PRe_Lu_' + str(j+i+1)))\n else:\n raise NotImplementedError\n\n self.estimator_.add(Dropout(self.hidden_dropout, seed = self.rng_.randint(2 ** 32), name = 'Dropout_' + str(j+i+1)))\n\n # プログレスバー\n if self.progress_bar:\n pbar.update(1)\n \n # 出力層\n self.estimator_.add(Dense(1, activation = 'linear', name = 'Dense_' + str(j+self.hidden_layers+1)))\n\n # optimizer\n if self.optimizer_type == 'adam':\n optimizer_ = Adam(lr = self.lr, beta_1 = 0.9, beta_2 = 0.999, decay = 0.0)\n elif self.optimizer_type == 'sgd':\n optimizer_ = SGD(lr = self.lr, decay = 1E-6, momentum = 0.9, nesterov = True)\n else:\n raise NotImplementedError\n \n # 目的関数,評価指標などの設定\n self.estimator_.compile(loss = 'mean_squared_error', optimizer = optimizer_, metrics=['mse', 'mae'])\n\n # プログレスバー\n if self.progress_bar:\n pbar.update(1)\n\n # 変数の定義\n self.early_stopping_ = EarlyStopping(patience = self.patience, restore_best_weights = True)\n self.validation_split_ = 0.2\n\n # fit\n self.estimator_.fit(X_, y_, validation_split = self.validation_split_, epochs = self.epochs, batch_size = self.batch_size, callbacks = [self.early_stopping_], verbose = 0)\n\n # プログレスバー\n if self.progress_bar:\n pbar.update(5)\n pbar.close()\n\n return self\n\n def predict(self, X):\n # fitが行われたかどうかをインスタンス変数が定義されているかで判定(第二引数を文字列ではなくてリストで与えることでより厳密に判定可能)\n check_is_fitted(self, 'estimator_')\n\n # 入力されたXが妥当か判定\n X = check_array(X)\n\n if self.scale:\n X_ = self.scaler_X_.transform(X)\n y_pred_ = self.estimator_.predict(X_)\n y_pred_ = self.scaler_y_.inverse_transform(y_pred_).flatten()\n else:\n X_ = X\n y_pred_ = self.estimator_.predict(X_).flatten()\n return y_pred_\n \n\n\nclass GBDTRegressor(RegressorMixin, BaseEstimator):\n def 
__init__(self, boosting_type='gbdt', num_leaves=31, max_depth=-1, learning_rate=0.1, n_estimators=100, subsample_for_bin=200000, objective=None, class_weight=None, min_split_gain=0.0, min_child_weight=0.001, min_child_samples=20, subsample=1.0, subsample_freq=0, colsample_bytree=1.0, reg_alpha=0.0, reg_lambda=0.0, random_state=None, n_jobs=-1, silent=True, importance_type='split', **kwargs):\n # self.hoge = hogeとしなければいけない.つまりself.fuga = hogeだと怒られる\n self.boosting_type = boosting_type\n self.num_leaves = num_leaves\n self.max_depth = max_depth\n self.learning_rate = learning_rate\n self.n_estimators = n_estimators\n self.subsample_for_bin = subsample_for_bin\n self.objective = objective\n self.class_weight = class_weight\n self.min_split_gain = min_split_gain\n self.min_child_weight = min_child_weight\n self.min_child_samples = min_child_samples\n self.subsample = subsample\n self.subsample_freq = subsample_freq\n self.colsample_bytree = colsample_bytree\n self.reg_alpha = reg_alpha\n self.reg_lambda = reg_lambda\n self.random_state = random_state\n self.n_jobs = n_jobs\n self.silent = silent\n self.importance_type = importance_type\n if len(kwargs):\n self.kwargs = kwargs\n\n def fit(self, X, y):\n try:\n kwargs = self.kwargs\n except:\n kwargs = {}\n\n # check_random_state\n self.rng_ = check_random_state(self.random_state)\n\n # fitしたあとに確定する値は変数名 + '_' としなければならない.\n self.estimator_ = LGBMRegressor(boosting_type = self.boosting_type,\n num_leaves = self.num_leaves,\n max_depth = self.max_depth,\n learning_rate = self.learning_rate,\n n_estimators = self.n_estimators,\n subsample_for_bin = self.subsample_for_bin,\n objective = self.objective,\n class_weight = self.class_weight,\n min_split_gain = self.min_split_gain,\n min_child_weight = self.min_child_weight,\n min_child_samples = self.min_child_samples,\n subsample = self.subsample,\n subsample_freq = self.subsample_freq,\n colsample_bytree = self.colsample_bytree,\n reg_alpha = self.reg_alpha,\n reg_lambda = self.reg_lambda,\n random_state = self.rng_,\n n_jobs = self.n_jobs,\n silent = self.silent,\n importance_type = self.importance_type,\n **kwargs\n )\n\n # 入力されたXとyが良い感じか判定(サイズが適切かetc)\n X, y = check_X_y(X, y)\n\n '''\n sklearn/utils/estimator_checks.py:3063:\n FutureWarning: As of scikit-learn 0.23, estimators should expose a n_features_in_ attribute, \n unless the 'no_validation' tag is True.\n This attribute should be equal to the number of features passed to the fit method.\n An error will be raised from version 1.0 (renaming of 0.25) when calling check_estimator().\n See SLEP010: https://scikit-learn-enhancement-proposals.readthedocs.io/en/latest/slep010/proposal.html\n '''\n self.n_features_in_ = X.shape[1] # check_X_yのあとでないとエラーになりうる.\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state = self.rng_, test_size = 0.2)\n\n self.estimator_.fit(X, y, eval_set = [(X_test, y_test)], eval_metric = ['mse', 'mae'], early_stopping_rounds = 20, verbose = False)\n self.feature_importances_ = self.estimator_.feature_importances_\n\n # 慣例と聞いたはずなのにこれをreturnしないと怒られる.審査が厳しい.\n return self\n\n def predict(self, X):\n # fitが行われたかどうかをインスタンス変数が定義されているかで判定(第二引数を文字列ではなくてリストで与えることでより厳密に判定可能)\n check_is_fitted(self, 'estimator_')\n\n # 入力されたXが妥当か判定\n X = check_array(X)\n\n # 予測結果を返す\n return self.estimator_.predict(X)\n\n\n# スケールの概念が入っていないので,それらを内包したscikit-learn準拠モデルを自分で定義する必要がある.\nclass EnsembleRegressor(BaseEstimator, RegressorMixin):\n def __init__(self, estimators = (RandomForestRegressor(),), method = 'blending', cv = 5, 
n_jobs = -1, random_state = None, scoring = 'neg_mean_squared_error', verbose = 0, boruta = True, opt = True):\n '''\n Parameters\n ----------\n estimators: 1-d list, default = (RandomForestRegressor(), )\n List of estimators to ensemble.\n\n method: {'blending', 'average', 'stacking'}, default = 'blending'\n How to ensemble.\n\n cv: int or callable, default = 5\n\n n_jobs: int, default = -1\n\n random_state: None, int or callable, default = None\n\n scoring: str, callable or list, default = 'neg_mean_squared_error'\n https://scikit-learn.org/stable/modules/model_evaluation.html\n\n verbose: int, default = 0\n\n boruta: bool, default = True\n Do boruta or not.\n\n opt: bool, default = True\n Do hyperparameter optimization or not.\n '''\n self.estimators = estimators\n self.method = method\n self.cv = cv\n self.n_jobs = n_jobs\n self.random_state = random_state\n self.scoring = scoring\n self.verbose = verbose\n self.boruta = boruta\n self.opt = opt\n\n def fit(self, X, y):\n # よく使うので変数化\n self.n_estimators_ = len(self.estimators)\n\n # check_X_y\n X, y = check_X_y(X, y)\n\n '''\n sklearn/utils/estimator_checks.py:3063:\n FutureWarning: As of scikit-learn 0.23, estimators should expose a n_features_in_ attribute, \n unless the 'no_validation' tag is True.\n This attribute should be equal to the number of features passed to the fit method.\n An error will be raised from version 1.0 (renaming of 0.25) when calling check_estimator().\n See SLEP010: https://scikit-learn-enhancement-proposals.readthedocs.io/en/latest/slep010/proposal.html\n '''\n self.n_features_in_ = X.shape[1] # check_X_yのあとでないとエラーになりうる.\n \n # check_random_state\n rng_ = check_random_state(self.random_state)\n \n # isRegressor\n if sum([is_regressor(estimator) for estimator in self.estimators]) != self.n_estimators_:\n raise ValueError\n\n # check_cv\n cv_ = check_cv(self.cv, y = y, classifier = False)\n\n # check_scoring\n estimator = self.estimators[0]\n if callable(self.scoring):\n scorers = self.scoring\n elif self.scoring is None or isinstance(self.scoring, str):\n scorers = check_scoring(estimator = estimator)\n else:\n # 0.24.1のコードだと辞書を返すことになっているが,0.23.2ではtupleが返ってきてしまう?\n scorers = _check_multimetric_scoring(estimator, self.scoring)\n if isinstance(scorers, tuple):\n scorers = scorers[0]\n\n # 並列処理する部分を関数化\n def _f(i_train, i_test):\n X_train, X_test, y_train, y_test = X[i_train], X[i_test], y[i_train], y[i_test]\n\n if self.boruta:\n # 特徴量削減\n feature_selector_ = BorutaPy(estimator = RandomForestRegressor(n_jobs = -1, random_state = rng_), random_state = rng_, max_iter = 300, verbose = self.verbose)\n feature_selector_.fit(X_train, y_train)\n \n # 抽出\n support_ = feature_selector_.get_support()\n X_train_selected = feature_selector_.transform(X_train)\n X_test_selected = feature_selector_.transform(X_test)\n else: # borutaしない場合でもresultsに組み込まれるので変数を定義しておく.\n support_ = np.ones(X_train.shape[1], dtype = np.bool)\n X_train_selected = X_train\n X_test_selected = X_test\n\n # verbose\n if self.verbose == 0:\n optuna.logging.disable_default_handler()\n\n results_estimators = []\n for estimator in self.estimators:\n if self.opt:\n # ハイパーパラメータ(scoringで最初にしていしたやつで最適化)\n objective = Objective(estimator, X_train_selected, y_train, cv = cv_, random_state = rng_, scoring = scorers.values()[0] if isinstance(scorers, dict) else scorers)\n sampler = optuna.samplers.TPESampler(seed = rng_.randint(2 ** 32))\n\n study = optuna.create_study(sampler = sampler, direction = 'maximize')\n study.optimize(objective, n_trials = 100, n_jobs = 
1)\n\n # 最適化後のモデル\n _best_estimator_ = objective.model(**objective.fixed_params_, **study.best_params)\n else: # optunaしない場合でもresultsに組み込まれるので変数を定義しておく.\n study = None\n _best_estimator_ = clone(estimator)\n\n # fit\n _best_estimator_.fit(X_train_selected, y_train)\n\n # predict\n _y_pred_on_train = _best_estimator_.predict(X_train_selected)\n _y_pred_on_test = _best_estimator_.predict(X_test_selected)\n\n # score\n _train_scores = _score(_best_estimator_, X_train_selected, y_train, scorers)\n _test_scores = _score(_best_estimator_, X_test_selected, y_test, scorers)\n\n # importances\n _gi = _best_estimator_.feature_importances_ if 'feature_importances_' in dir(_best_estimator_) else None\n _pi = permutation_importance(_best_estimator_, X_test_selected, y_test, scoring = 'neg_mean_squared_error', n_repeats = 10, n_jobs = -1, random_state = rng_).importances\n\n # 予測結果をDataFrameにまとめる.\n _y_train = pd.DataFrame(np.hstack([y_train.reshape(-1, 1), _y_pred_on_train.reshape(-1, 1)]), columns = ['true', 'pred'], index = i_train)\n _y_test = pd.DataFrame(np.hstack([y_test.reshape(-1, 1), _y_pred_on_test.reshape(-1, 1)]), columns = ['true', 'pred'], index = i_test)\n\n results_estimators.append({\n 'estimators': _best_estimator_,\n 'params': _best_estimator_.get_params(),\n 'y_train': _y_train,\n 'y_test': _y_test,\n 'train_scores': _train_scores,\n 'test_scores': _test_scores,\n 'gini_importances': _gi,\n 'permutation_importances': _pi,\n 'studies': study,\n })\n # verbose\n if self.verbose == 0:\n optuna.logging.disable_default_handler()\n\n # 出力結果をいい感じにする.←ここから\n ret = {}\n temp = {}\n for result_estimator in results_estimators: # それぞれのestimatorについて\n for k, v in result_estimator.items(): # その中の各々の値について\n # スコア系かつそれらが複数指定されている場合だけ特別処理\n if '_score' in k and isinstance(v, dict):\n if k not in temp:\n temp[k] = {}\n for score_name, score in v.items():\n if score_name in temp[k]:\n temp[k][score_name].append(score)\n else:\n temp[k][score_name] = [score]\n else: # スコア系以外\n if k in ret:\n ret[k].append(v)\n else:\n ret[k] = [v]\n # scoreをためてるやつをBunch型に変換\n for k in temp:\n temp[k] = Bunch(**temp[k])\n \n # 返すように最終整形\n ret['support_'] = support_\n ret.update(temp)\n return ret\n \n # 上記で定義した_f関数どうしは互いに独立なので並列で処理する.\n parallel = Parallel(n_jobs = self.n_jobs, verbose = self.verbose)\n results = parallel(delayed(_f)(i_train, i_test) for i_train, i_test in cv_.split(X, y))\n # results = [_f(i_train, i_test) for i_train, i_test in cv_.split(X, y)] # デバッグ用.並列しないでやる方法\n\n # データを整形\n self.results_ = {}\n for result in results:\n for k, v in result.items():\n if k in self.results_:\n self.results_[k].append(v)\n else:\n self.results_[k] = [v]\n\n # 扱いやすいようにBunch型に変換\n self.results_ = Bunch(**self.results_)\n\n # OOFの予測結果を取得\n dfs_y_oof_ = [pd.concat([lst[n] for lst in self.results_['y_test']], axis = 0).sort_index() for n in range(self.n_estimators_)]\n y_oof_ = pd.concat([df.loc[:, 'pred'] for df in dfs_y_oof_], axis = 1)\n y_oof_.columns = ['estimator{}'.format(n) for n in range(self.n_estimators_)]\n\n # *** ensemble ***\n # モデルがひとつのとき.\n if self.method == 'average' or self.n_estimators_ == 1:\n self.weights_ = None\n elif self.method == 'blending':\n # rmseで最適化(今後指定できるようにしてもいいかも.)\n def objective(trial):\n params = {'weight{0}'.format(i): trial.suggest_uniform('weight{0}'.format(i), 0, 1) for i in range(self.n_estimators_)}\n weights = np.array(list(params.values()))\n y_oof_ave = np.average(y_oof_, weights = weights, axis = 1)\n return mean_squared_error(y_oof_ave, y, squared = False)\n\n # 
optunaのログを非表示\n if self.verbose == 0:\n optuna.logging.disable_default_handler()\n\n # 重みの最適化\n sampler_ = optuna.samplers.TPESampler(seed = rng_.randint(2 ** 32))\n study = optuna.create_study(sampler = sampler_, direction = 'minimize') # 普通のRMSEなので.\n study.optimize(objective, n_trials = 100, n_jobs = 1) # -1にするとなぜかバグるので.(そもそもそんなに重くないので1で.)\n\n # optunaのログを再表示\n if self.verbose == 0:\n optuna.logging.enable_default_handler()\n\n self.weights_ = np.array(list(study.best_params.values()), dtype = np.float64)\n self.weights_ /= np.sum(self.weights_)\n elif self.method == 'stacking':\n # 線形モデルの定義\n self.stacking_model_ = LinearRegression(n_jobs = self.n_jobs)\n self.stacking_model_.fit(y_oof_.values, y)\n # resultsに保存するために定義だけする.\n self.weights_ = None\n else:\n raise NotImplementedError\n\n # 重みを結果に保存\n self.results_['weights'] = self.weights_\n\n return self\n\n def predict(self, X):\n # fitが行われたかどうかをインスタンス変数が定義されているかで判定(第二引数を文字列ではなくてリストで与えることでより厳密に判定可能)\n check_is_fitted(self, 'results_')\n\n # 入力されたXが妥当か判定\n X = check_array(X)\n\n # 各予測モデルの予測結果をまとめる.(内包リストで得られる配列は3-Dベクトル (n_estimators_, cv, n_samples))\n y_preds_ = np.average(np.array([[estimators[n].predict(X[:, self.results_.support_[m]]) for m, estimators in enumerate(self.results_.estimators)] for n in range(self.n_estimators_)]), axis = 1).transpose() # 同じ種類のやつは単純に平均を取る.\n \n if self.method in ('blending', 'average') or self.n_estimators_ == 1:\n y_pred_ = np.average(y_preds_, weights = self.weights_, axis = 1)\n elif self.method == 'stacking':\n y_pred_ = self.stacking_model_.predict(y_preds_)\n return y_pred_\n\n\nclass SupportVectorRegressor(BaseEstimator, RegressorMixin):\n def __init__(self, kernel = 'rbf', gamma = 'auto', tol = 0.01, C = 1.0, epsilon = 0.1, scale = True):\n self.kernel = kernel\n self.gamma = gamma\n self.tol = tol\n self.C = C\n self.epsilon = epsilon\n self.scale = scale\n\n def fit(self, X, y):\n # 入力されたXとyが良い感じか判定(サイズが適切かetc)\n X, y = check_X_y(X, y)\n\n '''\n sklearn/utils/estimator_checks.py:3063:\n FutureWarning: As of scikit-learn 0.23, estimators should expose a n_features_in_ attribute, \n unless the 'no_validation' tag is True.\n This attribute should be equal to the number of features passed to the fit method.\n An error will be raised from version 1.0 (renaming of 0.25) when calling check_estimator().\n See SLEP010: https://scikit-learn-enhancement-proposals.readthedocs.io/en/latest/slep010/proposal.html\n '''\n self.n_features_in_ = X.shape[1] # check_X_yのあとでないとエラーになりうる.\n\n if self.scale:\n self.scaler_X_ = StandardScaler()\n X_ = self.scaler_X_.fit_transform(X)\n\n self.scaler_y_ = StandardScaler()\n y_ = self.scaler_y_.fit_transform(np.array(y).reshape(-1, 1)).flatten()\n else:\n X_ = X\n y_ = y\n\n self.estimator_ = SVR(kernel=self.kernel, gamma=self.gamma, tol=self.tol, C=self.C, epsilon=self.epsilon)\n self.estimator_.fit(X_, y_)\n\n return self\n\n def predict(self, X):\n # fitが行われたかどうかをインスタンス変数が定義されているかで判定(第二引数を文字列ではなくてリストで与えることでより厳密に判定可能)\n check_is_fitted(self, 'estimator_')\n\n # 入力されたXが妥当か判定\n X = check_array(X)\n\n if self.scale:\n X_ = self.scaler_X_.transform(X)\n else:\n X_ = X\n\n y_pred_ = self.estimator_.predict(X_)\n if self.scale:\n y_pred_ = self.scaler_y_.inverse_transform(np.array(y_pred_).reshape(-1, 1)).flatten()\n \n return y_pred_\n\nclass LinearModelRegressor(BaseEstimator, RegressorMixin):\n def __init__(self, linear_model = 'ridge', alpha = 1.0, fit_intercept = True, max_iter = 1000, tol = 0.001, random_state = None):\n self.linear_model = linear_model\n self.alpha 
= alpha\n self.fit_intercept = fit_intercept\n self.max_iter = max_iter\n self.tol = tol\n self.random_state = random_state\n\n def fit(self, X, y):\n X, y = check_X_y(X, y)\n\n '''\n sklearn/utils/estimator_checks.py:3063:\n FutureWarning: As of scikit-learn 0.23, estimators should expose a n_features_in_ attribute, \n unless the 'no_validation' tag is True.\n This attribute should be equal to the number of features passed to the fit method.\n An error will be raised from version 1.0 (renaming of 0.25) when calling check_estimator().\n See SLEP010: https://scikit-learn-enhancement-proposals.readthedocs.io/en/latest/slep010/proposal.html\n '''\n self.n_features_in_ = X.shape[1] # check_X_yのあとでないとエラーになりうる.\n\n self.rng_ = check_random_state(self.random_state)\n\n # max_iterを引数に入れてるとこの変数ないとダメ!って怒られるから.\n self.n_iter_ = 1\n\n if self.linear_model == 'ridge':\n model_ = Ridge\n elif self.linear_model == 'lasso':\n model_ = Lasso\n else:\n raise NotImplementedError\n\n self.estimator_ = model_(alpha = self.alpha, fit_intercept = self.fit_intercept, max_iter = self.max_iter, tol = self.tol, random_state = self.rng_)\n self.estimator_.fit(X, y)\n return self\n \n def predict(self, X):\n check_is_fitted(self, 'estimator_')\n\n X = check_array(X)\n\n return self.estimator_.predict(X)\n\nclass Objective:\n def __init__(self, estimator, X, y, custom_params = lambda trial: {}, fixed_params = {}, cv = 5, random_state = None, scoring = None, n_jobs = None):\n \"\"\"objective function of optuna.\n\n Parameters\n ----------\n estimator : sklearn-based estimator instance\n e.g. sklearn.ensemble.RandomForestRegressor()\n X : 2-d array\n features\n y : 1-d array\n target\n custom_params : func, optional\n If you want to do your own custom range of optimization, you can define it here with a function that returns a dictionary., by default lambda trial:{}\n fixed_params : dict, optional\n If you have a fixed variable, you can specify it in the dictionary., by default {}\n cv : int, KFold object, optional\n How to cross-validate, by default 5\n random_state : int or RandomState object, optional\n seed, by default None\n scoring : scorer object, str, etc., optional\n If you don't specify it, it will use the default evaluation function of sklearn., by default None\n n_jobs : int, optional\n parallel processing, by default None\n \"\"\"\n self.estimator = estimator\n self.X, self.y = check_X_y(X, y)\n self.custom_params = custom_params\n self._fixed_params = fixed_params\n self.cv = check_cv(cv)\n self.rng = check_random_state(random_state)\n self.scoring = check_scoring(estimator, scoring)\n self.n_jobs = n_jobs\n\n # sampler\n self.sampler = optuna.samplers.TPESampler(seed = self.rng.randint(2 ** 32 - 1))\n\n def __call__(self, trial):\n if isinstance(self.estimator, NNRegressor):\n params_ = {\n 'input_dropout': trial.suggest_uniform('input_dropout', 0.0, 0.3),\n 'hidden_layers': trial.suggest_int('hidden_layers', 2, 4),\n 'hidden_units' : trial.suggest_int('hidden_units', 32, 1024, 32),\n 'hidden_activation' : trial.suggest_categorical('hidden_activation', ['prelu', 'relu']),\n 'hidden_dropout' : trial.suggest_uniform('hidden_dropout', 0.2, 0.5),\n 'batch_norm' : trial.suggest_categorical('batch_norm', ['before_act', 'no']),\n 'optimizer_type' : trial.suggest_categorical('optimizer_type', ['adam', 'sgd']),\n 'lr' : trial.suggest_loguniform('lr', 0.00001, 0.01),\n 'batch_size' : trial.suggest_int('hidden_units', 32, 1024, 32),\n 'l' : trial.suggest_loguniform('l', 1E-7, 0.1),\n }\n self.fixed_params_ = {\n 
'progress_bar': False,\n 'random_state': self.rng,\n }\n elif isinstance(self.estimator, (GBDTRegressor, LGBMRegressor)):\n params_ = {\n 'n_estimators' : trial.suggest_int('n_estimators', 10, 1000, log=True),\n # 'max_depth' : trial.suggest_int('n_estimators', 3, 9), # num_leaves変えた方が良さそう.制約条件的に.\n 'min_child_weight' : trial.suggest_loguniform('min_child_weight', 0.001, 10),\n 'colsample_bytree' : trial.suggest_uniform('colsample_bytree', 0.6, 0.95),\n 'subsample': trial.suggest_uniform('subsample', 0.6, 0.95),\n 'num_leaves' : trial.suggest_int('num_leaves', 2 ** 3, 2 ** 9, log = True)\n }\n self.fixed_params_ = {\n 'random_state' : self.rng,\n 'n_jobs' : -1,\n 'objective' : 'regression',\n }\n elif isinstance(self.estimator, RandomForestRegressor):\n # 最適化するべきパラメータ\n params_ = {\n 'min_samples_split' : trial.suggest_int('min_samples_split', 2, 16),\n 'max_depth' : trial.suggest_int('max_depth', 10, 100),\n 'n_estimators' : trial.suggest_int('n_estimators', 10, 1000, log=True),\n }\n # 固定するパラメータ (外でも取り出せるようにインスタンス変数としてる.)\n self.fixed_params_ = {\n 'random_state' : self.rng,\n 'n_jobs' : -1,\n }\n elif isinstance(self.estimator, (SupportVectorRegressor, SVR)):\n # 最適化するべきパラメータ\n params_ = {\n 'C' : trial.suggest_loguniform('C', 2 ** -5, 2 ** 10),\n 'epsilon' : trial.suggest_loguniform('epsilon', 2 ** -10, 2 ** 0),\n }\n # 固定するパラメータ (外でも取り出せるようにインスタンス変数としてる.)\n self.fixed_params_ = {\n 'gamma' : 'auto',\n 'kernel' : 'rbf'\n }\n elif isinstance(self.estimator, LinearModelRegressor):\n # 最適化するべきパラメータ\n params_ = {\n 'linear_model' : trial.suggest_categorical('linear_model', ['ridge', 'lasso']),\n 'alpha' : trial.suggest_loguniform('alpha', 0.1, 10),\n 'fit_intercept' : trial.suggest_categorical('fit_intercept', [True, False]),\n 'max_iter' : trial.suggest_loguniform('max_iter', 100, 10000),\n 'tol' : trial.suggest_loguniform('tol', 0.0001, 0.01),\n }\n # 固定するパラメータ (外でも取り出せるようにインスタンス変数としてる.)\n self.fixed_params_ = {\n 'random_state' : self.rng,\n }\n elif isinstance(self.estimator, MLPRegressor):\n # 最適化するべきパラメータ\n params_ = {\n 'hidden_layer_sizes': trial.suggest_int('hidden_layer_sizes', 50, 300),\n 'alpha': trial.suggest_loguniform('alpha', 1e-5, 1e-3),\n 'learning_rate_init': trial.suggest_loguniform('learning_rate_init', 1e-5, 1e-3),\n }\n # 固定するパラメータ (外でも取り出せるようにインスタンス変数としてる.)\n self.fixed_params_ = {\n 'random_state' : self.rng,\n }\n elif isinstance(self.estimator, NGBRegressor):\n # 最適化するべきパラメータ\n params_ = {\n 'Base': DecisionTreeRegressor(\n max_depth = trial.suggest_int('Base__max_depth', 2, 100),\n criterion = trial.suggest_categorical('Base__criterion', ['mse', 'friedman_mse']),\n random_state = self.rng,\n ),\n 'n_estimators' : trial.suggest_int('n_estimators', 10, 1000, log=True),\n 'minibatch_frac': trial.suggest_uniform('minibatch_frac', 0.5, 1.0),\n }\n # 固定するパラメータ (外でも取り出せるようにインスタンス変数としてる.)\n self.fixed_params_ = {\n 'random_state' : self.rng,\n }\n elif self.custom_params(trial):\n params_ = self.custom_params(trial)\n self.fixed_params_ = {} # あとで加えるので空でOK.\n else:\n raise NotImplementedError('{0}'.format(self.estimator))\n\n # もしfixed_paramsを追加で指定されたらそれを取り入れる\n self.fixed_params_.update(self._fixed_params)\n\n self.model = type(self.estimator)\n # self.estimator_ = self.model(**params_, **self.fixed_params_)\n self.estimator_ = clone(self.estimator)\n self.estimator_.set_params(\n **params_,\n **self.fixed_params_,\n )\n\n parallel = Parallel(n_jobs = self.n_jobs)\n results = parallel(\n delayed(_fit_and_score)(\n clone(self.estimator_), self.X, self.y, self.scoring, 
train, test, 0, dict(**self.fixed_params_, **params_), None\n )\n for train, test in self.cv.split(self.X, self.y))\n return np.mean([d['test_scores'] for d in results]) # scikit-learn>=0.24.1\n\n def get_best_estimator(self, study):\n best_params_ = self.get_best_params(study)\n return self.model(**best_params_)\n\n def get_best_params(self, study):\n if isinstance(self.estimator_, NGBRegressor):\n dt_best_params_ = {}\n best_params_ = {}\n key_base = 'Base__'\n for k, v in study.best_params.items():\n if key_base in k:\n dt_best_params_[k[len(key_base):]] = v\n else:\n best_params_[k] = v\n else:\n if 'random_state' in self.fixed_params_:\n dt_best_params_['random_state'] = self.fixed_params_['random_state']\n best_params_['Base'] = DecisionTreeRegressor(**dt_best_params_)\n else:\n best_params_ = study.best_params\n best_params_.update(**self.fixed_params_)\n return best_params_\n\n\n\nif __name__ == '__main__':\n pass" ]
[ [ "sklearn.ensemble.RandomForestRegressor", "sklearn.utils.validation.check_is_fitted", "sklearn.model_selection._validation._score", "sklearn.metrics._scorer._check_multimetric_scoring", "sklearn.metrics.mean_squared_error", "sklearn.base.clone", "numpy.mean", "sklearn.utils.Bunch", "sklearn.utils.check_X_y", "sklearn.base.is_regressor", "pandas.concat", "sklearn.metrics.check_scoring", "sklearn.model_selection.train_test_split", "sklearn.model_selection.check_cv", "sklearn.svm.SVR", "numpy.array", "numpy.sum", "sklearn.tree.DecisionTreeRegressor", "sklearn.utils.check_array", "sklearn.inspection.permutation_importance", "numpy.ones", "sklearn.linear_model.LinearRegression", "sklearn.preprocessing.StandardScaler", "numpy.average", "sklearn.utils.check_random_state" ] ]
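Several estimators in the yikit record above (NNRegressor, GBDTRegressor, SupportVectorRegressor, LinearModelRegressor) repeat the same scikit-learn estimator contract: validate inputs with check_X_y in fit, set n_features_in_, give fitted attributes a trailing underscore, return self, and guard predict with check_is_fitted plus check_array. A minimal sketch of that contract, using a hypothetical toy MeanRegressor rather than any estimator from the file:

import numpy as np
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.utils import check_array, check_X_y
from sklearn.utils.validation import check_is_fitted

class MeanRegressor(BaseEstimator, RegressorMixin):
    def fit(self, X, y):
        X, y = check_X_y(X, y)            # shape/dtype validation, as in the file
        self.n_features_in_ = X.shape[1]  # attribute expected since scikit-learn 0.23
        self.mean_ = float(np.mean(y))    # trailing underscore marks a fitted attribute
        return self                       # fit must return self

    def predict(self, X):
        check_is_fitted(self, 'mean_')    # raises if fit was never called
        X = check_array(X)
        return np.full(X.shape[0], self.mean_)

reg = MeanRegressor().fit([[0.0], [1.0]], [1.0, 3.0])
print(reg.predict([[2.0]]))  # [2.]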
Eveneko/SUSTech-Courses
[ "0420873110e91e8d13e6e85a974f1856e01d28d6" ]
[ "CS303_Artifical-Intelligence/Gomoku/Chessboard/graph.py" ]
[ "import numpy as np\nfrom graphics import *\n\nGRID_WIDTH = 20\nCOLUMN = 15\nROW = 15\n\nlist1 = [] # black\nlist2 = [] # white\nlist3 = [] # all\nchessboard = np.zeros((15, 15))\n\nlist_all = [] # 整个棋盘的\n\n\ndef game_win(list):\n for m in range(COLUMN):\n for n in range(ROW):\n\n if n < ROW - 4 and (m, n) in list and (m, n + 1) in list and (m, n + 2) in list and (\n m, n + 3) in list and (m, n + 4) in list:\n return True\n elif m < ROW - 4 and (m, n) in list and (m + 1, n) in list and (m + 2, n) in list and (\n m + 3, n) in list and (m + 4, n) in list:\n return True\n elif m < ROW - 4 and n < ROW - 4 and (m, n) in list and (m + 1, n + 1) in list and (\n m + 2, n + 2) in list and (m + 3, n + 3) in list and (m + 4, n + 4) in list:\n return True\n elif m < ROW - 4 and n > 3 and (m, n) in list and (m + 1, n - 1) in list and (\n m + 2, n - 2) in list and (m + 3, n - 3) in list and (m + 4, n - 4) in list:\n return True\n return False\n\n\ndef gobangwin():\n win = GraphWin(\"this is a gobang game\", 320, 320) \n win.setBackground(\"pink\")\n i1 = 20\n\n while i1 < 321:\n l = Line(Point(i1, 20), Point(i1, 300))\n l.draw(win)\n i1 = i1 + 20\n\n i2 = 20\n\n while i2 < 321:\n l = Line(Point(20, i2), Point(300, i2))\n l.draw(win)\n i2 = i2 + 20\n return win\n\ndef main():\n win = gobangwin()\n for i in range(COLUMN + 1):\n for j in range(ROW + 1):\n list_all.append((i, j))\n\n change = 0\n g = 0\n\n while g == 0:\n\n if change % 2 == 1:\n p2 = win.getMouse()\n\n if not ((round((p2.getX()) / GRID_WIDTH), round((p2.getY()) / GRID_WIDTH)) in list3):\n\n a2 = round((p2.getX()) / GRID_WIDTH - 1)\n b2 = round((p2.getY()) / GRID_WIDTH - 1)\n print(\"HU pos: \" + str([a2, b2]))\n\n chessboard[a2, b2] = 1\n list1.append((a2, b2))\n list3.append((a2, b2))\n\n piece = Circle(Point(GRID_WIDTH * (a2 + 1), GRID_WIDTH * (b2 + 1)), 8)\n piece.setFill('white')\n piece.draw(win)\n if game_win(list1):\n message = Text(Point(100, 100), \"white win.\")\n message.draw(win)\n g = 1\n\n change = change + 1\n\n else:\n p2 = win.getMouse()\n\n if not ((round((p2.getX()) / GRID_WIDTH), round((p2.getY()) / GRID_WIDTH)) in list3):\n\n a2 = round((p2.getX()) / GRID_WIDTH - 1)\n b2 = round((p2.getY()) / GRID_WIDTH - 1)\n print(\"HU pos: \" + str([a2, b2]))\n\n chessboard[a2, b2] = 1\n list2.append((a2, b2))\n list3.append((a2, b2))\n\n piece = Circle(Point(GRID_WIDTH * (a2 + 1), GRID_WIDTH * (b2 + 1)), 8)\n piece.setFill('black')\n piece.draw(win)\n if game_win(list2):\n message = Text(Point(100, 100), \"black win.\")\n message.draw(win)\n g = 1\n\n change = change + 1\n\n message = Text(Point(100, 120), \"Click anywhere to quit.\")\n message.draw(win)\n win.getMouse()\n win.close()\n\n\nmain()\n" ]
[ [ "numpy.zeros" ] ]
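game_win in the Gomoku record above checks the horizontal, vertical, and two diagonal directions with four hand-written five-term membership conditions. The sketch below expresses the same five-in-a-row test with direction vectors over a set of (row, column) stones; the sample positions are invented for illustration:

def has_five(stones):
    stones = set(stones)
    directions = [(0, 1), (1, 0), (1, 1), (1, -1)]  # horizontal, vertical, two diagonals
    for m, n in stones:
        for dm, dn in directions:
            # five consecutive stones starting at (m, n) along (dm, dn)
            if all((m + k * dm, n + k * dn) in stones for k in range(5)):
                return True
    return False

print(has_five([(7, i) for i in range(5)]))  # True: five in a horizontal row
print(has_five([(0, 0), (1, 1), (2, 2)]))    # False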
thomasg3/energy-price-aware-scheduling
[ "fdde23dff891a382f2f3d8f2b852675832f83e8d" ]
[ "learners/nearest_neighbors.py" ]
[ "#!/usr/bin/env python\n\nimport core\nfrom sklearn import neighbors\n\n\nbasic_features = ['HolidayFlag', 'DayOfWeek', 'PeriodOfDay', 'ForecastWindProduction', 'SystemLoadEA', 'SMPEA']\nall_features = ['HolidayFlag', 'DayOfWeek', 'WeekOfYear', 'Day', 'Month', 'Year', 'PeriodOfDay',\n 'ForecastWindProduction', 'SystemLoadEA', 'SMPEA', 'ORKTemperature', 'ORKWindspeed',\n 'CO2Intensity', 'ActualWindProduction', 'SystemLoadEP2']\nfew_features = ['PeriodOfDay', 'ForecastWindProduction', 'SystemLoadEA']\ncolumn_predict = 'SMPEP2'\n\nsimple_preprocessors = [core.interpolate_none]\n\n\ndef knn_11nn_distance_few_feat_30_days(day, prediction_data, instance):\n historic_days = 30\n clf = neighbors.KNeighborsRegressor(n_neighbors=11, weights='distance')\n predictions = core.generic_learner(few_features, column_predict, simple_preprocessors, clf,\n day, prediction_data, instance, historic_days)\n core.export(predictions, 'knn_11nn_distance_few_feat_30_days', day, instance)\n return predictions\n\n\ndef knn_11nn_distance_few_feat_90_days(day, prediction_data, instance):\n historic_days = 90\n clf = neighbors.KNeighborsRegressor(n_neighbors=11, weights='distance')\n predictions = core.generic_learner(few_features, column_predict, simple_preprocessors, clf,\n day, prediction_data, instance, historic_days)\n core.export(predictions, 'knn_11nn_distance_few_feat_90_days', day, instance)\n return predictions\n\n\ndef knn_11nn_distance_few_feat_300_days(day, prediction_data, instance):\n historic_days = 300\n clf = neighbors.KNeighborsRegressor(n_neighbors=11, weights='distance')\n predictions = core.generic_learner(few_features, column_predict, simple_preprocessors, clf,\n day, prediction_data, instance, historic_days)\n core.export(predictions, 'knn_11nn_distance_few_feat_300_days', day, instance)\n return predictions\n\n\ndef knn_11nn_distance_basic_feat_30_days(day, prediction_data, instance):\n historic_days = 30\n clf = neighbors.KNeighborsRegressor(n_neighbors=11, weights='distance')\n predictions = core.generic_learner(basic_features, column_predict, simple_preprocessors, clf,\n day, prediction_data, instance, historic_days)\n core.export(predictions, 'knn_11nn_distance_basic_feat_30_days', day, instance)\n return predictions\n\n\ndef knn_11nn_distance_basic_feat_90_days(day, prediction_data, instance):\n historic_days = 90\n clf = neighbors.KNeighborsRegressor(n_neighbors=11, weights='distance')\n predictions = core.generic_learner(basic_features, column_predict, simple_preprocessors, clf,\n day, prediction_data, instance, historic_days)\n core.export(predictions, 'knn_11nn_distance_basic_feat_90_days', day, instance)\n return predictions\n\n\ndef knn_11nn_distance_basic_feat_300_days(day, prediction_data, instance):\n historic_days = 300\n clf = neighbors.KNeighborsRegressor(n_neighbors=11, weights='distance')\n predictions = core.generic_learner(basic_features, column_predict, simple_preprocessors, clf,\n day, prediction_data, instance, historic_days)\n core.export(predictions, 'knn_11nn_distance_basic_feat_300_days', day, instance)\n return predictions\n\n\ndef knn_11nn_distance_all_feat_30_days(day, prediction_data, instance):\n historic_days = 30\n clf = neighbors.KNeighborsRegressor(n_neighbors=11, weights='distance')\n predictions = core.generic_learner(all_features, column_predict, simple_preprocessors, clf,\n day, prediction_data, instance, historic_days)\n core.export(predictions, 'knn_11nn_distance_all_feat_30_days', day, instance)\n return predictions\n\n\ndef 
knn_11nn_distance_all_feat_90_days(day, prediction_data, instance):\n historic_days = 90\n clf = neighbors.KNeighborsRegressor(n_neighbors=11, weights='distance')\n predictions = core.generic_learner(all_features, column_predict, simple_preprocessors, clf,\n day, prediction_data, instance, historic_days)\n core.export(predictions, 'knn_11nn_distance_all_feat_90_days', day, instance)\n return predictions\n\n\ndef knn_11nn_distance_all_feat_300_days(day, prediction_data, instance):\n historic_days = 300\n clf = neighbors.KNeighborsRegressor(n_neighbors=11, weights='distance')\n predictions = core.generic_learner(all_features, column_predict, simple_preprocessors, clf,\n day, prediction_data, instance, historic_days)\n core.export(predictions, 'knn_11nn_distance_all_feat_300_days', day, instance)\n return predictions\n\n\n\n\n\n" ]
[ [ "sklearn.neighbors.KNeighborsRegressor" ] ]
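Every function in the nearest_neighbors record above configures the same KNeighborsRegressor(n_neighbors=11, weights='distance') and varies only the feature list and the number of historic days handed to core.generic_learner. A standalone sketch of that regressor on hypothetical data (the project's core helpers are not reproduced here):

import numpy as np
from sklearn.neighbors import KNeighborsRegressor

rng = np.random.default_rng(0)
X = rng.uniform(0.0, 1.0, size=(200, 3))        # stand-ins for the 'few_features' columns
y = 40 + 30 * X[:, 2] + rng.normal(0, 2, 200)   # stand-in for the SMPEP2 target

model = KNeighborsRegressor(n_neighbors=11, weights='distance')
model.fit(X[:150], y[:150])                     # e.g. the trailing "historic days" window
print(model.predict(X[150:155]).round(2))

With weights='distance', closer neighbours dominate each prediction instead of a plain average over the 11 neighbours.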
mina-payout/mina
[ "4fab6c9366292b9d0c964e498fea743eb47623f7" ]
[ "automation/services/mina-bp-stats/payout-process/main_app/payouts_validate.py" ]
[ "from numpy.core.numeric import NaN\nimport pandas as pd\nimport psycopg2\nfrom google.cloud import storage\nimport os\nimport json\nfrom payouts_config import BaseConfig\nfrom datetime import datetime, timezone, timedelta\nimport math\nimport sys\nfrom validate_email import second_mail\nfrom logger_util import logger\nfrom payout_summary_mail import payout_summary_mail\nimport warnings\n\nwarnings.filterwarnings('ignore')\n\nconnection_archive = psycopg2.connect(\n host=BaseConfig.POSTGRES_ARCHIVE_HOST,\n port=BaseConfig.POSTGRES_ARCHIVE_PORT,\n database=BaseConfig.POSTGRES_ARCHIVE_DB,\n user=BaseConfig.POSTGRES_ARCHIVE_USER,\n password=BaseConfig.POSTGRES_ARCHIVE_PASSWORD\n)\nconnection_payout = psycopg2.connect(\n host=BaseConfig.POSTGRES_PAYOUT_HOST,\n port=BaseConfig.POSTGRES_PAYOUT_PORT,\n database=BaseConfig.POSTGRES_PAYOUT_DB,\n user=BaseConfig.POSTGRES_PAYOUT_USER,\n password=BaseConfig.POSTGRES_PAYOUT_PASSWORD\n)\n\nERROR = 'Error: {0}'\n\n\ndef read_delegation_record_table(epoch_no):\n cursor = connection_payout.cursor()\n query = 'select * from payout_summary '\n try:\n cursor.execute(query, str(epoch_no))\n delegation_record_list = cursor.fetchall()\n delegation_record_df = pd.DataFrame(delegation_record_list,\n columns=['provider_pub_key', 'winner_pub_key', 'blocks', 'payout_amount',\n 'payout_balance', 'last_delegation_epoch', 'last_slot_validated'])\n\n except (Exception, psycopg2.DatabaseError) as error:\n logger.error(ERROR.format(error))\n cursor.close()\n\n return delegation_record_df\n\n\ndef get_gcs_client():\n os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = BaseConfig.CREDENTIAL_PATH\n return storage.Client()\n\n\ndef read_staking_json(epoch_no):\n modified_staking_df = pd.DataFrame()\n storage_client = get_gcs_client()\n # get bucket with name\n bucket = storage_client.get_bucket(BaseConfig.GCS_BUCKET_NAME)\n if is_genesis_epoch(epoch_no):\n staking_file_prefix = \"staking-1-\" # use first ledger, filter out 10,11,12 and so on ..\n else:\n staking_file_prefix = \"staking-\" + str(epoch_no)\n blobs = storage_client.list_blobs(bucket, prefix=staking_file_prefix)\n # convert to string\n file_dict_for_memory = dict()\n for blob in blobs:\n file_dict_for_memory[blob.name] = blob.updated\n\n sorted_list = [k for k, v in sorted(file_dict_for_memory.items(), key=lambda p: p[1], reverse=False)]\n recent_file = sorted_list[-1] \n blobs = storage_client.list_blobs(bucket, prefix=recent_file)\n for blob in blobs:\n logger.info(blob.name)\n json_data_string = blob.download_as_string()\n json_data_dict = json.loads(json_data_string)\n staking_df = pd.json_normalize(json_data_dict)\n modified_staking_df = staking_df[['pk', 'balance', 'delegate']]\n modified_staking_df['pk'] = modified_staking_df['pk'].astype(str)\n modified_staking_df['balance'] = modified_staking_df['balance'].astype(float)\n modified_staking_df['delegate'] = modified_staking_df['delegate'].astype(str)\n return modified_staking_df\n\n\ndef determine_slot_range_for_validation(epoch_no, last_slot_validated):\n # find entry from summary table for matching winner+provider pub key\n # check last_delegation_epoch\n # - when NULL : start = epoch_no-1 * 7140, end = ((epoch_no+1)*7140) +3500\n # - when < (epoch_no-1) : start = (last_delegation_epoch * 7140)+3500, end = ((epoch_no+1)*7140) +3500\n # - when == epoch_no : start = epoch * 7140, end = ((epoch+1)*7140) +3500\n\n # then fetch the payout transactions for above period for each winner+provider pub key combination\n start_slot = ((epoch_no) * 7140) + 3500\n 
end_slot = ((epoch_no+1) * 7140) + 3500 - 1\n \n # update code to use simple condition\n # as the validation for received amount is done against Delegating account\n # can't use same slot duration again, even for discontinued delegation\n \"\"\" if last_slot_validated >0:\n start_slot = last_slot_validated +1\n else:\n start_slot = ((epoch_no-1) * 7140) \"\"\"\n return start_slot, end_slot\n\n\ndef get_record_for_validation(epoch_no):\n cursor = connection_archive.cursor()\n query = '''WITH RECURSIVE chain AS (\n (SELECT b.id, b.state_hash,parent_id, b.creator_id,b.height,b.global_slot_since_genesis,b.global_slot_since_genesis/7140 as epoch,b.staking_epoch_data_id\n FROM blocks b WHERE height = (select MAX(height) from blocks)\n ORDER BY timestamp ASC\n LIMIT 1)\n UNION ALL\n SELECT b.id, b.state_hash,b.parent_id, b.creator_id,b.height,b.global_slot_since_genesis,b.global_slot_since_genesis/7140 as epoch,b.staking_epoch_data_id\n FROM blocks b\n INNER JOIN chain ON b.id = chain.parent_id AND chain.id <> chain.parent_id\n ) SELECT sum(amount)/power(10,9) as total_pay, pk.value as creator ,epoch\n FROM chain c INNER JOIN blocks_user_commands AS buc on c.id = buc.block_id\n inner join (SELECT * FROM user_commands where type='payment' ) AS uc on\n uc.id = buc.user_command_id and status <>'failed'\n INNER JOIN public_keys as PK ON PK.id = uc.receiver_id \n GROUP BY pk.value, epoch'''\n\n try:\n cursor.execute(query)\n validation_record_list = cursor.fetchall()\n validation_record_df = pd.DataFrame(validation_record_list,\n columns=['total_pay', 'provider_pub_key', 'epoch'])\n except (Exception, psycopg2.DatabaseError) as error:\n logger.error(ERROR.format(error))\n cursor.close()\n\n return validation_record_df\n\n\ndef get_record_for_validation_for_single_acc(provider_key, start_slot, end_slot):\n cursor = connection_archive.cursor()\n query = '''WITH RECURSIVE chain AS ( (SELECT b.id, b.state_hash,parent_id, b.creator_id,b.height, \n b.global_slot_since_genesis,b.global_slot_since_genesis/7140 as epoch,b.staking_epoch_data_id FROM blocks b WHERE \n height = (select MAX(height) from blocks) ORDER BY timestamp ASC LIMIT 1) \n UNION ALL SELECT b.id, b.state_hash, \n b.parent_id, b.creator_id,b.height,b.global_slot_since_genesis,b.global_slot_since_genesis/7140 as epoch, \n b.staking_epoch_data_id FROM blocks b INNER JOIN chain ON b.id = chain.parent_id AND chain.id <> chain.parent_id \n ) , whitelist as \n ( SELECT amount, uc.receiver_id FROM chain c INNER JOIN blocks_user_commands AS buc on c.id = \n buc.block_id inner join (SELECT * FROM user_commands where type='payment' ) AS uc on uc.id = buc.user_command_id \n and status <>'failed' Join public_keys as sk on uc.source_id=sk.id where sk.value not in (select public_key from \n whitelist_records wr) and global_slot_since_genesis BETWEEN %s and %s ) \n SELECT sum(amount)/power(10,9) as total_pay, pk.value as creator FROM whitelist c INNER JOIN public_keys as PK ON PK.id = c.receiver_id where \n pk.value = %s GROUP BY pk.value'''\n\n try:\n cursor.execute(query, (start_slot, end_slot, provider_key))\n validation_record_list = cursor.fetchall()\n validation_record_df = pd.DataFrame(validation_record_list,\n columns=['total_pay', 'provider_pub_key'])\n except (Exception, psycopg2.DatabaseError) as error:\n logger.error(ERROR.format(error))\n cursor.close()\n\n return validation_record_df\n\n\ndef insert_into_audit_table(epoch_no):\n timestamp = datetime.now(timezone.utc)\n values = timestamp, epoch_no, 'validation'\n insert_audit_sql = \"\"\"INSERT 
INTO payout_audit_log (updated_at, epoch_id,job_type) \n values(%s, %s, %s ) \"\"\"\n try:\n cursor = connection_payout.cursor()\n cursor.execute(insert_audit_sql, values)\n connection_payout.commit()\n except (Exception, psycopg2.DatabaseError) as error:\n logger.error(ERROR.format(error))\n connection_payout.rollback()\n cursor.close()\n finally:\n cursor.close()\n connection_payout.commit()\n\n# make sure DB restore is done before validation process\ndef check_db_restore_status(epoch_no):\n max_blocks = 0\n end_slot = ((epoch_no+1) * 7140) + 3500 - 1\n result = -1\n query = \"select max(global_slot_since_genesis) from blocks\"\n cursor = connection_archive.cursor()\n try:\n cursor.execute(query)\n data = cursor.fetchall()\n max_blocks = int(data[-1][-1])\n except (Exception, psycopg2.DatabaseError) as error:\n logger.error(\"Error: {0} \".format(error))\n cursor.close()\n if not end_slot > max_blocks:\n result = 1\n return result\n\ndef get_payout_due_records(epoch_no):\n undelegated_df = read_delegation_record_table(epoch_no)\n undelegated_df = undelegated_df.loc[\n (undelegated_df['last_delegation_epoch'] < epoch_no)]\n filter_df = pd.DataFrame()\n if not undelegated_df.empty:\n filter_df = undelegated_df[\n ['provider_pub_key', 'winner_pub_key', 'payout_balance', 'last_delegation_epoch', 'last_slot_validated']]\n filter_df = filter_df.rename(\n columns={'payout_balance': 'payout_obligation', 'last_delegation_epoch': 'epoch_no',\n 'last_slot_validated': 'end_slot'})\n filter_df['start_slot'] = filter_df['epoch_no'] * 7140 + 3500\n filter_df['payout_received'] = 0\n filter_df['balance_this_epoch'] = filter_df['payout_obligation']\n filter_df['balance_cumulative'] = filter_df['payout_obligation']\n filter_df = filter_df[\n ['provider_pub_key', 'winner_pub_key', 'payout_obligation', 'payout_received', 'balance_this_epoch',\n 'balance_cumulative','epoch_no', 'start_slot', 'end_slot']]\n\n return filter_df\n\ndef truncate(number, digits=5) -> float:\n stepper = 10.0 ** digits\n return math.trunc(stepper * number) / stepper\n\n\ndef main(epoch_no, do_send_email):\n result = 0\n logger.info(\"###### in payout_validation main for epoch: {0}\".format(epoch_no))\n delegation_record_df = read_delegation_record_table(epoch_no=epoch_no)\n validation_record_df = get_record_for_validation(epoch_no=epoch_no)\n staking_df = read_staking_json(epoch_no=epoch_no)\n result = check_db_restore_status(epoch_no)\n if not staking_df.empty and result >=0:\n email_rows = []\n payouts_rows = []\n\n for row in delegation_record_df.itertuples():\n pub_key = getattr(row, \"provider_pub_key\")\n payout_amount = getattr(row, \"payout_amount\")\n payout_balance = getattr(row, \"payout_balance\")\n last_delegation_epoch = getattr(row, 'last_delegation_epoch')\n delegate_pub_key = getattr(row, 'winner_pub_key')\n last_slot_validated = getattr(row, 'last_slot_validated')\n filter_validation_record_df = staking_df.loc[(staking_df['pk'] == pub_key) & (staking_df['delegate'] == delegate_pub_key)]\n if not filter_validation_record_df.empty:\n start_slot, end_slot = determine_slot_range_for_validation(epoch_no, last_slot_validated)\n payout_recieved = get_record_for_validation_for_single_acc(pub_key, start_slot, end_slot)\n total_pay_received = 0\n balance_this_epoch = 0\n if not payout_recieved.empty:\n total_pay_received = truncate(payout_recieved.iloc[0]['total_pay'],5)\n balance_this_epoch = payout_amount - total_pay_received\n else:\n balance_this_epoch = payout_amount\n balance_this_epoch = truncate(balance_this_epoch, 
5) \n new_payout_balance = truncate((payout_amount + payout_balance) - total_pay_received)\n filter_staking_df = staking_df.loc[staking_df['pk'] == pub_key, 'delegate']\n winner_pub_key = filter_staking_df.iloc[0]\n email_rows.append([pub_key, winner_pub_key, payout_amount, total_pay_received])\n payouts_rows.append(\n [pub_key, winner_pub_key, payout_amount, total_pay_received, balance_this_epoch, new_payout_balance, epoch_no, start_slot, end_slot])\n winner_match = False\n if delegate_pub_key == winner_pub_key:\n winner_match = True\n logger.debug(\n '{0} {1} {2} {3} {4} {5} {6} {7}'.format(winner_match, pub_key, delegate_pub_key, winner_pub_key,\n start_slot, end_slot,\n total_pay_received,\n new_payout_balance))\n\n # update record in payout summary\n query = ''' UPDATE payout_summary SET payout_amount = 0, payout_balance = %s,\n last_delegation_epoch = %s, last_slot_validated = %s\n WHERE provider_pub_key = %s and winner_pub_key = %s\n '''\n try:\n cursor = connection_payout.cursor()\n cursor.execute(query, (new_payout_balance, epoch_no, end_slot, pub_key, winner_pub_key))\n except (Exception, psycopg2.DatabaseError) as error:\n logger.error(\"Error: {0} \", format(error))\n connection_payout.rollback()\n cursor.close()\n result = -1\n finally:\n cursor.close()\n else:\n logger.warning(\"No records found in staking ledger: {0}\".format(pub_key))\n insert_into_audit_table(epoch_no)\n # sending second mail 24 hours left for making payments back to foundations account\n result = epoch_no\n undelegate_df = get_payout_due_records(epoch_no)\n email_df = pd.DataFrame(email_rows, columns=[\"provider_pub_key\", \"winner_pub_key\", \"payout_amount\", \"payout_received\"])\n if do_send_email:\n second_mail(email_df, epoch_no)\n\n payout_summary_df = pd.DataFrame(payouts_rows,\n columns=['provider_pub_key', 'winner_pub_key', 'payout_amount',\n 'payout_received', 'balance_this_epoch', 'payout_balance', 'epoch_no', 'start_slot', 'end_slot']) \n payout_summary_df = payout_summary_df.rename(columns={'payout_amount': 'payout_obligation', 'payout_balance': 'balance_cumulative'})\n # append undelegated records\n if not undelegate_df.empty:\n payout_summary_df = payout_summary_df.append(undelegate_df)\n csv_name=BaseConfig.LOGGING_LOCATION + BaseConfig.VALIDATION_CSV_FILE % (epoch_no)\n payout_summary_df.to_csv(csv_name)\n payout_summary_mail(csv_name, epoch_no, do_send_email)\n \n else:\n logger.warning(\"Staking ledger not found or archive db not updated for epoch number {0}\".format(epoch_no))\n sys.exit(-1)\n return result\n\ndef get_last_processed_epoch_from_audit(job_type):\n audit_query = '''select epoch_id from payout_audit_log where job_type=%s \n order by id desc limit 1'''\n last_epoch = 0\n values = job_type,\n try:\n cursor = connection_payout.cursor()\n cursor.execute(audit_query, values)\n if cursor.rowcount > 0:\n data_count = cursor.fetchall()\n last_epoch = int(data_count[-1][-1])\n except (Exception, psycopg2.DatabaseError) as error:\n logger.error(ERROR.format(error))\n cursor.close()\n return -1\n finally:\n cursor.close()\n return last_epoch\n\n# for epoch 0 & epoch 1 \n# - have to use same staking ledger 'staking-1'\n# - blocks produced would be for epoch 0 & epoch 1\n# - payment recieved would be for epoch 0 & epoch 1\ndef is_genesis_epoch(epoch_id):\n return True if epoch_id<2 else False\n\n# this will check audit log table, and will determine last processed epoch\n# if no entries found, default to first epoch\ndef initialize():\n result = 0\n last_epoch = 
get_last_processed_epoch_from_audit('validation')\n logger.info(last_epoch)\n result = main(last_epoch + 1, True)\n if can_run_job(last_epoch+1):\n logger.info(\" validation Audit found for epoch {0}\".format(last_epoch))\n result = main(last_epoch + 1, True)\n else:\n result = last_epoch\n return result\n\n# determine whether process can run now for given epoch number\ndef can_run_job(next_epoch):\n next_epoch_end = (int(next_epoch+1) * 7140 * 3) + (3500 * 3)\n next_job_time = BaseConfig.GENESIS_DATE + timedelta(minutes=next_epoch_end)\n next_job_time = next_job_time.replace(tzinfo=timezone.utc)\n next_job_time = next_job_time + timedelta(days=1)\n next_job_time= next_job_time.replace(hour=00, minute=30)\n current_time = datetime.now(timezone.utc)\n if next_job_time > current_time:\n result = False\n else:\n result = True\n return result\n\nif __name__ == \"__main__\":\n epoch_no = initialize()\n if epoch_no is not None:\n sys.exit(epoch_no)\n else:\n sys.exit(-1)\n" ]
[ [ "pandas.json_normalize", "pandas.DataFrame" ] ]
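The payout validation record above leans on two small pieces of arithmetic: the per-epoch slot window in determine_slot_range_for_validation (7140 slots per epoch, offset by 3500) and the digit-truncating truncate helper. A worked sketch of both, with an arbitrary epoch number:

import math

SLOTS_PER_EPOCH = 7140
OFFSET = 3500

def slot_range(epoch_no):
    start_slot = epoch_no * SLOTS_PER_EPOCH + OFFSET
    end_slot = (epoch_no + 1) * SLOTS_PER_EPOCH + OFFSET - 1
    return start_slot, end_slot

def truncate(number, digits=5):
    stepper = 10.0 ** digits
    return math.trunc(stepper * number) / stepper

print(slot_range(10))       # (74900, 82039): 7140 consecutive slots
print(truncate(1.2345678))  # 1.23456 (truncated, not rounded)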
joe-siyuan-qiao/mmdetection
[ "2fcac9660cd40c374bf713dcf333d4b7a51bea06" ]
[ "mmdet/models/roi_heads/roi_extractors/groie.py" ]
[ "\"\"\"Generic RoI Extractor.\n\nA novel Region of Interest Extraction Layer for Instance Segmentation.\n\"\"\"\n\nfrom torch import nn\n\nfrom mmdet.core import force_fp32\nfrom mmdet.models.builder import ROI_EXTRACTORS\nfrom mmdet.ops.plugin import build_plugin_layer\nfrom .single_level import SingleRoIExtractor\n\n\n@ROI_EXTRACTORS.register_module\nclass SumGenericRoiExtractor(SingleRoIExtractor):\n \"\"\"Extract RoI features from all summed feature maps levels.\n\n https://arxiv.org/abs/2004.13665\n\n Args:\n pre_cfg (dict): Specify pre-processing modules.\n post_cfg (dict): Specify post-processing modules.\n kwargs (keyword arguments): Arguments that are the same\n as :class:`SingleRoIExtractor`.\n \"\"\"\n\n def __init__(self, pre_cfg, post_cfg, **kwargs):\n super(SumGenericRoiExtractor, self).__init__(**kwargs)\n\n # build pre/post processing modules\n self.post_module = build_plugin_layer(post_cfg, '_post_module')[1]\n self.pre_module = build_plugin_layer(pre_cfg, '_pre_module')[1]\n self.relu = nn.ReLU(inplace=False)\n\n @force_fp32(apply_to=('feats', ), out_fp16=True)\n def forward(self, feats, rois, roi_scale_factor=None):\n if len(feats) == 1:\n return self.roi_layers[0](feats[0], rois)\n\n out_size = self.roi_layers[0].out_size\n num_levels = len(feats)\n roi_feats = feats[0].new_zeros(\n rois.size(0), self.out_channels, *out_size)\n\n # some times rois is an empty tensor\n if roi_feats.shape[0] == 0:\n return roi_feats\n\n if roi_scale_factor is not None:\n rois = self.roi_rescale(rois, roi_scale_factor)\n\n for i in range(num_levels):\n # apply pre-processing to a RoI extracted from each layer\n roi_feats_t = self.roi_layers[i](feats[i], rois)\n roi_feats_t = self.pre_module(roi_feats_t)\n roi_feats_t = self.relu(roi_feats_t)\n # and sum them all\n roi_feats += roi_feats_t\n\n # apply post-processing before return the result\n x = self.post_module(roi_feats)\n return x\n" ]
[ [ "torch.nn.ReLU" ] ]
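SumGenericRoiExtractor in the mmdetection record above aggregates RoI features by passing each level's crop through a configurable pre-module and a ReLU, summing across levels, and applying a post-module. A plain-PyTorch sketch of that accumulation pattern, with 1x1 convolutions standing in for the plugin modules and random tensors standing in for the RoI-aligned crops:

import torch
from torch import nn

num_levels, channels, out_size, num_rois = 4, 8, 7, 5
pre = nn.Conv2d(channels, channels, kernel_size=1)   # stand-in for the pre_cfg plugin
post = nn.Conv2d(channels, channels, kernel_size=1)  # stand-in for the post_cfg plugin
relu = nn.ReLU(inplace=False)

# pretend each level already produced RoI-aligned crops of shape (num_rois, C, 7, 7)
level_roi_feats = [torch.randn(num_rois, channels, out_size, out_size)
                   for _ in range(num_levels)]

roi_feats = torch.zeros(num_rois, channels, out_size, out_size)
for feats in level_roi_feats:
    roi_feats = roi_feats + relu(pre(feats))  # accumulate every level's contribution

out = post(roi_feats)
print(out.shape)  # torch.Size([5, 8, 7, 7])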
bhneo/SparsePooling
[ "6575774ad95cd782bbd228fb08c588b475035fc6" ]
[ "models/res/ex1.py" ]
[ "import os\nimport sys\n\nsys.path.append(os.getcwd())\n\nimport tensorflow as tf\ntf.get_logger().setLevel('ERROR')\nfrom common.inputs import data_input\nfrom common import layers, utils, train, res_blocks, attacks\n\nimport config\n\n\nWEIGHT_DECAY = 1e-4\nBATCH_NORM_EPSILON = 1e-3\nBATCH_NORM_DECAY = 0.99\n\nkernel_regularizer = tf.keras.regularizers.l2(WEIGHT_DECAY)\nkernel_initializer = tf.keras.initializers.he_normal()\nBASE_NAME = 'ex1'\n\n\ndef build_model_name(params):\n model_name = BASE_NAME\n model_name += '_b{}'.format(params.model.resblock)\n\n if params.dataset.flip:\n model_name += '_flip'\n if params.dataset.crop:\n model_name += '_crop'\n return model_name\n\n\ndef get_loss_opt():\n loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n optimizer = tf.keras.optimizers.SGD(0.1)\n return loss, optimizer\n\n\ndef build_model(shape, num_out, params):\n inputs = tf.keras.Input(shape=shape)\n model_name = build_model_name(params)\n probs, tensor_log = build(inputs, num_out,\n params.model.resblock)\n model = tf.keras.Model(inputs=inputs, outputs=probs, name=model_name)\n log_model = tf.keras.Model(inputs=inputs, outputs=tensor_log.get_outputs(), name=model_name + '_log')\n tensor_log.set_model(log_model)\n loss, optimizer = get_loss_opt()\n model.compile(optimizer=optimizer,\n loss=loss,\n metrics=[])\n model.summary()\n lr_scheduler = tf.keras.callbacks.LearningRateScheduler(schedule=lr_schedule, verbose=1)\n lr_scheduler.set_model(model)\n callbacks = [lr_scheduler]\n model.callbacks = callbacks\n return model, tensor_log\n\n\ndef build(inputs, num_out, resblock):\n log = utils.TensorLog()\n resblock = utils.parse_resblock(resblock)\n backbone = res_blocks.build_resnet_backbone(inputs=inputs, repetitions=resblock, layer_num=0,\n start_filters=16, arch='cifar',\n use_bias=False,\n kernel_initializer=kernel_initializer,\n kernel_regularizer=kernel_regularizer,\n bn_axis=-1, momentum=BATCH_NORM_DECAY, epsilon=BATCH_NORM_EPSILON,\n version='v2')\n log.add_hist('backbone', backbone)\n pool = tf.keras.layers.GlobalAveragePooling2D()(backbone)\n output = tf.keras.layers.Dense(num_out)(pool)\n return output, log\n\n\ndef get_norm_fn(dataset):\n channel = 1\n if dataset == 'cifar10' or dataset == 'cifar100' or dataset == 'svhn_cropped':\n channel = 3\n\n def norm(image):\n if channel == 3:\n image = tf.image.per_image_standardization(image)\n return image\n return norm\n\n\ndef build_parse(dataset, flip=False, crop=False, is_train=False, with_norm=True):\n if dataset not in ['cifar10', 'cifar100', 'mnist', 'kmnist', 'emnist', 'fashion_mnist', 'svhn_cropped']:\n raise Exception('{} not support!'.format(dataset))\n if dataset == 'cifar10' or dataset == 'cifar100' or dataset == 'svhn_cropped':\n height, width, channel = 32, 32, 3\n if dataset == 'mnist' or dataset == 'kmnist' or dataset == 'fashion_mnist' or dataset == 'emnist':\n height, width, channel = 28, 28, 1\n\n def parse(image, label):\n image = tf.cast(image, tf.float32)\n image = tf.divide(image, 255.)\n if with_norm:\n image = get_norm_fn(dataset)(image)\n if is_train:\n if flip:\n image = tf.image.random_flip_left_right(image)\n if crop:\n image = tf.image.resize_with_crop_or_pad(image, height+8, width+8)\n image = tf.image.random_crop(image, [height, width, channel])\n return image, label\n return parse\n\n\ndef lr_schedule(epoch, lr):\n if epoch in [60, 80]:\n lr /= 10\n return lr\n\n\ndef main():\n args, params = config.parse_args()\n if params.task == 'train':\n train_set, test_set, info = 
data_input.build_dataset(params.dataset.name,\n parser_train=build_parse(params.dataset.name,\n flip=params.dataset.flip,\n crop=params.dataset.crop,\n is_train=True),\n parser_test=build_parse(params.dataset.name,\n is_train=False),\n batch_size=params.training.batch_size)\n model, tensor_log = build_model(shape=info.features['image'].shape,\n num_out=info.features['label'].num_classes,\n params=params)\n\n trainer = train.Trainer(model, params, info, tensor_log)\n if args.train:\n trainer.fit(train_set, test_set)\n else:\n trainer.evaluate(test_set)\n elif params.task == 'attack':\n do_adv(os.getcwd())\n\n\ndef load_ckpt(model, model_dir):\n loss, optimizer = get_loss_opt()\n model.compile(optimizer=optimizer,\n loss=loss,\n metrics=[])\n ckpt = tf.train.Checkpoint(optimizer=model.optimizer, net=model)\n manager = tf.train.CheckpointManager(ckpt, model_dir, max_to_keep=3)\n ckpt.restore(manager.latest_checkpoint)\n if manager.latest_checkpoint:\n print(\"Restored from {}\".format(manager.latest_checkpoint))\n\n\ndef get_input_set(dataset):\n if dataset == 'fashion_mnist' or dataset == 'kmnist':\n data_shape = (28, 28, 1)\n num_out = 10\n flip = False\n crop = True\n elif dataset == 'cifar10':\n data_shape = (32, 32, 3)\n num_out = 10\n flip = True\n crop = True\n elif dataset == 'svhn_cropped':\n data_shape = (32, 32, 3)\n num_out = 10\n flip = False\n crop = True\n return data_shape, num_out, flip, crop\n\n\ndef get_model_dir(dataset, log='log', resblocks='333'):\n data_shape, num_out, flip, crop = get_input_set(dataset)\n model_dir = '{}/{}/{}_b{}'.format(log, dataset, BASE_NAME, resblocks)\n\n if flip:\n model_dir += '_flip'\n if crop:\n model_dir += '_crop'\n\n if not os.path.exists(model_dir):\n raise Exception('model not exist:{}'.format(model_dir))\n return model_dir, data_shape, num_out, flip, crop\n\n\ndef load_model(data_shape, model_dir, num_out,\n resblocks='333', input_norm=None):\n inputs = tf.keras.Input(data_shape)\n probs, log = build(inputs=inputs if input_norm is None else layers.InputNorm(input_norm)(inputs),\n num_out=num_out,\n resblock=resblocks)\n model = tf.keras.Model(inputs=inputs, outputs=probs, name='x')\n load_ckpt(model, model_dir)\n return model\n\n\ndef evaluate_attack(epsilons, root='', log='log', dataset='kmnist', metric='acc', all_target=False,\n method='FGSM', steps=10, black_box=False,\n resblocks='333'):\n model_dir, data_shape, num_out, flip, crop = get_model_dir(dataset, root+log, resblocks=resblocks)\n model = load_model(data_shape, model_dir, num_out,\n resblocks=resblocks, input_norm=get_norm_fn(dataset))\n if black_box:\n print('load black box source model')\n model_dir, data_shape, num_out, flip, crop = get_model_dir(dataset, root + log, resblocks=resblocks)\n model_src = load_model(data_shape, model_dir, num_out,\n resblocks=resblocks, input_norm=get_norm_fn(dataset))\n else:\n model_src = model\n\n loss, _ = get_loss_opt()\n _, test_set, info = data_input.build_dataset(dataset,\n path=root + 'data',\n parser_train=build_parse(dataset,\n flip=False,\n crop=False,\n is_train=True),\n parser_test=build_parse(dataset,\n is_train=False,\n with_norm=False),\n batch_size=512)\n\n acc_adv = tf.keras.metrics.SparseCategoricalAccuracy(name='acc_adv')\n if metric == 'acc':\n results = attacks.evaluate_model_after_attacks(epsilons, acc_adv, test_set, model, loss, method=method, steps=steps, x_min=0, x_max=1, model_src=model_src)\n elif metric == 'success':\n if all_target:\n categories = [i for i in range(10)]\n results = 
attacks.evaluate_attacks_success_rate_all_target(epsilons, test_set, model, loss, categories, method=method, steps=steps, x_min=0, x_max=1, cost=True, model_src=model_src)\n else:\n results = attacks.evaluate_attacks_success_rate(epsilons, test_set, model, loss, method=method, steps=steps, x_min=0, x_max=1, model_src=model_src)\n return results\n\n\ndef do_adv(root):\n import time\n all_target = False\n methods = ['PGD', 'BIM', 'FGSM']\n datasets = ['fashion_mnist', 'svhn_cropped', 'cifar10']\n black_box = False\n for dataset in datasets:\n print('dataset:', dataset)\n if dataset == 'cifar10':\n if all_target:\n epsilons = [0.05]\n else:\n epsilons = [0.01, 0.03, 0.06, 0.1]\n else:\n if all_target:\n epsilons = [0.1]\n else:\n epsilons = [0.1, 0.2, 0.3]\n for method in methods:\n print('method:', method)\n t1 = time.time()\n evaluate_attack(epsilons,\n root=root,\n log='log',\n dataset=dataset,\n metric='success',\n all_target=all_target,\n method=method,\n steps=10,\n black_box=black_box)\n t2 = time.time()\n print('time:', t2-t1)\n\n\nif __name__ == \"__main__\":\n utils.init_devices(True)\n main()\n" ]
[ [ "tensorflow.cast", "tensorflow.keras.callbacks.LearningRateScheduler", "tensorflow.image.random_crop", "tensorflow.keras.initializers.he_normal", "tensorflow.keras.optimizers.SGD", "tensorflow.keras.Input", "tensorflow.image.random_flip_left_right", "tensorflow.keras.regularizers.l2", "tensorflow.divide", "tensorflow.train.CheckpointManager", "tensorflow.image.resize_with_crop_or_pad", "tensorflow.train.Checkpoint", "tensorflow.keras.layers.Dense", "tensorflow.keras.Model", "tensorflow.image.per_image_standardization", "tensorflow.keras.layers.GlobalAveragePooling2D", "tensorflow.keras.losses.SparseCategoricalCrossentropy", "tensorflow.get_logger", "tensorflow.keras.metrics.SparseCategoricalAccuracy" ] ]
min942773/parlai_wandb
[ "1d9ba1a0df2199d0247cee8c4929a2598ac7e41a" ]
[ "parlai/core/torch_generator_agent.py" ]
[ "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\n\"\"\"\nGeneric PyTorch-based Generator agent.\n\nImplements quite a bit of boilerplate, including forced-decoding loss and a tree search.\n\nContains the following utilities:\n\n* `ref:TorchGeneratorAgent` class, which serves as a useful parent for generative torch\n agents.\n* Beam class which provides some generic beam functionality for classes to use\n\"\"\"\n\nfrom abc import ABC, abstractmethod\nfrom typing import TypeVar, List, Dict, Optional, Tuple, Set, Iterable\nimport math\nfrom operator import attrgetter\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom parlai.core.opt import Opt\nfrom parlai.utils.distributed import is_distributed, sync_parameters\nfrom parlai.core.torch_agent import TorchAgent, Batch, Output, DictionaryAgent\nfrom parlai.utils.misc import warn_once\nfrom parlai.utils.io import PathManager\nimport parlai.utils.logging as logging\nfrom parlai.core.metrics import (\n Metric,\n SumMetric,\n AverageMetric,\n BleuMetric,\n FairseqBleuMetric,\n)\nfrom parlai.utils.fp16 import FP16SafeCrossEntropy\nfrom parlai.utils.torch import (\n neginf,\n total_parameters,\n trainable_parameters,\n PipelineHelper,\n)\n\n\ntry:\n from nltk.translate import bleu_score as nltkbleu\n\nexcept ImportError:\n nltkbleu = None\n\ntry:\n from fairseq import bleu as fairseq_bleu\n\nexcept ImportError:\n fairseq_bleu = None\n\n\nclass SearchBlocklist(object):\n \"\"\"\n Search block list facilitates blocking ngrams from being generated.\n \"\"\"\n\n def __init__(self, dict_agent: DictionaryAgent) -> None:\n self.dict = dict_agent\n self._phrases: Set[str] = set()\n self._phrase_ngrams: Dict[int, List[List[int]]] = {}\n\n def __bool__(self):\n return bool(self._phrases)\n\n def clear(self) -> None:\n self._phrases = set()\n self._phrase_ngrams = {}\n\n def _add_literal(self, phrase_literal: str):\n if phrase_literal in self._phrases:\n return\n ngram = self.dict.txt2vec(phrase_literal)\n self._phrases.add(phrase_literal)\n logging.debug(f\"Adding '{phrase_literal}' to the beam block_list {ngram}\")\n l = len(ngram)\n if l not in self._phrase_ngrams:\n self._phrase_ngrams[l] = []\n self._phrase_ngrams[l].append(ngram)\n\n def add(self, phrase: str):\n phrase = phrase.strip()\n if not phrase:\n return\n self._add_literal(phrase)\n self._add_literal(phrase + \"s\")\n self._add_literal(phrase.lower())\n self._add_literal(phrase.lower() + \"s\")\n self._add_literal(phrase.upper())\n self._add_literal(phrase.upper() + \"S\")\n self._add_literal(phrase.title())\n self._add_literal(phrase.title() + \"S\")\n self._add_literal(phrase[0].upper() + phrase[1:])\n self._add_literal(phrase[0].upper() + phrase[1:] + \"s\")\n self._add_literal(phrase[0].upper() + phrase[1:].lower())\n self._add_literal(phrase[0].upper() + phrase[1:].lower() + \"s\")\n\n def items(self) -> Iterable[Tuple[int, List[List[int]]]]:\n return self._phrase_ngrams.items()\n\n\nTSType = TypeVar('TSType', bound='TreeSearch')\n\n\nclass TorchGeneratorModel(nn.Module, ABC):\n \"\"\"\n Abstract TorchGeneratorModel.\n\n This interface expects you to implement model with the following reqs:\n\n :attribute model.encoder:\n takes input returns tuple (enc_out, enc_hidden, attn_mask)\n\n :attribute model.decoder:\n takes decoder params and returns decoder outputs after attn\n\n :attribute model.output:\n 
takes decoder outputs and returns distr over dictionary\n \"\"\"\n\n def __init__(\n self,\n padding_idx=0,\n start_idx=1,\n end_idx=2,\n unknown_idx=3,\n input_dropout=0,\n longest_label=1,\n ):\n super().__init__()\n self.NULL_IDX = padding_idx\n self.END_IDX = end_idx\n self.START_IDX = start_idx\n self.register_buffer('START', torch.LongTensor([start_idx]))\n self.longest_label = longest_label\n\n def _get_initial_forced_decoder_input(self, bsz: int, inputs: torch.LongTensor):\n \"\"\"\n Return initial input to the decoder.\n\n :param bsz:\n batchsize\n :param inputs:\n inputs to decode\n\n :return initial_input:\n initial input for the decoder.\n \"\"\"\n return torch.cat([self.START.detach().expand(bsz, 1), inputs], 1)\n\n def decode_forced(self, encoder_states, ys):\n \"\"\"\n Decode with a fixed, true sequence, computing loss.\n\n Useful for training, or ranking fixed candidates.\n\n :param ys:\n the prediction targets. Contains both the start and end tokens.\n\n :type ys:\n LongTensor[bsz, time]\n\n :param encoder_states:\n Output of the encoder. Model specific types.\n\n :type encoder_states:\n model specific\n\n :return:\n pair (logits, choices) containing the logits and MLE predictions\n\n :rtype:\n (FloatTensor[bsz, ys, vocab], LongTensor[bsz, ys])\n \"\"\"\n bsz = ys.size(0)\n seqlen = ys.size(1)\n inputs = ys.narrow(1, 0, seqlen - 1)\n if (ys[:, 0] == self.START_IDX).any():\n raise AssertionError(\n \"The Beginning of Sentence token is automatically added to the \"\n \"label in decode_forced, but you included it in the label. This means \"\n \"your model will have a double BOS token, which is probably not what \"\n \"you intended.\"\n )\n inputs = self._get_initial_forced_decoder_input(bsz, inputs)\n latent, _ = self.decoder(inputs, encoder_states)\n logits = self.output(latent)\n _, preds = logits.max(dim=2)\n return logits, preds\n\n @abstractmethod\n def reorder_encoder_states(self, encoder_states, indices):\n \"\"\"\n Reorder encoder states according to a new set of indices.\n\n This is an abstract method, and *must* be implemented by the user.\n\n Its purpose is to provide beam search with a model-agnostic interface for\n beam search. For example, this method is used to sort hypotheses,\n expand beams, etc.\n\n For example, assume that encoder_states is an bsz x 1 tensor of values\n\n .. code-block:: python\n\n indices = [0, 2, 2]\n encoder_states = [[0.1]\n [0.2]\n [0.3]]\n\n then the output will be\n\n .. code-block:: python\n\n output = [[0.1]\n [0.3]\n [0.3]]\n\n :param encoder_states:\n output from encoder. type is model specific.\n\n :type encoder_states:\n model specific\n\n :param indices:\n the indices to select over. The user must support non-tensor\n inputs.\n\n :type indices: list[int]\n\n :return:\n The re-ordered encoder states. It should be of the same type as\n encoder states, and it must be a valid input to the decoder.\n\n :rtype:\n model specific\n \"\"\"\n pass\n\n @abstractmethod\n def reorder_decoder_incremental_state(self, incremental_state, inds):\n \"\"\"\n Reorder incremental state for the decoder.\n\n Used to expand selected beams in beam search. Unlike reorder_encoder_states,\n implementing this method is optional. 
However, without incremental decoding,\n decoding a single beam becomes O(n^2) instead of O(n), which can make\n beam search impractically slow.\n\n In order to fall back to non-incremental decoding, just return None from this\n method.\n\n :param incremental_state:\n second output of model.decoder\n :type incremental_state:\n model specific\n :param inds:\n indices to select and reorder over.\n :type inds:\n LongTensor[n]\n\n :return:\n The re-ordered decoder incremental states. It should be the same\n type as incremental_state, and usable as an input to the decoder.\n This method should return None if the model does not support\n incremental decoding.\n\n :rtype:\n model specific\n \"\"\"\n pass\n\n def forward(self, *xs, ys=None, prev_enc=None, maxlen=None, bsz=None):\n \"\"\"\n Get output predictions from the model.\n\n :param xs:\n input to the encoder\n :type xs:\n LongTensor[bsz, seqlen]\n :param ys:\n Expected output from the decoder. Used\n for teacher forcing to calculate loss.\n :type ys:\n LongTensor[bsz, outlen]\n :param prev_enc:\n if you know you'll pass in the same xs multiple times, you can pass\n in the encoder output from the last forward pass to skip\n recalcuating the same encoder output.\n :param maxlen:\n max number of tokens to decode. if not set, will use the length of\n the longest label this model has seen. ignored when ys is not None.\n :param bsz:\n if ys is not provided, then you must specify the bsz for greedy\n decoding.\n\n :return:\n (scores, candidate_scores, encoder_states) tuple\n\n - scores contains the model's predicted token scores.\n (FloatTensor[bsz, seqlen, num_features])\n - candidate_scores are the score the model assigned to each candidate.\n (FloatTensor[bsz, num_cands])\n - encoder_states are the output of model.encoder. Model specific types.\n Feed this back in to skip encoding on the next call.\n \"\"\"\n assert ys is not None, \"Greedy decoding in TGModel.forward no longer supported.\"\n # TODO: get rid of longest_label\n # keep track of longest label we've ever seen\n # we'll never produce longer ones than that during prediction\n self.longest_label = max(self.longest_label, ys.size(1))\n\n # use cached encoding if available\n encoder_states = prev_enc if prev_enc is not None else self.encoder(*xs)\n\n # use teacher forcing\n scores, preds = self.decode_forced(encoder_states, ys)\n return scores, preds, encoder_states\n\n\nclass PPLMetric(AverageMetric):\n def value(self):\n return math.exp(super().value())\n\n\nclass TorchGeneratorAgent(TorchAgent, ABC):\n \"\"\"\n Abstract Generator agent; only meant to be extended.\n\n TorchGeneratorAgent aims to handle much of the bookkeeping and infrastructure work\n for any generative models, like seq2seq or transformer. It implements the train_step\n and eval_step. The only requirement is that your model *must* implemented the\n interface TorchGeneratorModel interface.\n \"\"\"\n\n @classmethod\n def upgrade_opt(cls, opt_from_disk: Opt):\n # call the parent upgrades\n opt_from_disk = super(TorchGeneratorAgent, cls).upgrade_opt(opt_from_disk)\n\n # 2019-08-18: Adding support for generation other than beam search\n # Previously, selecting --beam-size > 1 enabled beam search and == 1 was\n # greedy. 
New behavior is --inference greedy or --inference beam.\n if 'inference' not in opt_from_disk:\n assert 'beam_size' in opt_from_disk\n if opt_from_disk['beam_size'] == 1:\n method = 'greedy'\n else:\n method = 'beam'\n opt_from_disk['inference'] = method\n warn_once(f'Old model inference method inferred as {method}')\n\n # 2020-06-03: Changing \"blacklist\" --> \"blocklist\"\n if 'beam_blacklist_filename' in opt_from_disk:\n if opt_from_disk['beam_blacklist_filename'] is not None:\n opt_from_disk['beam_block_list_filename'] = opt_from_disk[\n 'beam_blacklist_filename'\n ]\n del opt_from_disk['beam_blacklist_filename']\n\n # 2020-08-04: Introduce full context beam blocking\n # Previous, specifying --beam-context-block-ngram > 1 would block\n # from generating ngrams from model's context, which is limited\n # by truncation parameters. Now, we block on full dialogue history.\n if 'beam_block_full_context' not in opt_from_disk:\n warn_once('Loading model with `--beam-block-full-context false`')\n opt_from_disk['beam_block_full_context'] = False\n\n return opt_from_disk\n\n @classmethod\n def add_cmdline_args(cls, argparser):\n \"\"\"\n Add command line arguments.\n \"\"\"\n agent = argparser.add_argument_group('Torch Generator Agent')\n agent.add_argument(\n '--beam-size',\n type=int,\n default=1,\n help='Beam size, if 1 then greedy search',\n )\n agent.add_argument(\n '--beam-min-length',\n type=int,\n default=1,\n help='Minimum length of prediction to be generated by the beam search',\n )\n agent.add_argument(\n '--beam-context-block-ngram',\n type=int,\n default=-1,\n help=(\n 'Size n-grams to block in beam search from the context. val <= 0 '\n 'implies no blocking'\n ),\n )\n agent.add_argument(\n '--beam-block-ngram',\n type=int,\n default=-1,\n help='Size n-grams to block in beam search. val <= 0 implies no blocking',\n )\n agent.add_argument(\n '--beam-block-full-context',\n type='bool',\n default=True,\n help='Block n-grams from the *full* history context. Specify False to block '\n 'up to m tokens in the past, where m is truncation parameter for agent',\n )\n agent.add_argument(\n '--beam-length-penalty',\n type=float,\n default=0.65,\n help='Applies a length penalty. Set to 0 for no penalty.',\n )\n agent.add_argument(\n '--skip-generation',\n type='bool',\n default=False,\n hidden=True,\n help='Skip beam search. 
Useful for speeding up training, '\n 'if perplexity is the validation metric.',\n )\n agent.add_argument(\n '--inference',\n choices={'beam', 'greedy', 'topk', 'nucleus', 'delayedbeam'},\n default='greedy',\n help='Generation algorithm',\n )\n agent.add_argument(\n '--topk', type=int, default=10, help='K used in Top K sampling'\n )\n agent.add_argument(\n '--topp', type=float, default=0.9, help='p used in nucleus sampling'\n )\n agent.add_argument(\n '--beam-delay', type=int, default=30, help='used in delayedbeam search'\n )\n agent.add_argument(\n '--beam-block-list-filename',\n type=str,\n default=None,\n help='Load a text file of hard blocks for beam search to never say.',\n )\n agent.add_argument(\n '--temperature',\n type=float,\n default=1.0,\n help='temperature to add during decoding',\n )\n agent.add_argument(\n '--compute-tokenized-bleu',\n type='bool',\n default=False,\n help='if true, compute tokenized bleu scores',\n )\n\n super(TorchGeneratorAgent, cls).add_cmdline_args(argparser)\n return agent\n\n def __init__(self, opt: Opt, shared=None):\n init_model, is_finetune = self._get_init_model(opt, shared)\n super().__init__(opt, shared)\n\n self.beam_size = opt.get('beam_size', 1)\n self.beam_min_length = opt.get('beam_min_length', 1)\n self.beam_block_ngram = opt.get('beam_block_ngram', -1)\n self.beam_context_block_ngram = opt.get('beam_context_block_ngram', -1)\n self.beam_block_full_context = opt.get('beam_block_full_context', False)\n self.temperature = opt.get('temperature', 1.0)\n assert self.temperature > 0, '--temperature must be greater than 0'\n self.output_token_losses = opt.get('verbose', False)\n self.compute_tokenized_bleu = opt.get('compute_tokenized_bleu', False)\n self.beam_block_list: Optional[SearchBlocklist] = None\n\n if shared:\n # set up shared properties\n states = shared.get('states', {})\n self.beam_block_list = shared.get('beam_block_list')\n else:\n # this is not a shared instance of this class, so do full init\n self.criterion = self.build_criterion()\n # ensure all distributed copies will always be in sync\n self.model = self.build_model()\n\n # load the block_list for beam search\n self.beam_block_list = self._load_beam_block_list()\n\n if self.model is None or self.criterion is None:\n raise AttributeError(\n 'build_model() and build_criterion() need to return the model or criterion'\n )\n if self.use_cuda:\n if self.model_parallel:\n ph = PipelineHelper()\n ph.check_compatibility(self.opt)\n self.model = ph.make_parallel(self.model)\n else:\n self.model.cuda()\n self.criterion.cuda()\n\n sync_parameters(self.model)\n train_params = trainable_parameters(self.model)\n total_params = total_parameters(self.model)\n logging.info(\n f\"Total parameters: {total_params:,d} ({train_params:,d} trainable)\"\n )\n\n if self.fp16:\n self.model = self.model.half()\n\n if init_model is not None:\n # load model parameters if available\n logging.info(f'Loading existing model params from {init_model}')\n states = self.load(init_model)\n else:\n states = {}\n\n if shared is not None:\n if 'optimizer' in shared:\n self.optimizer = shared['optimizer']\n elif self._should_initialize_optimizer():\n # do this regardless of share state, but don't\n self.init_optim(\n [p for p in self.model.parameters() if p.requires_grad],\n optim_states=states.get('optimizer'),\n saved_optim_type=states.get('optimizer_type'),\n )\n self.build_lr_scheduler(states, hard_reset=is_finetune)\n\n if shared is None and is_distributed():\n device_ids = None if self.model_parallel else 
[self.opt['gpu']]\n self.model = torch.nn.parallel.DistributedDataParallel(\n self.model, device_ids=device_ids, broadcast_buffers=False\n )\n\n self.reset()\n\n def build_criterion(self):\n \"\"\"\n Construct and return the loss function.\n\n By default torch.nn.CrossEntropyLoss.\n\n If overridden, this model should produce a sum that can be used for a per-token loss.\n \"\"\"\n if not self.fp16:\n return torch.nn.CrossEntropyLoss(\n ignore_index=self.NULL_IDX, reduction='none'\n )\n else:\n # FP16 safe cross entropy (softmax done in FP32)\n return FP16SafeCrossEntropy(ignore_index=self.NULL_IDX, reduction='none')\n\n def _v2t(self, vec):\n \"\"\"\n Convert token indices to string of tokens.\n \"\"\"\n new_vec = []\n if hasattr(vec, 'cpu'):\n vec = vec.cpu()\n for i in vec:\n if i == self.END_IDX:\n break\n elif i != self.START_IDX:\n new_vec.append(i)\n return self.dict.vec2txt(new_vec)\n\n def set_interactive_mode(self, mode, shared=False):\n \"\"\"\n Turn on interactive mode.\n \"\"\"\n super().set_interactive_mode(mode, shared)\n if mode:\n self.skip_generation = False\n else:\n self.skip_generation = self.opt.get('skip_generation', False)\n\n def _dummy_batch(self, batchsize, maxlen):\n \"\"\"\n Create a dummy batch.\n\n This is used to preinitialize the cuda buffer, or otherwise force a\n null backward pass after an OOM.\n\n If your model uses additional inputs beyond text_vec and label_vec,\n you will need to override it to add additional fields.\n \"\"\"\n text_vec = (\n torch.arange(1, maxlen + 1) # need it as long as specified\n .clamp(max=3) # cap at 3 for testing with tiny dictionaries\n .unsqueeze(0)\n .expand(batchsize, maxlen)\n .cuda()\n )\n # label vec has two tokens to make it interesting, but we we can't use the\n # start token, it's reserved.\n label_vec = (\n torch.LongTensor([self.END_IDX, self.NULL_IDX])\n .unsqueeze(0)\n .expand(batchsize, 2)\n .cuda()\n )\n return Batch(\n text_vec=text_vec, label_vec=label_vec, text_lengths=[maxlen] * batchsize\n )\n\n def _init_cuda_buffer(self, batchsize, maxlen, force=False):\n \"\"\"\n Pre-initialize CUDA buffer by doing fake forward pass.\n\n This is also used in distributed mode to force a worker to sync with others.\n \"\"\"\n if self.use_cuda and (force or not hasattr(self, 'buffer_initialized')):\n try:\n self._control_local_metrics(disabled=True)\n loss = 0 * self.compute_loss(self._dummy_batch(batchsize, maxlen))\n self._control_local_metrics(enabled=True)\n self._temporarily_disable_local_metrics = False\n self.backward(loss)\n self.buffer_initialized = True\n except RuntimeError as e:\n if 'out of memory' in str(e):\n m = (\n 'CUDA OOM: Lower batch size (-bs) from {} or lower '\n ' max sequence length (-tr) from {}'\n ''.format(batchsize, maxlen)\n )\n raise RuntimeError(m)\n else:\n raise e\n\n def reset_metrics(self):\n \"\"\"\n Reset metrics for reporting loss and perplexity.\n \"\"\"\n super().reset_metrics()\n\n def share(self):\n \"\"\"\n Share internal states between parent and child instances.\n \"\"\"\n shared = super().share()\n shared['beam_block_list'] = self.beam_block_list\n if hasattr(self, 'optimizer'):\n shared['optimizer'] = self.optimizer\n return shared\n\n def vectorize(self, *args, **kwargs):\n \"\"\"\n Override vectorize for generative models.\n \"\"\"\n kwargs['add_start'] = False # model does this in module code\n kwargs['add_end'] = True # we do want this\n return super().vectorize(*args, **kwargs)\n\n def _model_input(self, batch):\n \"\"\"\n Create the input (x) value for the model.\n\n 
Must return a tuple. This will be passed directly into the model via\n `*args`, i.e.,\n\n >>> model(*_model_input(batch))\n\n This is intentionally overridable so that richer models can pass the\n additional inputs.\n \"\"\"\n return (batch.text_vec,)\n\n def _encoder_input(self, batch):\n \"\"\"\n Create the input (x) value for the encoder.\n\n Must return a tuple. This will be passed directly into the encoder via\n `*args`, i.e.,\n\n >>> model.encoder(*_encoder_input(batch))\n\n This is intentionally overridable so that richer models can pass the\n additional inputs directly to the encoder.\n \"\"\"\n return self._model_input(batch)\n\n def compute_loss(self, batch, return_output=False):\n \"\"\"\n Compute and return the loss for the given batch.\n\n Easily overridable for customized loss functions.\n\n If return_output is True, the full output from the call to self.model()\n is also returned, via a (loss, model_output) pair.\n \"\"\"\n if batch.label_vec is None:\n raise ValueError('Cannot compute loss without a label.')\n model_output = self.model(*self._model_input(batch), ys=batch.label_vec)\n scores, preds, *_ = model_output\n score_view = scores.view(-1, scores.size(-1))\n loss = self.criterion(score_view, batch.label_vec.view(-1))\n loss = loss.view(scores.shape[:-1]).sum(dim=1)\n # save loss to metrics\n notnull = batch.label_vec.ne(self.NULL_IDX)\n target_tokens = notnull.long().sum(dim=-1)\n correct = ((batch.label_vec == preds) * notnull).sum(dim=-1)\n\n self.record_local_metric('loss', AverageMetric.many(loss, target_tokens))\n self.record_local_metric('ppl', PPLMetric.many(loss, target_tokens))\n self.record_local_metric(\n 'token_acc', AverageMetric.many(correct, target_tokens)\n )\n # actually do backwards loss\n loss = loss.sum()\n loss /= target_tokens.sum() # average loss per token\n if return_output:\n return (loss, model_output)\n else:\n return loss\n\n def train_step(self, batch):\n \"\"\"\n Train on a single batch of examples.\n \"\"\"\n # helps with memory usage\n # note we want to use the opt's batchsize instead of the observed batch size\n # in case dynamic batching is in use\n self._init_cuda_buffer(self.opt['batchsize'], self.label_truncate or 256)\n self.model.train()\n self.zero_grad()\n\n try:\n loss = self.compute_loss(batch)\n self.backward(loss)\n self.update_params()\n oom_sync = False\n except RuntimeError as e:\n # catch out of memory exceptions during fwd/bck (skip batch)\n if 'out of memory' in str(e):\n oom_sync = True\n logging.error(\n 'Ran out of memory, skipping batch. '\n 'if this happens frequently, decrease batchsize or '\n 'truncate the inputs to the model.'\n )\n self.global_metrics.add('skipped_batches', SumMetric(1))\n else:\n raise e\n\n if oom_sync:\n # moved outside of the try-except because the raised exception in scope\n # actually prevents from the data being freed, which can sometimes cause\n # us to OOM during our OOM handling.\n # https://github.com/pytorch/pytorch/issues/18853#issuecomment-583779161\n\n # gradients are synced on backward, now this model is going to be\n # out of sync! 
catch up with the other workers\n self._init_cuda_buffer(8, 8, True)\n\n def _construct_token_losses(self, labels, model_output):\n # Get non-aggregated losses\n scores, _, _ = model_output\n score_view = scores.view(-1, scores.size(-1))\n losses = self.criterion(score_view, labels.view(-1)).view(len(labels), -1)\n\n # Zip decoded tokens with losses\n token_losses = []\n for i, label in enumerate(labels):\n token_losses.append(\n list(\n zip(\n [self.dict[token] for token in label.tolist()],\n losses[i].tolist(),\n )\n )\n )\n return token_losses\n\n def _compute_fairseq_bleu(self, batch: Batch, preds):\n \"\"\"\n Compute BLEU score between text and label, using the FAIRSeq BLEU Scorer.\n\n :param batch:\n Batch of observations\n :param texts:\n list of string predictions\n \"\"\"\n all_results = []\n label_vec = batch.label_vec\n assert label_vec is not None, \"label_vec must exist for fairseq bleu\"\n for i, t in enumerate(preds):\n result = FairseqBleuMetric.compute_many(\n t[1:],\n label_vec[i].unsqueeze(0),\n pad_idx=self.NULL_IDX,\n end_idx=self.END_IDX,\n unk_idx=self.dict[self.dict.unk_token],\n )\n if result is None:\n return\n all_results.append(result)\n\n bleu_scores = list(zip(*all_results))\n for k in range(4):\n self.record_local_metric(f'fairseq_bleu{k + 1}', bleu_scores[k])\n\n def _compute_nltk_bleu(self, batch: Batch, texts: List[str]):\n \"\"\"\n Compute BLEU score between text and label(s), using the NLTK BLEU Scorer.\n\n Note this differs from BLEU in ParlAI metrics in that the answers\n are unnormalized (no removal of stop words, etc.)\n\n :param batch:\n Batch of observations\n :param texts:\n list of string predictions\n \"\"\"\n\n results: Dict[int, List[Metric]] = {}\n observations = batch.observations\n assert observations is not None, 'observations must not be none in nltk bleu'\n for i, p in enumerate(texts):\n obs = observations[i]\n references = []\n for lbl in obs['eval_labels']:\n references.append(\n self._v2t(\n self._vectorize_text(\n lbl, True, True, self.label_truncate, False\n )\n )\n )\n for k in range(1, 5):\n b = BleuMetric.compute(p, references, k)\n if b is None:\n b = BleuMetric(0)\n if k not in results:\n results[k] = []\n results[k].append(b)\n\n for k in range(1, 5):\n self.record_local_metric(f'nltk_bleu{k}', results[k])\n\n def _add_generation_metrics(self, batch, preds):\n \"\"\"\n Can be overridden to allow for some metrics on the generations calculated at\n eval.\n \"\"\"\n pass\n\n def eval_step(self, batch):\n \"\"\"\n Evaluate a single batch of examples.\n \"\"\"\n if batch.text_vec is None and batch.image is None:\n return\n if batch.text_vec is not None:\n bsz = batch.text_vec.size(0)\n else:\n bsz = len(batch.image)\n self.model.eval()\n cand_scores = None\n token_losses = None\n\n if batch.label_vec is not None:\n # calculate loss on targets with teacher forcing\n loss, model_output = self.compute_loss(batch, return_output=True)\n if self.output_token_losses:\n token_losses = self._construct_token_losses(\n batch.label_vec, model_output\n )\n\n preds = None\n if self.skip_generation:\n warn_once(\"--skip-generation true produces limited metrics\")\n else:\n maxlen = self.label_truncate or 256\n beam_preds_scores, beams = self._generate(batch, self.beam_size, maxlen)\n preds, scores = zip(*beam_preds_scores)\n self._add_generation_metrics(batch, preds)\n\n # bsz x beamsize\n beam_texts: List[List[Tuple[str, float]]] = []\n for beam in beams:\n beam_texts.append([])\n for tokens, score in beam.get_rescored_finished():\n try:\n 
beam_texts[-1].append((self._v2t(tokens), score.item()))\n except KeyError:\n logging.error(\"Decoding error: %s\", tokens)\n continue\n\n cand_choices = None\n # TODO: abstract out the scoring here\n if self.rank_candidates:\n # compute roughly ppl to rank candidates\n cand_choices = []\n encoder_states = self.model.encoder(*self._encoder_input(batch))\n for i in range(bsz):\n num_cands = len(batch.candidate_vecs[i])\n enc = self.model.reorder_encoder_states(encoder_states, [i] * num_cands)\n cands, _ = self._pad_tensor(batch.candidate_vecs[i])\n scores, _ = self.model.decode_forced(enc, cands)\n cand_losses = F.cross_entropy(\n scores.view(num_cands * cands.size(1), -1),\n cands.view(-1),\n reduction='none',\n ).view(num_cands, cands.size(1))\n # now cand_losses is cands x seqlen size, but we still need to\n # check padding and such\n mask = (cands != self.NULL_IDX).float()\n cand_scores = (cand_losses * mask).sum(dim=1) / (mask.sum(dim=1) + 1e-9)\n _, ordering = cand_scores.sort()\n cand_choices.append([batch.candidates[i][o] for o in ordering])\n\n text = [self._v2t(p) for p in preds] if preds is not None else None\n if text and self.compute_tokenized_bleu:\n # compute additional bleu scores\n self._compute_fairseq_bleu(batch, preds)\n self._compute_nltk_bleu(batch, text)\n retval = Output(text, cand_choices, token_losses=token_losses)\n if not self.skip_generation:\n retval.beam_texts = beam_texts\n return retval\n\n def _treesearch_factory(self, device):\n method = self.opt.get('inference', 'greedy')\n beam_size = self.opt.get('beam_size', 1)\n if method == 'greedy':\n return GreedySearch(\n beam_size,\n min_length=0,\n block_ngram=self.beam_block_ngram,\n context_block_ngram=self.beam_context_block_ngram,\n length_penalty=self.opt.get('beam_length_penalty', 0.65),\n padding_token=self.NULL_IDX,\n bos_token=self.START_IDX,\n eos_token=self.END_IDX,\n device=device,\n )\n elif method == 'beam':\n return BeamSearch(\n beam_size,\n min_length=self.beam_min_length,\n block_ngram=self.beam_block_ngram,\n context_block_ngram=self.beam_context_block_ngram,\n length_penalty=self.opt.get('beam_length_penalty', 0.65),\n padding_token=self.NULL_IDX,\n bos_token=self.START_IDX,\n eos_token=self.END_IDX,\n device=device,\n )\n elif method == 'delayedbeam':\n return DelayedBeamSearch(\n self.opt['topk'],\n self.opt['beam_delay'],\n beam_size,\n min_length=self.beam_min_length,\n block_ngram=self.beam_block_ngram,\n context_block_ngram=self.beam_context_block_ngram,\n length_penalty=self.opt.get('beam_length_penalty', 0.65),\n padding_token=self.NULL_IDX,\n bos_token=self.START_IDX,\n eos_token=self.END_IDX,\n device=device,\n )\n elif method == 'topk':\n return TopKSampling(\n self.opt['topk'],\n beam_size,\n min_length=self.beam_min_length,\n block_ngram=self.beam_block_ngram,\n context_block_ngram=self.beam_context_block_ngram,\n length_penalty=self.opt.get('beam_length_penalty', 0.65),\n padding_token=self.NULL_IDX,\n bos_token=self.START_IDX,\n eos_token=self.END_IDX,\n device=device,\n )\n elif method == 'nucleus':\n return NucleusSampling(\n self.opt['topp'],\n beam_size,\n min_length=self.beam_min_length,\n block_ngram=self.beam_block_ngram,\n context_block_ngram=self.beam_context_block_ngram,\n length_penalty=self.opt.get('beam_length_penalty', 0.65),\n padding_token=self.NULL_IDX,\n bos_token=self.START_IDX,\n eos_token=self.END_IDX,\n device=device,\n )\n else:\n raise ValueError(f\"Can't use inference method {method}\")\n\n def _get_context(self, batch, batch_idx):\n \"\"\"\n Set the 
beam context for n-gram context blocking.\n\n Intentionally overridable for more complex model histories.\n \"\"\"\n ctxt = batch.text_vec[batch_idx]\n if self.beam_block_full_context:\n full_ctxt = batch.observations[batch_idx].get('full_text_vec', ctxt)\n if not isinstance(full_ctxt, torch.Tensor):\n full_ctxt = torch.LongTensor(full_ctxt).to(ctxt.device)\n ctxt = full_ctxt\n return ctxt\n\n def _get_initial_decoder_input(\n self, bsz: int, beam_size: int, dev: torch.device\n ) -> torch.LongTensor:\n \"\"\"\n Return initial input to the decoder.\n\n :param bsz:\n batchsize\n :param beam_size:\n beam size\n :param dev:\n device to send input to.\n\n :return initial_input:\n initial input for the decoder\n \"\"\"\n return (\n torch.LongTensor([self.START_IDX]) # type: ignore\n .expand(bsz * beam_size, 1)\n .to(dev)\n )\n\n def _get_next_decoder_input(\n self,\n prev_input: torch.LongTensor,\n selection: torch.LongTensor,\n incr_state_inds: torch.LongTensor,\n ) -> torch.LongTensor:\n \"\"\"\n Return next decoder input.\n\n :param prev_input:\n previous input to decoder\n :param selection:\n token selections for current timestep\n :param inds:\n incremental state indices\n\n :return decoder input:\n return decoder input for next timestep\n \"\"\"\n prev_input = torch.index_select(prev_input, 0, incr_state_inds)\n decoder_input = torch.cat([prev_input, selection], dim=-1)\n return decoder_input\n\n def _generate(\n self,\n batch: Batch,\n beam_size: int,\n max_ts: int,\n prefix_tokens: Optional[torch.LongTensor] = None,\n ):\n \"\"\"\n Generate an output with beam search.\n\n Depending on the options, this may perform greedy/topk/nucleus generation.\n\n :param Batch batch:\n Batch structure with input and labels\n :param int beam_size:\n Size of each beam during the search\n :param int max_ts:\n the maximum length of the decoded sequence\n :param prefix_tokens:\n if given, a tensor of tokens that must begin the decoded sequence.\n\n :return:\n tuple (beam_pred_scores, beams)\n\n - beam_preds_scores: list of (prediction, score) pairs for each sample in\n Batch\n - beams :list of Beam instances defined in Beam class, can be used for any\n following postprocessing, e.g. 
dot logging.\n \"\"\"\n model = self.model\n if isinstance(model, torch.nn.parallel.DistributedDataParallel):\n model = self.model.module\n encoder_states = model.encoder(*self._encoder_input(batch))\n if batch.text_vec is not None:\n dev = batch.text_vec.device\n else:\n assert batch.label_vec is not None, \"need label_vec for _generate\"\n dev = batch.label_vec.device\n\n bsz = (\n len(batch.text_lengths)\n if batch.text_lengths is not None\n else len(batch.image) # type: ignore\n )\n if batch.text_vec is not None:\n batchsize = batch.text_vec.size(0)\n beams = [\n self._treesearch_factory(dev)\n .set_context(self._get_context(batch, batch_idx))\n .set_block_list(self.beam_block_list)\n for batch_idx in range(batchsize)\n ]\n else:\n beams = [self._treesearch_factory(dev) for _ in range(bsz)]\n\n # repeat encoder outputs and decoder inputs\n decoder_input = self._get_initial_decoder_input(bsz, beam_size, dev)\n\n inds = torch.arange(bsz).to(dev).unsqueeze(1).repeat(1, beam_size).view(-1)\n encoder_states = model.reorder_encoder_states(encoder_states, inds)\n incr_state = None\n\n for _ts in range(max_ts):\n if all((b.is_done() for b in beams)):\n # exit early if possible\n break\n\n score, incr_state = model.decoder(decoder_input, encoder_states, incr_state)\n # only need the final hidden state to make the word prediction\n score = score[:, -1:, :]\n score = model.output(score)\n # score contains softmax scores for bsz * beam_size samples\n score = score.view(bsz, beam_size, -1)\n if self.temperature != 1.0:\n score.div_(self.temperature)\n # force to fp32 to avoid overflow issues during search calculations\n score = F.log_softmax(score, dim=-1, dtype=torch.float32) # type: ignore\n if prefix_tokens is not None and _ts < prefix_tokens.size(1):\n # generate prefix_tokens for every timestep that they exist\n # achieve by setting score of all other tokens to be -inf\n prefix_toks = prefix_tokens[:, _ts].unsqueeze(-1).repeat(1, beam_size)\n prefix_score = score.gather(-1, prefix_toks.unsqueeze(-1))\n prefix_mask = prefix_toks.ne(self.NULL_IDX)\n score[prefix_mask] = neginf(score.dtype)\n score[prefix_mask] = score[prefix_mask].scatter_(\n -1,\n prefix_toks[prefix_mask].unsqueeze(-1),\n prefix_score[prefix_mask],\n )\n for i, b in enumerate(beams):\n if not b.is_done():\n b.advance(score[i])\n incr_state_inds = torch.cat(\n [\n beam_size * i + b.get_backtrack_from_current_step()\n for i, b in enumerate(beams)\n ]\n )\n incr_state = model.reorder_decoder_incremental_state(\n incr_state, incr_state_inds\n )\n selection = torch.cat(\n [b.get_output_from_current_step() for b in beams]\n ).unsqueeze(-1)\n decoder_input = self._get_next_decoder_input(\n decoder_input, selection, incr_state_inds\n )\n\n # get all finalized candidates for each sample (and validate them)\n n_best_beam_preds_scores = [b.get_rescored_finished() for b in beams]\n\n if hasattr(self, '_rerank_beams'):\n n_best_beam_preds_scores = self._rerank_beams( # type: ignore\n batch, n_best_beam_preds_scores\n )\n\n # get the top prediction for each beam (i.e. 
minibatch sample)\n beam_preds_scores = [n_best_list[0] for n_best_list in n_best_beam_preds_scores]\n\n return beam_preds_scores, beams\n\n def _load_beam_block_list(self) -> SearchBlocklist:\n \"\"\"\n Load the beam block_list.\n\n :return: a dict mapping ngram length to different ngrams\n \"\"\"\n block_list = SearchBlocklist(self.dict)\n if not self.opt.get('beam_block_list_filename'):\n return block_list\n\n block_list_fn = self.opt['beam_block_list_filename']\n try:\n with PathManager.open(block_list_fn) as f:\n for line in f:\n block_list.add(line.strip())\n except IOError:\n logging.error(\n f\"Could not load beam block_list {block_list_fn}, using empty block_list.\"\n )\n return block_list\n\n\nclass _HypothesisTail(object):\n \"\"\"\n Hold some bookkeeping about a hypothesis.\n \"\"\"\n\n # use slots because we don't want dynamic attributes here\n __slots__ = ['timestep', 'hypid', 'score', 'tokenid']\n\n def __init__(self, timestep, hypid, score, tokenid):\n self.timestep = timestep\n self.hypid = hypid\n self.score = score\n self.tokenid = tokenid\n\n\nclass TreeSearch(object):\n \"\"\"\n Abstract Tree Search class.\n\n It keeps information about beam_size concurrent, developing hypotheses. Concrete\n implementations make choices about which token to explore next at each point in the\n tree. Different choices result in different generation algorithms.\n \"\"\"\n\n def __init__(\n self,\n beam_size,\n block_ngram=-1,\n context_block_ngram=-1,\n padding_token=0,\n bos_token=1,\n eos_token=2,\n min_length=3,\n device='cpu',\n length_penalty=0.65,\n ):\n \"\"\"\n Instantiate Beam object.\n\n :param beam_size:\n number of hypothesis in the beam\n :param block_ngram:\n size of ngrams to block.\n :param context_block_ngram:\n size of context ngrams to block\n :param padding_token:\n padding token ID\n :param bos_token:\n beginning of sentence token ID\n :param eos_token:\n end of sentence token ID\n :param min_length:\n minimum length of the predicted sequence\n :param device:\n What device to use for computations\n \"\"\"\n self.beam_size = beam_size\n self.length_penalty = length_penalty\n self.block_ngram = block_ngram\n self.min_length = min_length\n self.eos = eos_token\n self.bos = bos_token\n self.pad = padding_token\n self.context = None\n self.context_block_ngram = context_block_ngram\n self.block_list: Optional[SearchBlocklist] = None\n self.device = device\n # recent score for each hypo in the beam\n self.scores = None\n # self.scores values per each time step\n self.all_scores = [torch.Tensor([0.0] * beam_size).to(self.device)]\n # backtracking id to hypothesis at previous time step\n self.bookkeep = []\n # output tokens at each time step\n self.outputs = [\n torch.Tensor(self.beam_size).long().fill_(self.bos).to(self.device)\n ]\n # keeps tuples (score, time_step, hyp_id)\n self.finished = []\n self.eos_top = False\n self.eos_top_ts = None\n self.n_best_counter = 0\n self.partial_hyps = [[self.bos] for i in range(beam_size)]\n\n def set_context(self: TSType, context: torch.LongTensor) -> TSType:\n \"\"\"\n Set the internal context representation and return self.\n\n :param context:\n a LongTensor representing the input context; used for context\n ngram blocking, if supplied\n \"\"\"\n self.context = context.tolist()\n return self\n\n def set_block_list(self: TSType, block_list: Optional[SearchBlocklist]) -> TSType:\n self.block_list = block_list\n return self\n\n def get_output_from_current_step(self):\n \"\"\"\n Get the outputput at the current step.\n \"\"\"\n return 
self.outputs[-1]\n\n def get_backtrack_from_current_step(self):\n \"\"\"\n Get the backtrack at the current step.\n \"\"\"\n return self.bookkeep[-1]\n\n @abstractmethod\n def select_paths(self, logprobs, prior_scores, current_length):\n \"\"\"\n Select the next vocabulary item in these beams.\n\n :param logprobs:\n a (beamsize x vocab) tensor of log probabilities. If this is the first\n turn in the dialogue, it will be a (1 x vocab) tensor.\n :param prior_scores:\n a (beamsize) tensor of weights with the cumulative running\n log-probability of each beam. If the first turn, it will be a (1) tensor.\n :param current_length:\n the current length in tokens\n :return:\n a (hypothesis_ids, token_id, scores) tuple, where:\n\n - hypothesis_ids is a LongTensor of hypotheses we're extending. May have\n repeats, but should always be (beamsize) long.\n - token_ids is a (beamsize) LongTensor of next-token choices for\n each of the hypotheses.\n - scores is a (beamsize) Tensor with the updated cumulative log-probs\n of each beam.\n \"\"\"\n pass\n\n def _block_ngrams(\n self, ngram_size: int, logprobs: torch.Tensor, source: torch.LongTensor = None\n ):\n \"\"\"\n Hard block ngrams from the logprobs, based on the source.\n\n :param ngram_size:\n The length of ngrams to block. Must be > 0.\n :param logprobs:\n Float or HalfTensor, representing the log-probabilities. This is\n modified in place.\n :param source:\n Source text to grab ngrams from. If None, it uses the current\n hypothesis (i.e. self-blocking).\n \"\"\"\n for beam_id, hyp in enumerate(self.partial_hyps):\n if len(hyp) < ngram_size - 1:\n continue\n source_ = hyp if source is None else source\n ngrams = self._find_ngrams(source_, ngram_size)\n prefix = hyp[-(ngram_size - 1) :]\n for ngram in ngrams:\n if ngram_size == 1 or prefix == list(ngram[:-1]):\n logprobs[beam_id][ngram[-1]] = neginf(logprobs.dtype)\n return logprobs\n\n def _block_block_list(self, logprobs: torch.Tensor) -> torch.Tensor:\n if self.block_list is None:\n return logprobs\n\n for beam_id, hyp in enumerate(self.partial_hyps):\n for ngram_size, bad_ngrams in self.block_list.items():\n prefix = hyp[-(ngram_size - 1) :]\n for ngram in bad_ngrams:\n if (ngram_size == 1) or prefix == list(ngram[:-1]):\n logprobs[beam_id][ngram[-1]] = neginf(logprobs.dtype)\n return logprobs\n\n def advance(self, logprobs):\n \"\"\"\n Advance the beam one step.\n \"\"\"\n current_length = len(self.all_scores) - 1\n if current_length < self.min_length:\n # penalize all eos probs to make it decode longer\n for hyp_id in range(logprobs.size(0)):\n logprobs[hyp_id][self.eos] = neginf(logprobs.dtype)\n\n if self.scores is None:\n self.scores = torch.zeros(1).type_as(logprobs).to(logprobs.device)\n\n # penalize hypotheses ending in EOS on the prior scores (self.scores) level\n # this is related to search which uses prior scores (self.scores) (e.g. 
beam)\n for hyp_id, token in enumerate(self.outputs[-1]):\n if token == self.eos:\n self.scores[hyp_id] = neginf(self.scores.dtype)\n\n # beam blocking\n if self.block_ngram > 0:\n logprobs = self._block_ngrams(self.block_ngram, logprobs, None)\n\n logprobs = self._block_block_list(logprobs)\n\n if self.context_block_ngram > 0:\n if self.context is None:\n raise ValueError(\n \"Must use TreeSearch.set_context to use context blocking.\"\n )\n logprobs = self._block_ngrams(\n self.context_block_ngram, logprobs, self.context\n )\n\n hyp_ids, tok_ids, self.scores = self.select_paths(\n logprobs, self.scores, current_length\n )\n # use clone() here to ensure that self.all_scores will not be changed\n # later due to any penalties to self.scores\n self.all_scores.append(self.scores.clone())\n\n self.outputs.append(tok_ids)\n self.bookkeep.append(hyp_ids)\n self.partial_hyps = [\n self.partial_hyps[hyp_ids[i]] + [tok_ids[i].item()]\n for i in range(self.beam_size)\n ]\n\n # check new hypos for eos label, if we have some, add to finished\n for hypid in range(self.beam_size):\n if self.outputs[-1][hypid] == self.eos:\n if self.scores[hypid] <= neginf(self.scores.dtype):\n continue\n # this is finished hypo, adding to finished\n eostail = _HypothesisTail(\n timestep=len(self.outputs) - 1,\n hypid=hypid,\n score=self.all_scores[-1][hypid],\n tokenid=self.eos,\n )\n self.finished.append(eostail)\n self.n_best_counter += 1\n\n if self.outputs[-1][0] == self.eos:\n self.eos_top = True\n if self.eos_top_ts is None:\n self.eos_top_ts = len(self.outputs) - 1\n\n def is_done(self):\n \"\"\"\n Return whether beam search is complete.\n \"\"\"\n return self.eos_top and self.n_best_counter >= self.beam_size\n\n def _find_ngrams(self, input_list, n):\n \"\"\"\n Find ngrams of size n in input list.\n \"\"\"\n return list(zip(*[input_list[i:] for i in range(n)]))\n\n def _get_hyp_from_finished(self, hypothesis_tail):\n \"\"\"\n Extract hypothesis ending with EOS at timestep with hyp_id.\n\n :param timestep:\n timestep with range up to len(self.outputs) - 1\n\n :param hyp_id:\n id with range up to beam_size - 1\n\n :return:\n hypothesis sequence\n \"\"\"\n hyp_idx = []\n endback = hypothesis_tail.hypid\n for i in range(hypothesis_tail.timestep, -1, -1):\n hyp_idx.append(\n _HypothesisTail(\n timestep=i,\n hypid=endback,\n score=self.all_scores[i][endback],\n tokenid=self.outputs[i][endback],\n )\n )\n endback = self.bookkeep[i - 1][endback]\n\n return hyp_idx\n\n def _get_pretty_hypothesis(self, list_of_hypotails):\n \"\"\"\n Return hypothesis as a tensor of token ids.\n \"\"\"\n return torch.stack([ht.tokenid for ht in reversed(list_of_hypotails)])\n\n def get_rescored_finished(self, n_best=None):\n \"\"\"\n Return finished hypotheses according to adjusted scores.\n\n Score adjustment is done according to the Google NMT paper, which\n penalizes long utterances.\n\n :param n_best:\n number of finalized hypotheses to return\n\n :return:\n list of (tokens, score) pairs, in sorted order, where:\n - tokens is a tensor of token ids\n - score is the adjusted log probability of the entire utterance\n \"\"\"\n # if we never actually finished, force one\n if not self.finished:\n self.outputs[-1][0] = self.eos\n self.finished.append(\n _HypothesisTail(\n timestep=len(self.outputs) - 1,\n hypid=0,\n score=self.all_scores[-1][0],\n tokenid=self.outputs[-1][0],\n )\n )\n\n rescored_finished = []\n for finished_item in self.finished:\n current_length = finished_item.timestep + 1\n # these weights are from Google NMT paper\n 
length_penalty = math.pow((1 + current_length) / 6, self.length_penalty)\n rescored_finished.append(\n _HypothesisTail(\n timestep=finished_item.timestep,\n hypid=finished_item.hypid,\n score=finished_item.score / length_penalty,\n tokenid=finished_item.tokenid,\n )\n )\n\n # Note: beam size is almost always pretty small, so sorting is cheap enough\n srted = sorted(rescored_finished, key=attrgetter('score'), reverse=True)\n\n if n_best is not None:\n srted = srted[:n_best]\n\n n_best_list = [\n (self._get_pretty_hypothesis(self._get_hyp_from_finished(hyp)), hyp.score)\n for hyp in srted\n ]\n\n # check that there is at least one finished candidate\n # and assert that each of them contains only one EOS\n assert (\n len(n_best_list) >= 1\n ), f'TreeSearch returned {len(n_best_list)} candidates, must be >= 1'\n for (pred, score) in n_best_list:\n assert (\n pred == self.eos\n ).sum() == 1, f'TreeSearch returned a finalized hypo with multiple end tokens \\\n with score {score.item():.2f}'\n\n return n_best_list\n\n\nclass GreedySearch(TreeSearch):\n \"\"\"\n Greedy search.\n\n Picks the highest probability utterance at each step. Only works with\n --beam-size 1.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if self.beam_size != 1:\n raise ValueError('Greedy search can only be run with beam size 1.')\n\n def select_paths(self, logprobs, prior_scores, current_length):\n tok_scores, tok_ids = logprobs.max(1)\n best_scores = tok_scores + prior_scores\n hyp_ids = torch.arange(logprobs.size(0)).to(logprobs.device)\n return (hyp_ids, tok_ids, best_scores)\n\n\nclass BeamSearch(TreeSearch):\n \"\"\"\n Beam search.\n \"\"\"\n\n def select_paths(self, logprobs, prior_scores, current_length):\n \"\"\"\n Select the next vocabulary item in these beams.\n \"\"\"\n # if numel is 1, then this is the first time step, only one hyp is expanded\n if prior_scores.numel() == 1:\n logprobs = logprobs[0:1]\n\n # beam search actually looks over all hypotheses together so we flatten\n beam_scores = logprobs + prior_scores.unsqueeze(1).expand_as(logprobs)\n flat_beam_scores = beam_scores.view(-1)\n best_scores, best_idxs = torch.topk(flat_beam_scores, self.beam_size, dim=-1)\n voc_size = logprobs.size(-1)\n\n # get the backtracking hypothesis id as a multiple of full voc_sizes\n hyp_ids = best_idxs // voc_size\n # get the actual word id from residual of the same division\n tok_ids = best_idxs % voc_size\n\n return (hyp_ids, tok_ids, best_scores)\n\n\nclass DelayedBeamSearch(TreeSearch):\n \"\"\"\n DelayedBeam: Top-K sampling followed by beam search (Massarelli et al., 2019).\n\n Samples from a truncated distribution where only the most probable K words\n are considered at each time for the first N tokens, then switches to beam\n after N steps.\n\n See https://arxiv.org/abs/1911.03587 for details.\n \"\"\"\n\n def __init__(self, k, delay, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.k = k\n self.delay = delay\n\n def select_paths(self, logprobs, prior_scores, current_length):\n if current_length < self.delay:\n return TopKSampling.select_paths(\n self, logprobs, prior_scores, current_length\n )\n else:\n return BeamSearch.select_paths(self, logprobs, prior_scores, current_length)\n\n\nclass TopKSampling(TreeSearch):\n \"\"\"\n Top-K sampling (Fan et al., 2018).\n\n Samples from a truncated distribution where only the most probable K words\n are considered at each time.\n\n Typical values of k are 2, 10, 50.\n\n See https://arxiv.org/abs/1805.04833 for details.\n 
\"\"\"\n\n def __init__(self, k, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.k = k\n\n def select_paths(self, logprobs, prior_scores, current_length):\n values, indices = logprobs.topk(self.k, dim=-1)\n probs = torch.softmax(values, dim=-1)\n choices = torch.multinomial(probs, 1)[:, 0]\n hyp_ids = torch.arange(logprobs.size(0)).to(logprobs.device)\n tok_ids = indices[hyp_ids, choices]\n scores = values[hyp_ids, choices]\n best_scores = prior_scores.expand_as(scores) + scores\n return (hyp_ids, tok_ids, best_scores)\n\n\nclass NucleusSampling(TreeSearch):\n \"\"\"\n Nucelus, aka top-p sampling (Holtzman et al., 2019).\n\n Samples from a truncated distribution which covers a fixed CDF proportion\n of the original distribution.\n\n Typical values of p are 0.3 and 0.9.\n\n See https://arxiv.org/abs/1904.09751 for details.\n \"\"\"\n\n def __init__(self, p, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.p = p\n\n def select_paths(self, logprobs, prior_scores, current_length):\n # Unlike the other treesearch methods, we have to switch to linspace\n # for the probabilities in order to compute the CDF.\n probs = torch.softmax(logprobs, dim=-1)\n sprobs, sinds = probs.sort(dim=-1, descending=True)\n # The subtraction here is to get the exclusive prefix sum,\n # to guarantee the first element is not masked\n mask = (sprobs.cumsum(dim=-1) - sprobs) >= self.p\n sprobs[mask] = 0\n sprobs.div_(sprobs.sum(dim=-1).unsqueeze(1))\n choices = torch.multinomial(sprobs, 1)[:, 0]\n hyp_ids = torch.arange(logprobs.size(0)).to(logprobs.device)\n tok_ids = sinds[hyp_ids, choices]\n # Convert back to logspace.\n scores = sprobs[hyp_ids, choices].log()\n best_scores = prior_scores.expand_as(scores) + scores\n return (hyp_ids, tok_ids, best_scores)\n" ]
[ [ "torch.LongTensor", "torch.softmax", "torch.nn.CrossEntropyLoss", "torch.nn.functional.log_softmax", "torch.cat", "torch.Tensor", "torch.zeros", "torch.multinomial", "torch.arange", "torch.topk", "torch.index_select", "torch.nn.parallel.DistributedDataParallel" ] ]
lawrendran/mesh-transformer-jax
[ "49bdec254f626dddbd9d16ea6d6edb6b49e459ad" ]
[ "train.py" ]
[ "import argparse\nimport json\nimport time\n\nimport numpy as np\nimport wandb\nfrom tqdm import tqdm\n\nfrom mesh_transformer.build_model import build_model\nfrom lm_eval import evaluator, tasks\nfrom tasks.eval_harness import EvalHarnessAdaptor\nfrom tfrecord_loader import TFRecordNewInputs\nimport multiprocessing\n\n\ndef parse_args():\n # Parse command line arguments\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--tpu\", type=str, help=\"Name of TPU to train on.\")\n parser.add_argument(\"--tpu_region\", type=str, help=\"Region of TPU to train on.\")\n parser.add_argument(\"--preemptible\", action=\"store_true\")\n\n parser.add_argument(\"--config\", type=str, default=None, help=\"Config file location\")\n\n parser.add_argument(\"--new\", action=\"store_true\", help=\"If set, deletes previous checkpoint, if it exists, and \"\n \"starts a new training run\")\n\n args = parser.parse_args()\n return args\n\n\nif __name__ == \"__main__\":\n # huggingface tokenizers gets very angry if you fork\n multiprocessing.set_start_method(\"spawn\")\n\n args = parse_args()\n params = json.load(open(args.config))\n\n if args.new:\n print(f\"Starting experiment {params['name']} from scratch! \"\n f\"all data in gs://{params['bucket']}/{params['model_dir']}/ will be deleted\")\n input(\"Hit enter to continue\")\n\n tpu_name = args.tpu\n region = args.tpu_region\n preemptible = args.preemptible\n clean_start = args.new\n\n gradient_accumulation_steps = params.get(\"gradient_accumulation_steps\", 1)\n per_replica_batch = params[\"per_replica_batch\"]\n tpu_size = params[\"tpu_size\"]\n cores_per_replica = params[\"cores_per_replica\"]\n\n bucket = params[\"bucket\"]\n model_dir = params[\"model_dir\"]\n layers = params[\"layers\"]\n d_model = params[\"d_model\"]\n n_heads = params[\"n_heads\"]\n n_vocab = params[\"n_vocab\"]\n seq = params[\"seq\"]\n norm = params[\"norm\"]\n\n val_batches = params[\"val_batches\"]\n val_every = params[\"val_every\"]\n ckpt_every = params[\"ckpt_every\"]\n keep_every = params[\"keep_every\"]\n eval_tasks = params[\"eval_harness_tasks\"]\n total_steps = params[\"total_steps\"]\n\n pe = params[\"pe\"]\n assert pe in [\"fixed\", \"rotary\", \"t5\"]\n\n t = build_model(params, tpu_name, region, preemptible)\n\n try:\n t.save(0, bucket, model_dir, init=True, overwrite=clean_start)\n step = 0\n train_load_restore = None\n except Exception as e:\n print(f\"Save failed with error {e}, trying to load instead...\", e)\n step, aux = t.load(bucket, model_dir)\n train_load_restore = aux.get(\"train_loader\", None)\n\n if train_load_restore is None:\n print(\"Failed to restore train loader state\")\n\n train_dataset = TFRecordNewInputs(f\"data/{params['train_set']}\",\n batch_size=(\n gradient_accumulation_steps,\n per_replica_batch * tpu_size // cores_per_replica),\n sample_size=params['seq'],\n restore_state=train_load_restore)\n\n global_val_batch = per_replica_batch * tpu_size // cores_per_replica\n\n val_sets = {}\n\n for k, v in params['val_set'].items():\n val_sets[k] = TFRecordNewInputs(f\"data/{v}\",\n batch_size=(global_val_batch,),\n sample_size=seq)\n\n # use dynamic seq length unless pe is fixed\n adaptor = EvalHarnessAdaptor(t, seq, global_val_batch * 4, shrink=pe != \"fixed\")\n\n start = time.time()\n t.train(train_dataset.get_samples())\n print(f\"Train fn compiled in {time.time() - start:.06}s\")\n\n start = time.time()\n for val_set in val_sets.values():\n t.eval(val_set.get_samples())\n print(f\"Eval fn compiled in {time.time() - start:.06}s\")\n\n 
wandb.init(project='mesh-transformer-jax', entity=\"eleutherai\", name=params[\"name\"], config=params)\n\n eval_task_dict = tasks.get_task_dict(eval_tasks)\n\n while True:\n loss, last_loss = t.train(train_dataset.get_samples())\n wandb.log({'train/loss': loss, 'train/last_loss': last_loss}, step)\n\n if (step % ckpt_every == 0 and step) or step == total_steps:\n t.save(step, bucket, model_dir,\n aux={\"train_loader\": train_dataset.get_state()},\n init=False,\n delete_old=step % keep_every != 0)\n\n if step == total_steps:\n print(\"training completed!\")\n exit()\n\n if step % 100 == 0:\n print(f\"step {step} done\")\n\n if step % val_every == 0:\n for name, val_set in val_sets.items():\n val_loss = []\n for i, _ in tqdm(zip(val_set.sample_once(), range(val_batches)),\n desc=f\"validation for step {step}, set {name}\",\n total=val_batches):\n val_loss.append(t.eval(i))\n val_loss = np.array(val_loss).mean()\n print(f\"validation loss for step {step}, set {name}: {val_loss}\")\n\n wandb.log({f'val/loss_{name}': float(val_loss)}, step)\n\n results = evaluator.evaluate(adaptor, eval_task_dict, False, 0, None)\n\n flat_results = {}\n\n for task_name, task_res in results[\"results\"].items():\n version = results[\"versions\"][task_name]\n for metric_name, metric_res in task_res.items():\n flat_results[f\"{task_name}-v{version}/{metric_name}\"] = float(metric_res)\n\n dumped = json.dumps(results, indent=2)\n print(f\"step {step} val results: {dumped}\")\n wandb.log(flat_results, step)\n step += 1\n" ]
[ [ "numpy.array" ] ]
rstodden/TS-scale-interpretations
[ "4acc197c5ef6f950509227f47e6e69342be3829f" ]
[ "src/rebuild_hsplit.py" ]
[ "#!/usr/bin/env python\n# Copyright (c) Regina Stodden.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n#\n\nimport pandas as pd\nimport os\n\n\nwith open(\"data/test.8turkers.tok.norm\") as f:\n original_content = f.readlines()\n original_content = [t.strip() for t in original_content]\n\n# HSplit\ndataframe = pd.read_excel(\"simplification-acl2018/Human_evaluation_benchmark_acl2018.ods\", engine=\"odf\",header=[0, 1],)\n# print(dataframe[\"Annotator1\", \"Qa\"])\n\ncurrent_system = \"\"\nfor i,row in dataframe.iterrows():\n system = row[\"System\", \"Unnamed: 1_level_1\"]\n if type(system) == str:\n current_system = system\n if \"NTSh\" in system:\n current_system = current_system.replace(\"NTSh\", \"NTS-h\")\n if \"LM\" in system:\n current_system = current_system.replace(\"LM\", \"_LM\")\n if \"SENTSm-h1\" in system:\n current_system = current_system.replace(\"SENTSm-h1\", \"SENTS-h1^m\")\n if \"SENTSm-h4\" in system:\n current_system = current_system.replace(\"SENTSm-h4\", \"SENTS-h4^m\")\n if \" (with the default model instead of w2v)\" in row[\"System\", \"Unnamed: 1_level_1\"] :\n current_system = current_system.replace(\" (with the default model instead of w2v)\", \"_default_model\")\n elif \"NTS\" in system or \"SENTS\" in system:\n current_system = current_system+\"_w2v_model\"\n elif system.endswith(\"m\") and not system.endswith(\"^m\"):\n current_system = current_system[:-1]+\"^m\"\n else:\n pass\n current_system = current_system.strip()\n dataframe.loc[i, [[\"System\", \"Unnamed: 1_level_1\"]]] = current_system\n else:\n dataframe.loc[i, [[\"System\", \"Unnamed: 1_level_1\"]]] = current_system\n\n\nfor system in os.listdir(\"simplification-acl2018/Evaluation_system_outputs/\"):\n with open(\"simplification-acl2018/Evaluation_system_outputs/\"+system) as f:\n content = f.readlines()\n content = [t for t in content if t != \"\\n\"]\n if len(content) != len(dataframe[dataframe[\"System\", \"Unnamed: 1_level_1\"] == system]):\n print(system, len(content),len(dataframe[dataframe[\"System\", \"Unnamed: 1_level_1\"] == system]))\n for i, index in enumerate(dataframe[dataframe[\"System\", \"Unnamed: 1_level_1\"] == system].index):\n dataframe.loc[index,\"simplification\"] = content[i].strip().lower()\n dataframe.loc[index,\"original\"] = original_content[i].lower()\n\nfor i, index in enumerate(dataframe[dataframe[\"System\", \"Unnamed: 1_level_1\"] == \"Identity\"].index):\n dataframe.loc[index,\"simplification\"] = original_content[i].strip().lower()\n dataframe.loc[index,\"original\"] = original_content[i].lower()\n\n\ndataframe.to_csv(\"data/hsplit_with_text.csv\")\n\nprint(dataframe.columns)\n# dataframe = pd.read_csv(\"data/hsplit_with_text.csv\", header=[0,1])\nnew_dataframe = pd.DataFrame(columns = [\"original\", \"simplification\", \"sentence_id\", \"sample_id\", \"system_name\", \"aspect\", \"rater_id\", \"rating\"])\nannotator_column = sorted(list(set([name for name,level in dataframe.columns if \"Annotator\" in name])))\nprint(annotator_column)\naspect_mapping = {\"Qa\": \"fluency\", \"Qb\": \"meaning\", \"Qc\": \"simplicity\", \"Qd\": \"structural_simplicity\"}\n# Qa: G: Is the output fluent and grammatical?\n# Qb: M: Does the output preserve the meaning of the input?\n# Qc: S: Is the output simpler than the input?\n# Qd: StS: Is the output simpler than the input, ignoring the complexity of the words?\n\nj = 0\nfor i, row in dataframe.iterrows():\n print(i,j)\n if 
row[\"original\", \"\"] and row[\"simplification\", \"\"] and not pd.isna(row[\"original\", \"\"]) and not pd.isna(row[\"simplification\", \"\"]):\n for annotator in annotator_column:\n for rating in aspect_mapping:\n sample_id = str(row[\"Sentences\", 'Unnamed: 0_level_1'])+\"_\"+row[\"System\", 'Unnamed: 1_level_1']\n new_dataframe.loc[j] = [row[\"original\", \"\"], row[\"simplification\", \"\"], row[\"Sentences\", 'Unnamed: 0_level_1'],\n sample_id, row[\"System\", 'Unnamed: 1_level_1'], aspect_mapping[rating],\n annotator, row[annotator, rating]]\n j +=1\n\nnew_dataframe.to_csv(\"data/hsplit_ratings.csv\")" ]
[ [ "pandas.isna", "pandas.read_excel", "pandas.DataFrame" ] ]
subhacom/mbnet
[ "b0ab55079ed31614f923ee15ed65defae156332b" ]
[ "analysis/pn_kc_ggn_plot.py" ]
[ "# pn_kc_ggn_plot.py --- \n# \n# Filename: pn_kc_ggn_plot.py\n# Description: \n# Author: Subhasis Ray\n# Maintainer: \n# Created: Fri Feb 16 13:08:41 2018 (-0500)\n# Last-Updated: Fri Feb 16 16:11:54 2018 (-0500)\n# By: Subhasis Ray\n# Update #: 263\n# \n# Code:\n\nimport sys\nimport os\nfrom timeit import default_timer as timer\nimport numpy as np\nimport h5py as h5\nimport yaml\nimport pyqtgraph as pg\nfrom pyqtgraph.Qt import QtGui\n\npg.setConfigOptions(antialias=True)\npg.setConfigOption('background', 'w')\npg.setConfigOption('foreground', 'k')\n\ndefault_pen = (0, 0, 0, 100)\n\ndef get_event_times(group, nodes=None):\n spike_x = []\n spike_y = []\n if nodes is None:\n nodes = group\n for ii, node in enumerate(nodes):\n st = np.empty(2*len(group[node][:]))\n st[::2] = group[node][:]\n st[1::2] = group[node][:]\n spike_x.append(st)\n sy = np.zeros(st.shape)\n sy[::2] = ii\n sy[1::2] = ii + 0.5\n spike_y.append(sy)\n spike_x = np.concatenate(spike_x)\n spike_y = np.concatenate(spike_y)\n return spike_x, spike_y\n\n\ndef plot_spike_rasters(fname, vm_samples=10):\n \"\"\"The file `fname` has data from pn_kc_ggn simulation. In the early\n ones I did not record the spike times for KCs.\n\n \"\"\"\n start = timer()\n with h5.File(fname, 'r') as fd:\n config = yaml.load(fd['/model/filecontents/mb/network/config.yaml'][0])\n # PN spike raster\n pn_st = fd['/data/event/pn/pn_spiketime']\n gw = pg.GraphicsWindow(title=fd.attrs['description'])\n gw.setWindowTitle(fname)\n pn_plot = gw.addPlot(title='PN spike raster')\n pn_raster = pg.PlotCurveItem()\n spike_x, spike_y = get_event_times(pn_st)\n pn_raster.setData(spike_x, spike_y, connect='pairs', pen=default_pen)\n pn_plot.addItem(pn_raster)\n # LCA KC spike raster, MCA KC spike raster\n lca_kcs = int(config['kc']['number'] * config['kc']['lca_frac'] + 0.5) \n lca_nodes = [str(ii) for ii in range(lca_kcs)]\n mca_nodes = [str(ii) for ii in range(lca_kcs, config['kc']['number'])]\n try:\n lca_spike_x, lca_spike_y = get_event_times(fd['/data/event/kc/kc_spiketime'],\n nodes=lca_nodes)\n mca_spike_x, mca_spike_y = get_event_times(fd['/data/event/kc/kc_spiketime'],\n nodes=mca_nodes)\n except KeyError:\n dirname = os.path.dirname(fname)\n fname = 'kc_spikes_' + os.path.basename(fname)\n with h5.File(os.path.join(dirname, fname)) as kc_file:\n lca_spike_x, lca_spike_y = get_event_times(kc_file, nodes=lca_nodes)\n mca_spike_x, mca_spike_y = get_event_times(kc_file, nodes=mca_nodes)\n print('LCA spikes', lca_spike_x.shape[0] // 2)\n gw.nextRow()\n kc_lca_plot = gw.addPlot(title='KC LCA')\n kc_lca_plot.setXLink(pn_plot)\n kc_lca_raster = pg.PlotCurveItem()\n kc_lca_raster.setData(lca_spike_x, lca_spike_y, connect='pairs', pen=default_pen)\n kc_lca_plot.addItem(kc_lca_raster)\n gw.nextRow()\n kc_mca_plot = gw.addPlot(title='KC MCA')\n kc_mca_plot.setXLink(pn_plot)\n kc_mca_raster = pg.PlotCurveItem()\n kc_mca_raster.setData(mca_spike_x, mca_spike_y, connect='pairs', pen=default_pen)\n kc_mca_plot.addItem(kc_mca_raster)\n # LCA KC Vm\n kc_vm_node = fd['/data/uniform/kc/KC_Vm']\n t = np.arange(kc_vm_node.shape[1]) * kc_vm_node.attrs['dt']\n kc_lca_vm = [kc_vm_node[int(ii), :]\n for ii in np.random.choice(lca_nodes, size=vm_samples,\n replace=False)]\n gw.nextRow()\n kc_lca_vm_plot = gw.addPlot(title='KC LCA')\n kc_lca_vm_plot.setXLink(pn_plot)\n for kc_vm in kc_lca_vm:\n kc_vm_curve = kc_lca_vm_plot.plot(t, kc_vm, pen=default_pen)\n # MCA KC Vm\n kc_mca_vm = [kc_vm_node[int(ii), :]\n for ii in np.random.choice(mca_nodes, size=vm_samples,\n 
replace=False)]\n gw.nextRow()\n kc_mca_vm_plot = gw.addPlot(title='KC MCA')\n kc_mca_vm_plot.setXLink(pn_plot)\n for kc_vm in kc_mca_vm:\n kc_vm_curve = kc_mca_vm_plot.plot(t, kc_vm, pen=default_pen)\n # GGN MCA Vm, GGN LCA Vm\n gw.nextRow()\n ggn_output_vm_plot = gw.addPlot(title='GGN CA')\n ggn_output_vm_plot.setXLink(pn_plot)\n ggn_output_vm_plot.addLegend()\n ggn_output_vm = fd['/data/uniform/ggn_output/GGN_output_Vm']\n pen_lca = (255, 0, 0, 100)\n pen_mca = (0, 0, 255, 100)\n for ii in np.random.choice(range(ggn_output_vm.shape[0]), size=vm_samples, replace=False):\n sec = ggn_output_vm.dims[0]['source'][ii] \n sid = sec.rpartition('dend_')[-1].partition('[')[0]\n if sid == '5':\n pen = pen_lca\n else:\n pen = pen_mca\n ggn_output_vm_plot.plot(t, ggn_output_vm[ii, :], pen=pen)\n ggn_output_vm_plot.legend.addItem(pg.PlotDataItem(pen=pen_lca), 'LCA')\n ggn_output_vm_plot.legend.addItem(pg.PlotDataItem(pen=pen_mca), 'MCA')\n # GGN alphaL Vm\n gw.nextRow()\n ggn_alphaL_vm_plot = gw.addPlot(title='GGN alphaL')\n ggn_alphaL_vm_plot.setXLink(pn_plot)\n ggn_alphaL_vm = fd['/data/uniform/ggn_alphaL_input/GGN_alphaL_input_Vm']\n for ii in np.random.choice(range(ggn_alphaL_vm.shape[0]), size=vm_samples, replace=False):\n ggn_alphaL_vm_plot.plot(t, ggn_alphaL_vm[ii,:], pen=default_pen)\n end = timer()\n print('Time for plotting {}s'.format(end - start))\n return gw\n\n\nif __name__ == '__main__':\n gw = plot_spike_rasters(sys.argv[1])\n if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):\n QtGui.QApplication.instance().exec_()\n \n \n# \n# pn_kc_ggn_plot.py ends here\n" ]
[ [ "numpy.concatenate", "numpy.arange", "numpy.zeros", "numpy.random.choice" ] ]
utsavnandi/Kaggle-SIIM-ISIC-Melanoma-Classification
[ "5790c50b9cc266f82326a84093fa067880447397" ]
[ "plots.py" ]
[ "import time\nimport datetime\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import roc_curve\n\n\ndef plot_roc(y_true, y_pred, show=False):\n testy, lr_probs = y_true, y_pred\n ns_probs = [0 for _ in range(len(testy))]\n # calculate roc curves\n ns_fpr, ns_tpr, _ = roc_curve(testy, ns_probs)\n lr_fpr, lr_tpr, _ = roc_curve(testy, lr_probs) # lr_probs: predictions\n # plot the roc curve for the model\n figure = plt.figure(figsize=(8, 8))\n plt.plot(ns_fpr, ns_tpr, linestyle=\"--\", label=\"No Skill\")\n plt.plot(lr_fpr, lr_tpr, linestyle=\"-\", label=\"Model\")\n # axis labels\n plt.xlabel(\"False Positive Rate\")\n plt.ylabel(\"True Positive Rate\")\n # show the legend\n plt.legend()\n # show the plot\n if show:\n plt.show()\n else:\n plt.clf()\n return figure\n\n\ndef draw_hist(y_true, y_pred, show=True):\n true_neg_indices = np.where(y_true[:, 0] == 0)[0]\n true_pos_indices = np.where(y_true[:, 0] == 1)[0]\n pred_true_pos = y_pred[true_pos_indices]\n pred_true_neg = y_pred[true_neg_indices]\n thresh = 0.2\n pred_true_pos_error_count = pred_true_pos[\n np.where(pred_true_pos < (1 - thresh))[0]\n ]\n pred_true_neg_error_count = pred_true_neg[\n np.where(pred_true_neg > (thresh))[0]\n ]\n total_error_count = (\n pred_true_pos_error_count.shape[0] + pred_true_neg_error_count.shape[0]\n )\n total_error_count_scaled = total_error_count / y_true.shape[0]\n # plot\n figure = plt.figure(figsize=(14, 6))\n ax1 = plt.subplot(121)\n ax1.hist(pred_true_neg.T[0], bins=10)\n plt.ylim((0, 2000))\n ax2 = plt.subplot(122)\n ax2.hist(pred_true_pos.T[0], bins=10)\n if show:\n plt.show()\n else:\n plt.clf()\n return figure, total_error_count_scaled\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.figure", "matplotlib.pyplot.ylim", "sklearn.metrics.roc_curve", "matplotlib.pyplot.plot", "matplotlib.pyplot.subplot", "matplotlib.pyplot.clf", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "numpy.where", "matplotlib.pyplot.ylabel" ] ]
mchant/pandas-ta
[ "360a26d71c83fe87e4042e4f86298b1dc3023704" ]
[ "pandas_ta/overlap/kama.py" ]
[ "# -*- coding: utf-8 -*-\nfrom numpy import NaN as npNaN\nfrom pandas import Series\nfrom pandas_ta.utils import get_drift, get_offset, non_zero_range, verify_series\n\n\ndef kama(close, length=None, fast=None, slow=None, drift=None, offset=None, **kwargs):\n \"\"\"Indicator: Kaufman's Adaptive Moving Average (KAMA)\"\"\"\n # Validate Arguments\n length = int(length) if length and length > 0 else 10\n fast = int(fast) if fast and fast > 0 else 2\n slow = int(slow) if slow and slow > 0 else 30\n close = verify_series(close, max(fast, slow, length))\n drift = get_drift(drift)\n offset = get_offset(offset)\n\n if close is None: return\n\n # Calculate Result\n def weight(length: int) -> float:\n return 2 / (length + 1)\n\n fr = weight(fast)\n sr = weight(slow)\n\n abs_diff = non_zero_range(close, close.shift(length)).abs()\n peer_diff = non_zero_range(close, close.shift(drift)).abs()\n peer_diff_sum = peer_diff.rolling(length).sum()\n er = abs_diff / peer_diff_sum\n x = er * (fr - sr) + sr\n sc = x * x\n\n m = close.size\n result = [npNaN for _ in range(0, length - 1)] + [0]\n for i in range(length, m):\n result.append(sc.iloc[i] * close.iloc[i] + (1 - sc.iloc[i]) * result[i - 1])\n\n kama = Series(result, index=close.index)\n\n # Offset\n if offset != 0:\n kama = kama.shift(offset)\n\n # Name & Category\n kama.name = f\"KAMA_{length}_{fast}_{slow}\"\n kama.category = \"overlap\"\n\n return kama\n\n\nkama.__doc__ = \\\n\"\"\"Kaufman's Adaptive Moving Average (KAMA)\n\nDeveloped by Perry Kaufman, Kaufman's Adaptive Moving Average (KAMA) is a moving average\ndesigned to account for market noise or volatility. KAMA will closely follow prices when\nthe price swings are relatively small and the noise is low. KAMA will adjust when the\nprice swings widen and follow prices from a greater distance. This trend-following indicator\ncan be used to identify the overall trend, time turning points and filter price movements.\n\nSources:\n https://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:kaufman_s_adaptive_moving_average\n https://www.tradingview.com/script/wZGOIz9r-REPOST-Indicators-3-Different-Adaptive-Moving-Averages/\n\nCalculation:\n Default Inputs:\n length=10\n\nArgs:\n close (pd.Series): Series of 'close's\n length (int): It's period. Default: 10\n fast (int): Fast MA period. Default: 2\n slow (int): Slow MA period. Default: 30\n drift (int): The difference period. Default: 1\n offset (int): How many periods to offset the result. Default: 0\n\nKwargs:\n fillna (value, optional): pd.DataFrame.fillna(value)\n fill_method (value, optional): Type of fill method\n\nReturns:\n pd.Series: New feature generated.\n\"\"\"\n" ]
[ [ "pandas.Series" ] ]
avito-tech/abito
[ "9071eecd9526ee5c268cfacd7ac9a49b6ee185e5" ]
[ "abito/lib/stats/plain.py" ]
[ "import numpy as np\nfrom typing import Union\n\n\n__all__ = ['sum', 'mean', 'var', 'std', 'mean_std', 'quantile', 'median', 'ratio']\n\n\ndef sum(obs: np.ndarray) -> np.float:\n return obs.sum(axis=0)\n\n\ndef mean(obs: np.ndarray) -> np.float:\n return np.divide(obs.sum(axis=0), obs.shape[0])\n\n\ndef demeaned(obs: np.ndarray) -> np.ndarray:\n return obs - mean(obs)\n\n\ndef demeaned_sumsquares(obs: np.ndarray) -> np.float:\n return (demeaned(obs) ** 2).sum(axis=0)\n\n\ndef var(obs: np.ndarray) -> np.float:\n return demeaned_sumsquares(obs) / (obs.shape[0] - 1)\n\n\ndef std(obs: np.ndarray) -> np.float:\n return np.sqrt(var(obs))\n\n\ndef mean_std(obs: np.ndarray) -> np.float:\n return std(obs) / np.sqrt(obs.shape[0])\n\n\ndef quantile(obs: np.ndarray, q: float) -> Union[np.ndarray, np.float]:\n return np.quantile(obs, q, axis=0)\n\n\ndef median(obs: np.ndarray) -> np.float:\n return quantile(obs, 0.5)\n\n\ndef ratio(obs: np.ndarray) -> np.float:\n return sum(obs['num']) / sum(obs['den'])\n" ]
[ [ "numpy.quantile", "numpy.sqrt" ] ]
ludc/rlstructures
[ "99fa91bb4e955d31348bed007f25b41641c9fa73" ]
[ "rlalgos/ppo/run_cartpole_pomdp.py" ]
[ "#\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n#\n\n\nfrom rlstructures import logging\nfrom rlstructures.env_wrappers import GymEnv, GymEnvInf\nfrom rlstructures.tools import weight_init\nimport torch.nn as nn\nimport copy\nimport torch\nimport time\nimport numpy as np\nimport torch.nn.functional as F\nfrom rlalgos.a2c.agent import NNAgent, GRUAgentModel\nimport gym\nfrom gym.wrappers import TimeLimit\nfrom rlalgos.ppo.discrete_ppo import PPO\n\nimport gym.spaces as spaces\nfrom gym import ObservationWrapper\n\nimport hydra\nfrom omegaconf import DictConfig, OmegaConf\n\n\nclass MyWrapper(ObservationWrapper):\n r\"\"\"Observation wrapper that flattens the observation.\"\"\"\n def __init__(self, env):\n super(MyWrapper, self).__init__(env)\n self.observation_space = None #spaces.flatten_space(env.observation_space)\n\n def observation(self, observation):\n return [observation[0],observation[2]]\n\n\ndef create_gym_env(args):\n return gym.make(args[\"environment/env_name\"])\n\ndef create_env(n_envs, mode=\"train\",max_episode_steps=None, seed=None,**args):\n envs=[]\n for k in range(n_envs):\n e = create_gym_env(args)\n e = MyWrapper(e)\n e = TimeLimit(e, max_episode_steps=max_episode_steps)\n envs.append(e)\n if mode==\"train\":\n return GymEnvInf(envs, seed)\n else:\n return GymEnv(envs, seed)\n\ndef create_agent(n_actions, model):\n return NNAgent(model=model, n_actions=n_actions)\n\nclass Experiment(PPO):\n def __init__(self, config, create_env, create_agent):\n super().__init__(config, create_env, create_agent)\n\n def _create_model(self):\n module = GRUAgentModel(self.obs_dim, self.n_actions, self.config[\"model/hidden_size\"])\n module.apply(weight_init)\n return module\n\ndef flatten(d, parent_key='', sep='/'):\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key else k\n if isinstance(v, DictConfig):\n items.extend(flatten(v, new_key, sep=sep).items())\n else:\n items.append((new_key, v))\n return dict(items) \n\[email protected]()\ndef my_app(cfg : DictConfig) -> None:\n f=flatten(cfg)\n print(f)\n exp = Experiment(f, create_env, create_agent)\n exp.go()\n\nif __name__ == \"__main__\":\n import torch.multiprocessing as mp\n mp.set_start_method(\"spawn\")\n\n my_app()\n\n\n" ]
[ [ "torch.multiprocessing.set_start_method" ] ]
ketank1000/pancake_prediction_bot
[ "146b4b9f1c924abaf8f81e864473dbf085955c49" ]
[ "backtest/backtest.py" ]
[ "\"\"\"\nCurrent results: (15000 epochs)\n Win/lose: 38/23 (62.295081967213115%)\n Profit: 3.599999999999998 $\n\nCurrent results: (15000 epochs)\n Win/lose: 186/153 (54.86725663716814%)\n Profit: -22.80000000000001 $\n\"\"\"\n\nimport json\nfrom os import EX_SOFTWARE\nfrom typing_extensions import Required\nimport talib\nimport pandas as pd\nfrom utils.contracts import Pancake\nfrom utils import enums\n\nclass KuberPancake:\n def __init__(self) -> None:\n self.pancake = Pancake()\n self.start_epoch = 10000\n self.end_epoch = 25000\n self.bnb_1m = None\n self.bnb_5m = None\n self.bnb_15m = None\n self.rounds = None\n self.wins = 0\n self.lose = 0\n\n def get_pancake_data(self):\n \n epoch_data = {}\n # st -> 10000\n # end -> 25000\n for epoch in range(self.start_epoch,self.end_epoch):\n print(f\"getting {epoch}\")\n epoch_data[epoch] = self.pancake.get_round_details(epoch)\n \n print(len(epoch_data))\n print(epoch_data[24999])\n with open('data/rounds.json', 'w') as fp:\n json.dump(epoch_data, fp)\n\n def collect_data(self):\n\n # Read rounds\n with open('data/rounds.json') as f:\n self.rounds = json.load(f)\n \n # Read 1 min data\n self.bnb_1m = self.read_data('1m')\n self.bnb_1m['macd'] = talib.MACD(self.bnb_1m['Close'])[0]\n self.bnb_1m['macd_signal'] = talib.MACD(self.bnb_1m['Close'])[1]\n self.bnb_1m['macd_hist'] = talib.MACD(self.bnb_1m['Close'])[2]\n self.bnb_1m['rsi'] = talib.RSI(self.bnb_1m['Close'])\n\n # Read 5 min data\n self.bnb_5m = self.read_data('5m')\n self.bnb_5m['ema'] = talib.EMA(self.bnb_5m['Close'], timeperiod=50)\n self.bnb_5m['rsi'] = talib.RSI(self.bnb_5m['Close'])\n\n # Read 15 min data\n self.bnb_15m = self.read_data('15m')\n self.bnb_15m['ema'] = talib.EMA(self.bnb_15m['Close'], timeperiod=50)\n self.bnb_15m['rsi'] = talib.RSI(self.bnb_15m['Close'])\n\n print(self.bnb_1m)\n print(self.bnb_5m)\n print(self.bnb_15m)\n \n\n def read_data(self, interval):\n \n bnb = pd.read_json(f'data/BNB_USDT-{interval}.json')\n #self.bnb = self.bnb.set_index(0)\n mapping = {\n bnb.columns[0]: 'Date',\n bnb.columns[1]: 'Open',\n bnb.columns[2]: 'High',\n bnb.columns[3]: 'Low',\n bnb.columns[4]: 'Close',\n bnb.columns[5]: 'Volume'\n }\n bnb = bnb.rename(columns=mapping)\n return bnb\n\n def heikin_ashi(self, df):\n df_ha = pd.DataFrame(index=df.index.values, columns=['Open', 'High', 'Low', 'Close', 'Change'])\n df_ha['Close'] = (df['Open'] + df['High'] + df['Low'] + df['Close']) / 4\n \n #print(df.iloc[0])\n for i in range(len(df)):\n if i == 0:\n df_ha.iat[0, 0] = df['Open'].iloc[0]\n else:\n df_ha.iat[i, 0] = round((df_ha.iat[i-1, 0] + df_ha.iat[i-1, 3]) / 2,3)\n \n df_ha['High'] = df.loc[:, ['Open', 'Close']].join(df['High']).max(axis=1)\n df_ha['Low'] = df.loc[:, ['Open', 'Close']].join(df['Low']).min(axis=1)\n df_ha['Change'] = df_ha['Close'] - df_ha['Open']\n\n return df_ha\n\n def get_macd_crossed(self, index):\n # print(self.bnb[index-3:index])\n\n if self.bnb_1m.iloc[index].macd > 0 and self.bnb_1m.iloc[index].macd_signal > 0:\n if self.bnb_1m.iloc[index-2].macd_hist > 0 and self.bnb_1m.iloc[index].macd_hist < 0:\n start = index - 3\n no_of_hist_switched = 0\n direction = True\n while self.bnb_1m.iloc[start].macd > 0 and self.bnb_1m.iloc[start].macd_signal > 0:\n if self.bnb_1m.iloc[start].macd_hist < 0 and direction == True:\n no_of_hist_switched += 1\n direction = False\n elif self.bnb_1m.iloc[start].macd_hist > 0 and direction == False:\n no_of_hist_switched += 1\n direction = True\n start -= 1\n if no_of_hist_switched >= 1:\n #print(f\"no changes : 
{no_of_hist_switched}\")\n return enums.Prediction.BEAR\n elif self.bnb_1m.iloc[index].macd < 0 and self.bnb_1m.iloc[index].macd_signal < 0:\n if self.bnb_1m.iloc[index-2].macd_hist < 0 and self.bnb_1m.iloc[index].macd_hist > 0:\n start = index - 3\n no_of_hist_switched = 0\n direction = True\n while self.bnb_1m.iloc[start].macd < 0 and self.bnb_1m.iloc[start].macd_signal < 0:\n if self.bnb_1m.iloc[start].macd_hist > 0 and direction == True:\n no_of_hist_switched += 1\n direction = False\n elif self.bnb_1m.iloc[start].macd_hist < 0 and direction == False:\n no_of_hist_switched += 1\n direction = True\n start -= 1\n if no_of_hist_switched >= 1:\n #print(f\"no changes : {no_of_hist_switched}\")\n return enums.Prediction.BULL\n return enums.Prediction.SKIP\n\n def get_sma_crossed(self, index_5m, index_15m):\n if self.bnb_5m.iloc[index_5m].ema > self.bnb_15m.iloc[index_15m].ema:\n return enums.Prediction.BULL\n return enums.Prediction.BEAR\n\n def validate_prediction(self, round, prediction):\n actual_prediction = None\n if round['lockPrice'] < round['closePrice']:\n actual_prediction = enums.Prediction.BULL\n elif round['lockPrice'] > round['closePrice']:\n actual_prediction = enums.Prediction.BEAR\n else:\n actual_prediction = enums.Prediction.SKIP\n\n if prediction == actual_prediction:\n self.wins += 1\n print(f'won : {self.wins} {round} {prediction}')\n else:\n self.lose += 1\n print(f'lose : {self.lose} {round} {prediction}')\n\n\n\n\n def strategy(self):\n for epoch in range(self.start_epoch, self.end_epoch):\n round = self.rounds[f\"{epoch}\"]\n epoch_required = int(round[\"startTimestamp\"])//60 * 60000\n index_1m = self.bnb_1m.index.get_loc(self.bnb_1m.index[self.bnb_1m['Date'] == epoch_required][0])\n prediction_macd = self.get_macd_crossed(index_1m)\n epoch_required = (int(round[\"startTimestamp\"]) - int(round[\"startTimestamp\"])%(5*60))*1000\n index_5m = self.bnb_5m.index.get_loc(self.bnb_5m.index[self.bnb_5m['Date'] == epoch_required][0])\n epoch_required = (int(round[\"startTimestamp\"]) - int(round[\"startTimestamp\"])%(15*60))*1000\n index_15m = self.bnb_15m.index.get_loc(self.bnb_15m.index[self.bnb_15m['Date'] == epoch_required][0])\n prediction_ema = self.get_sma_crossed(index_5m, index_15m)\n\n prediction = enums.Prediction.SKIP\n if prediction_macd == enums.Prediction.BULL and prediction_ema == enums.Prediction.BULL: # and self.bnb_1m.loc[index_1m].rsi < 50:\n prediction = enums.Prediction.BULL\n elif prediction_macd == enums.Prediction.BEAR and prediction_ema == enums.Prediction.BEAR: # and self.bnb_1m.loc[index_1m].rsi > 50:\n prediction = enums.Prediction.BEAR\n \n if prediction != enums.Prediction.SKIP:\n self.validate_prediction(round, prediction)\n print(f\"{index_1m},{index_5m},{index_15m}\")\n print(f\"{self.bnb_5m.iloc[index_5m].ema} / {self.bnb_15m.iloc[index_15m].ema}\")\n print(self.bnb_1m.loc[index_1m-3:index_1m])\n # print(self.bnb_5m.loc[index_5m-3:index_5m])\n # print(self.bnb_15m.loc[index_15m-3:index_15m])\n\n print(f\"Win/lose: {self.wins}/{self.lose} ({(self.wins/(self.wins+self.lose))*100}%)\")\n print(f\"Profit: {self.wins*0.7 - self.lose} $\")\n\n def init(self):\n # self.get_pancake_data()\n self.collect_data()\n self.strategy()\n\n\nif __name__ == '__main__':\n st = KuberPancake()\n st.init()" ]
[ [ "pandas.read_json", "pandas.DataFrame" ] ]
tburnett/pointlike
[ "a556f07650c2f17d437c86fdafe9f9a33f59758e" ]
[ "python/uw/darkmatter/spectral.py" ]
[ "\"\"\" Dark Matter spectral models\n\n $Header: /nfs/slac/g/glast/ground/cvs/pointlike/python/uw/darkmatter/spectral.py,v 1.16 2014/08/06 00:52:49 echarles Exp $\n\n author: Alex Drlica-Wagner, Joshua Lande\n\"\"\"\nimport operator\nimport copy\nimport collections\nfrom collections import OrderedDict\nimport os\n\nimport numpy as np\nfrom scipy.optimize.minpack import check_gradient\n\nfrom uw.like.Models import Model,CompositeModel,Constant,ModelException\nfrom uw.utilities import path\n\nfrom uw.utilities.parmap import LogMapper,LimitMapper\n\nclass DMFitFunction(Model):\n \"\"\" Wrap gtlike's DMFitFunction interface. \n \n N.B. The bug Sheridan reported that the set_flux function \n was not working should now be fixed:\n \n >>> model = DMFitFunction()\n >>> model.set_flux(1e-7, emin=1e3, emax=1e5)\n >>> print '%g' % model.i_flux(emin=1e3, emax=1e5)\n 1e-07\n\n Test the getters and setters\n\n >>> model['sigmav']=3.14\n >>> print '%g' % model['sigmav']\n 3.14\n\n There was previously a bug in set_parameters, \n lets see if its fixed:\n\n >>> model.set_parameters(np.log10([5,500]))\n >>> print '%g' % model['sigmav']\n 5\n >>> print '%g' % model['mass']\n 500\n\n Note, the parameters which are not directly fit (like bratio) get set correctly:\n\n >>> model = DMFitFunction(bratio=2)\n >>> print model.dmf.getParam('bratio').getTrueValue()\n 2.0\n >>> model = DMFitFunction(bratio=3)\n >>> print model.dmf.getParam('bratio').getTrueValue()\n 3.0\n\n Test a few hard coded values, to make sure the function values are correct:\n\n >>> model = DMFitFunction(sigmav=1e-26, mass=100,\n ... channel0=4, channel1=1, bratio=1, norm=2.5e17)\n\n >>> model = DMFitFunction(norm=2.5e17, sigmav=1e-26, channel0=4,channel1=1,mass=100,bratio=1.0)\n\n These points agree with the fortran code.\n\n >>> e = [1, 10, 100, 1000, 10000, 100000 , 1000000]\n >>> dnde = [ 9.55801576e-18, 2.04105211e-16, 4.43719263e-16, 1.00123992e-16, 1.44911940e-18, 0.0, 0.0 ]\n >>> print np.allclose(model(e), dnde)\n True\n\n TODO: The limits of integration when calculating the flux should be\n limited by the DM particle mass. 
Otherwise, this can lead to numerical\n instability in the fit.\n \"\"\"\n default_p=[1e-25, 100.]\n default_extra_params=dict(norm=1e18, bratio=1.0, channel0=1, channel1=1)\n param_names=['sigmav','mass']\n default_mappers=[LogMapper,LogMapper]\n # ST >= 09-31-00\n gammamc_dif = '$(INST_DIR)/data/Likelihood/gammamc_dif.dat'\n if not os.path.exists(path.expand(gammamc_dif)):\n gammamc_dif = '$(INST_DIR)/Likelihood/src/dmfit/gammamc_dif.dat'\n if not os.path.exists(path.expand(gammamc_dif)):\n gammamc_dif = '$(BASE_DIR)/data/Likelihood/gammamc_dif.dat'\n default_extra_attrs=OrderedDict((('file',gammamc_dif),))\n\n gtlike = dict(\n name='DMFitFunction',\n param_names=['sigmav','mass'],\n extra_param_names=dict(norm='norm', bratio='bratio', channel0='channel0', channel1='channel1'),\n topointlike=[operator.pos,operator.pos],\n togtlike=[operator.pos,operator.pos])\n\n default_limits = dict(\n sigmav=LimitMapper(0,1e-19,1e-25),\n mass=LimitMapper(1,1e4,1))\n default_oomp_limits=['sigmav']\n\n channel_mapping = {\n 1 : [\"e+e-\",\"ee\"] ,\n 2 : [\"mu+mu-\",\"mumu\",\"musrc\"] ,\n 3 : [\"tau+tau-\",\"tautau\",\"tausrc\"] ,\n 4 : [\"bb-bar\",\"bb\",\"bbbar\",\"bbsrc\"],\n 5 : [\"tt-bar\",\"tt\"] ,\n 6 : [\"gluons\",\"gg\"] ,\n 7 : [\"W+W-\",\"w+w-\",\"ww\",\"wwsrc\"] ,\n 8 : [\"ZZ\",\"zz\"] ,\n 9 : [\"cc-bar\",\"cc\"] ,\n 10 : [\"uu-bar\",\"uu\"] ,\n 11 : [\"dd-bar\",\"dd\"] ,\n 12 : [\"ss-bar\",\"ss\"] ,\n }\n\n channel_tex = {\n 1 : r'$e^{+}e^{-}$' ,\n 2 : r'$\\mu^{+}\\mu^{-}$' ,\n 3 : r'$\\tau^{+}\\tau^{-}$' ,\n 4 : r'$b \\bar b$' ,\n 5 : r'$t \\bar t$' ,\n 6 : r'$gg$' ,\n 7 : r'$W^{+}W^{-}$' ,\n 8 : r'$ZZ$' ,\n 9 : r'$c \\bar c$' ,\n 10 : r'$u \\bar u$' ,\n 11 : r'$d \\bar d$' ,\n 12 : r'$s \\bar s$' ,\n }\n\n @staticmethod\n def channel2int(s):\n for k,v in DMFitFunction.channel_mapping.items():\n if s in v: return k\n else: raise ValueError(\"Can't find value %s\"%s)\n\n @staticmethod\n def channel2tex(ch):\n if ch in DMFitFunction.channel_tex.keys():\n return DMFitFunction.channel_tex[ch]\n elif ch in DMFitFunction.channels():\n return DMFitFunction.channel_tex[DMFitFunction.channel2int(ch)]\n else: raise ValueError(\"Can't find channel %s\"%ch)\n\n @staticmethod\n def int2channel(i):\n return DMFitFunction.channel_mapping[i][0]\n\n @staticmethod\n def channels():\n \"\"\" Return all available DMFit channel strings \"\"\"\n return [s for channel in DMFitFunction.channel_mapping.values() for s in channel]\n\n def full_name(self):\n return '%s, norm=%.2g, bratio=%.1f channel0=%d, channel1=%d' % (self.pretty_name,\n self.norm, self.bratio, \n self.channel0, self.channel1)\n\n def __getstate__(self):\n d=copy.copy(self.__dict__)\n del d['dmf']\n return d\n\n def __setstate__(self,state):\n self.__dict__ = state\n self._update()\n\n def _update(self):\n \"\"\" Update the DMFitFunction internally.\n This function should be called\n automatically when necessary.\n \"\"\"\n if not hasattr(self,'dmf'):\n import pyLikelihood\n self.dmf=pyLikelihood.DMFitFunction()\n\n for i,param_name in enumerate(self.param_names):\n self.dmf.setParam(param_name,self[param_name])\n\n # Set the parameters which are not fixed explicitly\n self.dmf.setParam('norm',self.norm)\n self.dmf.setParam('bratio',self.bratio)\n self.dmf.setParam('channel0',self.channel0)\n self.dmf.setParam('channel1', self.channel1)\n\n # Set flux integration energy cut to slightly higher than the mass\n self.ecut = 1.1 * self['mass'] * 1e3 # Energy cutoff (MeV)\n\n def __init__(self, *args, **kwargs):\n import pyLikelihood\n\n # Parse channel 
strings\n if isinstance(kwargs.get('channel0',None),basestring):\n kwargs['channel0'] = self.channel2int(kwargs['channel0'])\n if isinstance(kwargs.get('channel1',None),basestring):\n kwargs['channel1'] = self.channel2int(kwargs['channel1'])\n\n # the DMFitFunction must exist before __init__ is called because\n # the __init__ will call setp().\n self.dmf=pyLikelihood.DMFitFunction()\n super(DMFitFunction,self).__init__(*args,**kwargs)\n\n # unbound all parameters in gtlike\n for n in np.append(self.param_names,['norm','bratio','channel0','channel1']):\n self.dmf.getParam(n).setBounds(-float('inf'),float('inf'))\n\n # Integrated flux calculation energy cutoff\n self.ecut = None\n\n self.dmf.readFunction(path.expand(self.file))\n self._update() # update all parameters in DMFitFunction\n\n def setp(self, *args, **kwargs):\n super(DMFitFunction,self).setp(*args, **kwargs)\n self._update()\n\n def set_parameters(self, *args, **kwargs):\n super(DMFitFunction,self).set_parameters(*args, **kwargs)\n self._update()\n\n def set_all_parameters(self, *args, **kwargs):\n super(DMFitFunction,self).set_all_parameters(*args, **kwargs)\n self._update()\n\n @staticmethod\n def call_pylike_spectrum(spectrum, e):\n \"\"\" Method to call a pylikelihood spectrum given\n either a python numer or a numpy array. \"\"\"\n from pyLikelihood import dArg\n if isinstance(e,collections.Iterable):\n return np.asarray([spectrum(dArg(i)) for i in e])\n else:\n return spectrum(dArg(e))\n\n def __call__(self,e):\n \"\"\" Return energy in MeV. This could be vectorized. \"\"\"\n return DMFitFunction.call_pylike_spectrum(self.dmf, e)\n\nclass ComprehensiveModel(CompositeModel):\n \"\"\" Implements a \"Comprehensive Model\" needed for comparing non-nested\n models using a fequentist test with a well defiend null hypthesis.\n\n A good reference for the test is Cox 1961, 1962:\n\n And a modern description is in a recent talk by Jan Conrad:\n http://www-conf.slac.stanford.edu/statisticalissues2012/talks/SLAC_Statistics_2012_Conrad.pdf\n\n\n This feature is somewhat implemented in gtlike, but compbingin the\n two models in a different way\n https://confluence.slac.stanford.edu/display/SCIGRPS/Model+Selection+Using+Likelihood+Ratios\n\n Using this object is easy:\n\n >>> from uw.like.Models import PowerLaw\n >>> dm=DMFitFunction(); dm.set_flux(1)\n >>> pl=PowerLaw(); pl.set_flux(1)\n >>> cm=ComprehensiveModel(dm,pl)\n\n This model has a \"Scale\" (the theta parameter), and the parameters\n for the dark matter & powerlaw object:\n\n >>> print cm.param_names\n ['sigmav', 'mass', 'Norm', 'Index', 'Scale']\n\n\n The default 'theta' parameter is 0.5\n >>> print cm.param_names[-1]\n Scale\n >>> print cm[-1]\n 0.5\n >>> print cm.theta\n 0.5\n\n And the value is defined with the strange formula:\n\n >>> energies=np.logspace(1,7,7)\n >>> for theta in [0, 0.25, 0.5, 0.75, 1]:\n ... cm.theta = theta\n ... 
np.all(cm(energies)==dm(energies)**theta*pl(energies)**(1-theta))\n True\n True\n True\n True\n True\n\n Note, make theta the last parameter because many function in pointlike (like set_flux)\n assume that the first model is the normalization.\n \"\"\"\n def __init__(self,model1,model2):\n theta=Constant(Scale=0.5,mappers=[LimitMapper(0,1)])\n super(ComprehensiveModel,self).__init__(model1,model2,theta)\n\n @property\n def pretty_name(self):\n g,f,theta=self.models\n return '%s^theta*%s^(1-theta)' % (g.pretty_name,f.pretty_name)\n\n def external_gradient(self, energy):\n raise ModelException(\"The ComprehensiveModel spectral model has no analytic gradient!\")\n\n @property\n def theta(self):\n return self.models[2]['Scale']\n\n @theta.setter\n def theta(self,new_theta):\n self.models[2]['Scale']=new_theta\n\n def __call__(self,e):\n g,f=self.models[0:2]\n return g(e)**self.theta*f(e)**(1-self.theta)\n\n def set_prefactor(self, prefactor, energy):\n \"\"\" set_prefactor must work around non-linearities in function\n\n >>> from uw.like.Models import PowerLaw,ExpCutoff\n >>> model=ComprehensiveModel(PowerLaw(),ExpCutoff())\n >>> model.set_prefactor(1e-10, 100)\n >>> print model(100)\n 1e-10\n \"\"\"\n g,f=self.models[0:2]\n g.setp(0, 1) \n f.setp(0, 1) \n new_prefactor=prefactor/self(energy)\n g.setp(0, new_prefactor)\n f.setp(0, new_prefactor)\n\n def set_flux(self,flux,*args,**kwargs):\n \"\"\" set_flux must work around non-linearities in function\n\n >>> from uw.like.Models import PowerLaw,ExpCutoff\n >>> model=ComprehensiveModel(PowerLaw(),ExpCutoff())\n >>> model.set_flux(1)\n >>> print model.i_flux()\n 1.0\n \"\"\"\n g,f=self.models[0:2]\n g.setp(0, 1) \n f.setp(0, 1) \n new_prefactor = flux/self.i_flux(*args,**kwargs)\n g.setp(0,new_prefactor)\n f.setp(0,new_prefactor)\n\n \nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()\n \n" ]
[ [ "numpy.append" ] ]
johne13/dataset2binary
[ "de2bd5deb4f1f3935a6697093fc19c3c7a8b2d88" ]
[ "dataset2binary.py" ]
[ "'''\nfile: dataset2binary.py \n\ndescription: converts a dataset (aka dataframe) to a binary data file, and\n alse creates c and fortran code for reading the binary data\n\ninput: data in sas, stata, or csv format. may include a mix of character,\n integer, and float data, in multiple sizes.\n\noutput: (1) binary file readable by c or fortran\n (2) c code to read the binary file\n (3) fortran code to read the binary file\n (4) list of columns & formats/dtypes\n'''\n\nimport sys\nimport pandas as pd\nimport numpy as np\n\nif len(sys.argv) == 1:\n print( '-' * 70 + '\\n' )\n print( 'Description of dataset2binary:\\n')\n print( ' -> reads a stata/sas dataset or a CSV')\n print( ' -> outputs')\n print( ' 1 c/fortran readable binary dataset')\n print( ' 2 fortran program to read the data and output means')\n print( ' 3 c program to read the data and output means')\n print( ' 4 format file that can (optionally) be altered ')\n print( ' to specify data types (int8,float16,etc.)\\n')\n print( 'Syntax for ds2bin:\\n')\n print( ' $ ds2bin filename.ext [ downcast | f=file ]\\n')\n print( ' -> filename may include path')\n print( \" -> extension must be 'sas7bdat' (sas), 'dta' (stata), or 'csv'\")\n print( \" -> downcast option will attempt to safely cast numerical columns\")\n print( \" into smaller dtypes (including float -> integer)\")\n print( \" -> f=file is for providing desired number formats\")\n print( \" (see the /tmp/file output for syntax\\n\")\n print( '-' * 70 + '\\n' )\n sys.exit() \n\n# function for automatic downcasting of float to int (but only if it doesn't lead to loss of precision)\n\ndef float_to_int( s ):\n if np.issubdtype( s, np.number ):\n if ( s.astype(np.int64) == s ).all():\n return pd.to_numeric( s, downcast='integer' )\n else:\n return s\n else:\n return s\n\ndowncast=False\nuser_formats=False\nif len(sys.argv) == 3: \n if sys.argv[2] == 'downcast': downcast=True\n\n if sys.argv[2][:2] == 'f=':\n user_formats=True\n fmt_file = sys.argv[2].partition('=')[2] \n fmts_in = pd.read_csv(fmt_file, delim_whitespace=True, header=None )\n\n# process the required argument -- filename + extension \nfullname = sys.argv[1]\ndataset_name, sep, dataset_type = fullname.partition('.')\nif dataset_type == '': raise Exception(\"\\n\\n *** Input dataset must have extension 'sas7bdat', 'dta', or 'csv' ***\\n\")\n\npath, sep, dataset_name = dataset_name.rpartition('/')\n\noutput_bin = dataset_name + '.bin'\noutput_f90 = dataset_name + '.f90'\noutput_c = dataset_name + '.c'\nformats_file = dataset_name + '.formats'\n\n# can i add some sort of hourglass sort of thingy here???\nif dataset_type in ['sas7bdat', 'dta', 'csv']: print( '\\n' + 80 * '-' + '\\nReading ' + sys.argv[1] + '. . . 
\\n' )\n\nif dataset_type == 'sas7bdat': df = pd.read_sas( fullname ) \nelif dataset_type == 'dta': df = pd.read_stata( fullname ) \nelif dataset_type == 'csv': df = pd.read_csv( fullname ) \nelse:\n raise Exception(\"\\n\\n *** Input dataset must have extension 'sas7bdat', 'dta', or 'csv' ***\\n\")\n\n# optionally downcast or apply the user-specified formats\nif downcast==True: \n df = df.apply(float_to_int)\n\nif user_formats:\n for col, fmt in zip( fmts_in[0], fmts_in[1] ):\n # apply user formats/dtypes, but only if lossless for integers\n if fmt[:3] != 'int' or ( df[col] == df[col].astype(fmt) ).all(): \n df[col] = df[col].astype(fmt)\n else:\n print( '***** Column ' + col + ' not downcast to ' + fmt + ' b/c data values are too large *****\\n' )\n\n# output pandas dtypes to formats file 'fmts_out'\nfmts_out = open(formats_file,'w')\nfor col in df.columns:\n fmts_out.write( f'{col: <30}' + str(df[col].dtype) + '\\n' ) \nfmts_out.close()\n\n# process any additional arguments \n# for arg in sys.argv[2:]\n\n# output files 'f' & 'c' with variable declarations, to be used in fortran/c binary reads\nf = open(output_f90,'w')\nf.write( 'program main\\n\\n' )\nf.write( ' type foo' )\n\nc = open(output_c,'w')\nc.write( '#include <stdio.h>\\n' )\nc.write( '#include <ctype.h>\\n\\n' )\nc.write( 'int main() {\\n\\n' )\nc.write( ' FILE *fp;\\n\\n' )\nc.write( ' #pragma pack(push,1)\\n' )\nc.write( ' struct foobar {\\n' )\n\nprint('\\nFirst 5 rows of dataset:\\n\\n',df.head(5))\n\n# i think we want to convert any missing values ('.') to zeroes or else fortran will sum as NaN?\ndf = df.fillna(0)\n\nnames = df.columns\n\narrays = [ df[col].values for col in names ]\n\nformats = [ array.dtype.str if array.dtype != 'O' \n else array.astype(str).dtype.str.replace('<U','S') for array in arrays ] \n\n#formats = [ array.dtype if array.dtype != 'O' else '%s' % array.astype(str).dtype for array in arrays ] \n#formats = [ array.dtype if array.dtype != 'O' else f'{array.astype(str).dtype}' for array in arrays ] \n\nrec_array = np.rec.fromarrays( arrays, dtype={'names': names, 'formats': formats} )\nrec_array.tofile(output_bin)\n\n# numpy dtype notes: can have the following prefix characters, but may not be present at all\n# source: https://docs.scipy.org/doc/numpy-1.15.1/reference/generated/numpy.dtype.html\n# = native\n# < little-endian\n# > big-endian\n# | not applicable\n\nindent = 6 * ' ' \nprev_fmt = ''\ncount = 1\nnumerical_cols = []\nfirst_column = True\n\nfor fmt, col in zip( formats, names ):\n\n if fmt == prev_fmt and count < 8:\n\n pre = ', '\n pre_c = ', '\n\n count += 1\n\n else:\n\n # try to figure out the formats in this way:\n # if first or second character is \"i\" then it is an integer\n # e.g. 
'i', '<i', and '|i' are all integers (similar for floats and characters)\n\n post_c = ''\n\n # debugging print\n if False:\n print('i formats',fmt,fmt[:2],fmt.partition('i')[-1])\n print('f formats',fmt,fmt[:2],fmt.partition('f')[-1])\n print('S formats',fmt,fmt[:2],fmt.partition('S')[-1],'\\n')\n \n if 'i' in fmt[:2]: pre = '\\n' + indent + 'integer*' + fmt.partition('i')[-1] + ' :: '\n elif 'f' in fmt[:2]: pre = '\\n' + indent + 'real*' + fmt.partition('f')[-1] + ' :: ' \n elif 'S' in fmt[:2]: pre = '\\n' + indent + 'character(' + fmt.partition('S')[-1] + ') :: '\n else: raise Exception('Unknown format: ',fmt)\n \n if 'i' in fmt[:2]: \n if fmt.partition('i')[-1] == '4': pre_c = indent + 'int '\n elif fmt.partition('i')[-1] == '8': pre_c = indent + 'long '\n else: raise Exception('Unknown format: ',fmt)\n elif 'f' in fmt[:2]: \n if fmt.partition('f')[-1] == '4': pre_c = indent + 'float '\n elif fmt.partition('f')[-1] == '8': pre_c = indent + 'double '\n else: raise Exception('Unknown format: ',fmt)\n elif 'S' in fmt[:2]: \n pre_c = indent + 'char ' \n post_c = '[' + fmt.partition('S')[-1] + ']'\n else: \n raise Exception('Unknown format: ',fmt)\n\n count = 1\n\n if first_column: \n pre_c = '\\n' + pre_c\n first_column = False\n else: \n pre_c = ' ;\\n' + pre_c\n\n if 'i' in fmt[:2] or 'f' in fmt[:2]: numerical_cols += [col] \n\n if count < 9: \n f.write(pre + col )\n c.write(pre_c + col + post_c )\n else: \n f.write(pre + col + '\\n')\n f.write(pre_c + col + post_c + '\\n')\n\n prev_fmt = fmt\n\nrows = str(len(df))\n\n# write out the rest of the fortran code\n\nf.write('\\n end type foo\\n')\nf.write('\\n type(foo) :: foo_in\\n') \nf.write('\\n real(8) :: means(' + str(len(numerical_cols)) + ')\\n\\n')\nf.write(\" open( 13, file='\" + output_bin + \"', form='unformatted', access='stream' )\\n\\n\" )\nf.write(' do i = 1, ' + rows + '\\n')\nf.write('\\n read(13) foo_in\\n\\n')\nfor i, col in enumerate(numerical_cols):\n f.write(' means(' + str(i+1) + ') = means(' + str(i+1) + ') + foo_in%' + col + '\\n') \nf.write('\\n !!! if( i < 6 ) print *, foo_in\\n')\nf.write('\\n end do\\n')\nf.write('\\n print *, new_line(\"a\"), \" numerical columns means \", new_line(\"a\")\\n\\n')\nfor i, col in enumerate(numerical_cols):\n f.write(\" print '(a20,f24.5)', '\" + col + \"', means(\" + str(i+1) + \") / \" + rows + \"\\n\") \nf.write('\\nend program main\\n' )\n\n# write out the rest of the c code\n\nc.write( ';\\n\\n' )\nc.write( ' } foo ;\\n' )\nc.write( ' #pragma pack(pop)\\n\\n' )\nc.write( ' fp = fopen( \"' + output_bin + '\", \"rb\"); \\n')\nc.write( ' if (fp == NULL) { \\n')\nc.write( ' puts(\"Cannot open the file.\"); \\n')\nc.write( ' return 1; \\n')\nc.write( ' }\\n\\n' )\nc.write( ' int i_counter = 1 ; \\n')\nc.write( ' double means[' + str(len(numerical_cols)) + '] = { 0. } ; \\n\\n')\nc.write( ' while (fread(&foo, sizeof(foo), 1, fp) == 1) { \\n')\nfor i, col in enumerate(numerical_cols):\n c.write(' means[' + str(i) + '] += ' + 'foo.' 
+ col + ' ;\\n') \nc.write( ' i_counter ++ ; \\n' )\nc.write( ' }\\n' )\nc.write('\\n printf( \"%cmeans of numerical columns %c %c\", 10, 10, 10 );\\n')\nfor i, col in enumerate(numerical_cols):\n c.write(' printf( \" ' + col + ' %f %c\", means[' + str(i) + '] / ' + rows + '., 10 ) ;\\n') \nc.write( ' printf(\"%c\",10) ;\\n' )\nc.write( '\\n fclose(fp); \\n')\nc.write( ' return 0; \\n\\n')\nc.write( ' }\\n' )\n\n# close all the files\nf.close()\nc.close()\n\nprint( '\\nConversion completed' )\nprint( '\\nInput = ' + fullname )\nprint( '\\nOutput = ' + output_bin + ' (binary data)' )\nprint( ' ' + output_f90 + ' (fortran code)' )\nprint( ' ' + output_c + ' (c code)' )\nprint( ' ' + formats_file + ' (formats)\\n' )\n\n" ]
[ [ "pandas.read_csv", "numpy.issubdtype", "numpy.rec.fromarrays", "pandas.read_sas", "pandas.read_stata", "pandas.to_numeric" ] ]
covid-models/ventilator-supply-demand
[ "2fe52854833af0d074d942f6352eaea0b9612ce0" ]
[ "model.py" ]
[ "import math\r\nimport pandas as pd\r\nimport numpy as np\r\nimport scipy.integrate\r\nfrom datetime import timedelta\r\nimport shared\r\n# import world_data\r\n# import population\r\n\r\ndef model(Y, x, N, beta0, days0, beta1, gamma, sigma):\r\n # :param array x: Time step (days)\r\n # :param int N: Population\r\n # :param float beta: The parameter controlling how often a susceptible-infected contact results in a new infection.\r\n # :param float gamma: The rate an infected recovers and moves into the resistant phase.\r\n # :param float sigma: The rate at which an exposed person becomes infective.\r\n\r\n S, E, I, R = Y\r\n\r\n beta = beta0 if x < days0 else beta1\r\n\r\n dS = - beta * S * I / N\r\n dE = beta * S * I / N - sigma * E\r\n dI = sigma * E - gamma * I\r\n dR = gamma * I\r\n return dS, dE, dI, dR\r\n\r\n\r\ndef solve(model, population, E0, beta0, days0, beta1, gamma, sigma, days_total):\r\n X = np.arange(days_total) # time steps list\r\n N0 = population - E0, E0, 0, 0 # S, E, I, R at initial step\r\n\r\n y_data_var = scipy.integrate.odeint(model, N0, X, args=(population, beta0, days0, beta1, gamma, sigma))\r\n\r\n S, E, I, R = y_data_var.T # transpose and unpack\r\n return X, S, E, I, R # note these are all arrays\r\n\r\n\r\ndef run_SEIR(population, date_of_first_infection, date_of_lockdown,\r\n intensive_units, mean_days_icu,\r\n vents_units_start, vents_units_sh1, vents_date_sh1,):\r\n\r\n # --- external parameters ---\r\n days_total = 365 # total days to model\r\n # dataOffset = 'auto' # position of real world data relative to model in whole days.\r\n # 'auto' will choose optimal offset based on matching of deaths curves\r\n\r\n E0 = 1 # number of exposed people at initial time step\r\n r0 = 3.0 # https://en.wikipedia.org/wiki/Basic_reproduction_number\r\n r1 = 1.1 # reproduction number after quarantine measures - https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3539694\r\n\r\n # --- derived parameters ---\r\n days_before_lockdown = (date_of_lockdown - date_of_first_infection).days\r\n\r\n # almost half infections take place before symptom onset (Drosten)\r\n # https://www.medrxiv.org/content/10.1101/2020.03.08.20032946v1.full.pdf\r\n days_presymptomatic = 2.5\r\n days_to_incubation = 5.2\r\n\r\n # sigma: The rate at which an exposed person becomes infective. symptom onset - presympomatic\r\n sigma = 1.0 / (days_to_incubation - days_presymptomatic)\r\n\r\n # for SEIR: generation_time = 1/sigma + 0.5 * 1/gamma = timeFromInfectionToInfectiousness + timeInfectious https://en.wikipedia.org/wiki/Serial_interval\r\n generation_time = 4.6 # https://www.medrxiv.org/content/10.1101/2020.03.05.20031815v1 http://www.cidrap.umn.edu/news-perspective/2020/03/short-time-between-serial-covid-19-cases-may-hinder-containment\r\n\r\n # gamma: The rate an infectious person recovers and moves into the resistant phase.\r\n # Note that for the model it only means he does not infect anybody any more.\r\n gamma = 1.0 / (2.0 * (generation_time - 1.0 / sigma))\r\n\r\n percent_asymptomatic = 0.35 # https://www.zmescience.com/medicine/iceland-testing-covid-19-0523/ but virus can already be found in throat 2.5 days before symptoms (Drosten)\r\n # wild guess! italy:16? germany:4 south korea: 4? 
a lot of the mild cases will go undetected assuming 100% correct tests\r\n percent_cases_detected = (1.0 - percent_asymptomatic) / 20.0\r\n\r\n days_in_hospital = 12\r\n days_infectious = 1.0 / gamma # better days_infectious?\r\n\r\n # lag in whole days - need sources\r\n presymptomatic_lag = round(days_presymptomatic)\r\n communication_lag = 2\r\n test_lag = 3\r\n symptom_to_hospital_lag = 5\r\n hospital_to_icu_lag = 5\r\n\r\n infectionFatalityRateA = 0.01 # Diamond Princess, age corrected\r\n infectionFatalityRateB = infectionFatalityRateA * 3.0 # higher lethality without ICU - by how much? even higher without oxygen and meds\r\n\r\n # icu_rate and vent_rate based on figures from Wuhan study https://www.thelancet.com/journals/lanres/article/PIIS2213-2600(20)30110-7/fulltext\r\n icu_rate = (52 / 710)\r\n vent_rate = (22 / 56)\r\n # icuRate = infectionFatalityRateA * 2 # Imperial College NPI study: hospitalized/ICU/fatal = 6/2/1\r\n\r\n beta0 = r0 * gamma # The parameter controlling how often a susceptible-infected contact results in a new infection.\r\n beta1 = r1 * gamma # beta0 is used during days0 phase, beta1 after days0\r\n\r\n s1 = 0.5 * (-(sigma + gamma) + math.sqrt((sigma + gamma) ** 2 + 4 * sigma * gamma * (\r\n r0 - 1))) # https://hal.archives-ouvertes.fr/hal-00657584/document page 13\r\n # doublingTime = (math.log(2.0, math.e) / s1)\r\n\r\n X, S, E, I, R = solve(model, population, E0, beta0, days_before_lockdown, beta1, gamma, sigma, days_total)\r\n\r\n demand_dict = {'days': X,\r\n 'susceptible': S,\r\n 'exposed': E,\r\n 'infectious': I,\r\n 'recovered': R,\r\n }\r\n\r\n df = pd.DataFrame(demand_dict)\r\n\r\n # Feature engineering\r\n df['date'] = df['days'].apply(lambda x: date_of_first_infection + timedelta(days=x))\r\n df['vents'] = df.apply(lambda x: vents_units_start if x['date'] < vents_date_sh1 else (vents_units_start + vents_units_sh1), axis=1)\r\n df = df.applymap(lambda x: round(x) if isinstance(x, float) else x)\r\n\r\n # Compute time series of patients who require intensive-care unit\r\n df['needs_icu'] = df.apply(lambda x: round(x['infectious'] * icu_rate), axis=1)\r\n # Number of patients who need an icu at any given time is a rolling function of those who needed it over the last x days\r\n df['needs_icu'] = df['needs_icu'].shift(10).rolling(window=mean_days_icu, win_type='gaussian').sum(std=3)\r\n df.fillna(0, inplace=True)\r\n\r\n # Compute time series of patients who require mechanical ventilation\r\n df['needs_ventilator'] = df.apply(lambda x: round(x['needs_icu'] * vent_rate), axis=1)\r\n df['needs_ventilator'] = df['needs_ventilator'].shift(3).rolling(window=3, win_type='gaussian').sum(std=3)\r\n df.drop(columns=['exposed'], inplace=True)\r\n\r\n\r\n\r\n # # derived arrays\r\n # F = I * percent_cases_detected\r\n # needs_icu = I * icuRate * days_in_hospital / days_infectious # scale for short infectious time vs. real time in hospital\r\n # # P = I / population * 1_000_000 # probability of random person to be infected\r\n #\r\n # # timeline: exposed, infectious, symptoms, at home, hospital, ICU\r\n # F = shared.delay(F,\r\n # days_presymptomatic + symptom_to_hospital_lag + test_lag + communication_lag) # found in tests and officially announced; from I\r\n # U = shared.delay(needs_icu, days_presymptomatic + symptom_to_hospital_lag + hospital_to_icu_lag) # ICU from I before delay\r\n # U = shared.delay(U, round(\r\n # (days_in_hospital / days_infectious - 1) * days_infectious)) # ??? delay by scaling? 
todo: think this through\r\n #\r\n # # cumulate found --> cases\r\n # # FC = np.cumsum(F)\r\n #\r\n # # estimate deaths from recovered\r\n # D = np.zeros(days_total)\r\n # RPrev = 0\r\n # DPrev = 0\r\n # for i, x in enumerate(X):\r\n # IFR = infectionFatalityRateA if U[i] <= intensive_units else infectionFatalityRateB\r\n # D[i] = DPrev + IFR * (R[i] - RPrev)\r\n # RPrev = R[i]\r\n # DPrev = D[i]\r\n #\r\n # D = shared.delay(D,\r\n # - days_infectious + days_presymptomatic\r\n # + symptom_to_hospital_lag + days_in_hospital\r\n # + communication_lag) # deaths from R\r\n\r\n\r\n\r\n\r\n\r\n line_plot_data = df.melt(id_vars=['date'],\r\n value_vars=['infectious', 'needs_icu', 'vents', 'needs_ventilator'],\r\n value_name='count',\r\n var_name='type')\r\n\r\n return line_plot_data\r\n" ]
[ [ "numpy.arange", "pandas.DataFrame" ] ]
Marticles/ml-in-action
[ "7b8a13fdd73a210ee4338dce400bd764eb9abf75" ]
[ "LogisticRegression/lr.py" ]
[ "# -*- coding: utf-8 -*-\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# 《机器学习实战》 - 第5章 - Logistic回归\n\n# 示例1:采用梯度上升法找到Logistic回归分类器的最佳回归系数\n\ndef loadDataSet():\n \"\"\"\n 读取数据集\n \"\"\"\n dataMat = []\n labelMat = []\n fr = open('TestSet.txt')\n for line in fr.readlines():\n lineArr = line.strip().split()\n # X0设为1.0\n dataMat.append([1.0,float(lineArr[0]),float(lineArr[1])])\n labelMat.append(int(lineArr[2]))\n return dataMat,labelMat\n\ndef sigmoid(z):\n \"\"\"\n sigmoid函数\n \"\"\"\n return 1.0 / (1 + np.exp(-z))\n\ndef gradAscent(dataMatIn, classLabels):\n \"\"\"\n 梯度上升法\n \"\"\"\n dataMatrix = np.mat(dataMatIn)\n # 转置为列向量\n labelMat = np.mat(classLabels).transpose()\n m, n = np.shape(dataMatrix)\n alpha = 0.001 # 学习率\n maxCycles = 500 # 迭代次数\n weights = np.ones((n,1)) # 权重\n for k in range(maxCycles):\n h = sigmoid(dataMatrix * weights)\n error = labelMat - h\n weights = weights + alpha * dataMatrix.transpose() * error\n return weights\n\n# 测试结果\n# dataArr, labelMat = loadDataSet()\n# print(gradAscent(dataArr,labelMat))\n\ndef stoGradAscent0(dataMatrix, classLabels):\n \"\"\"\n 随机梯度下降法\n \"\"\"\n dataMatrix = np.array(dataMatrix)\n m, n = np.shape(dataMatrix)\n alpha = 0.001\n weights = np.ones(n)\n for i in range(m):\n h = sigmoid(sum(dataMatrix[i] * weights))\n error = classLabels[i] - h\n weights = weights + alpha * dataMatrix[i]* error\n return weights\n\ndef stocGradAscent1(dataMatrix, classLabels, numIter = 150):\n \"\"\"\n 改进的随机梯度下降法\n \"\"\"\n dataMatrix = np.array(dataMatrix)\n m, n = np.shape(dataMatrix)\n weights = np.ones(n)\n # i和j的不断增大令学习率不断减少,但是不为0\n for j in range(numIter):\n dataIndex = list(range(m))\n for i in range(m):\n alpha = 4/(1.0+j+i)+0.0001\n # 随机抽取样本\n randIndex = int(np.random.uniform(0,len(dataIndex)))\n h = sigmoid(sum(dataMatrix[randIndex]*weights))\n error = classLabels[randIndex] - h\n weights = weights + alpha * error * dataMatrix[randIndex]\n del(dataIndex[randIndex])\n return weights\n\ndef plotBestFit(wei):\n \"\"\"\n 对结果进行可视化\n \"\"\"\n # weights = wei.getA() # getA()方法将numpy矩阵转为数组\n weights = wei\n dataMat, labelMat = loadDataSet()\n dataArr = np.array(dataMat)\n n = np.shape(dataArr)[0]\n xcord1 = []\n ycord1 = []\n xcord2 = []\n ycord2 = []\n for i in range(n):\n if int(labelMat[i]) == 1:\n xcord1.append(dataArr[i,1])\n ycord1.append(dataArr[i,2])\n else:\n xcord2.append(dataArr[i,1])\n ycord2.append(dataArr[i,2])\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.scatter(xcord1, ycord1, s = 30, marker = 's')\n ax.scatter(xcord2, ycord2, s = 30,)\n x = np.arange(-3.0, 3.0, 0.1)\n y = (-weights[0] - weights[1] * x) / weights[2]\n ax.plot(x, y, c = 'red')\n plt.xlabel(('X1'))\n plt.ylabel(('Y1'))\n plt.show()\n\n# 可视化\n# dataArr, labelMat = loadDataSet()\n# plotBestFit(stocGradAscent1(dataArr,labelMat))\n\n# 示例2: 从疝气病症预测病马的死亡率\n\ndef classifyVector(inX, weights):\n \"\"\"\n sigmoid分类器\n 根据权重与特征来计算sigmoid的值,大于0.5返回1,否则返回0\n \"\"\"\n prob = sigmoid(sum(inX*weights))\n if prob > 0.5:\n return 1.0\n else:\n return 0.0\n\ndef colicTest():\n \"\"\"\n 在疝气病马数据集中测试分类效果\n \"\"\"\n frTrain = open('HorseColicTraining.txt')\n frTest = open('HorseColicTest.txt')\n trainingSet = []\n trainingLabels = []\n # 在训练集上训练\n for line in frTrain.readlines():\n currLine = line.strip().split('\\t')\n lineArr = []\n for i in range(21):\n lineArr.append(float(currLine[i]))\n trainingSet.append(lineArr)\n trainingLabels.append(float(currLine[21]))\n trainWeights = stocGradAscent1(np.array(trainingSet), trainingLabels, 500)\n errorCount = 0\n numTestVec = 
0.0\n # 在测试集上进行测试\n for line in frTest.readlines():\n numTestVec += 1.0\n currLine = line.strip().split('\\t')\n lineArr = []\n for i in range(21):\n lineArr.append(float(currLine[i]))\n if int(classifyVector(np.array(lineArr), trainWeights)) != int(currLine[21]):\n errorCount += 1\n errorRate = (float(errorCount) / numTestVec)\n print(\"the error rate of this test is: %f\" % errorRate)\n return errorRate\n\n\ndef multiTest():\n \"\"\"\n 调用colicTest()10次并求结果的平均值\n \"\"\"\n numTests = 10\n errorSum = 0.0\n for k in range(numTests):\n errorSum += colicTest()\n print(\"after %d iterations the average error rate is: %f\" % (numTests, errorSum/float(numTests)))\n\n# 测试结果\n# multiTest()" ]
[ [ "numpy.arange", "numpy.ones", "matplotlib.pyplot.ylabel", "numpy.shape", "numpy.exp", "matplotlib.pyplot.xlabel", "numpy.array", "numpy.mat", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
JiaqiLiZju/NvTK
[ "6b887670a03d63c1747d9854ecbbac13cc06461c" ]
[ "NvTK/Explainer/MotifVisualize.py" ]
[ "'''Motif Visualization in NvTK.\n\nCurrently, this module only support DNA MOTIF Visualization.\n\nProtein Sequence Motif Visualization was under development.\n'''\n\n# Modified motif visualization functions from DeepOmic\n# [email protected]\n\n__all__ = [\"filter_heatmap\", \"plot_filter_heatmap\", \"plot_filter_logos\", \"seq_logo\", \"plot_seq_logo\"]\n\nimport os, logging\nimport numpy as np\nimport pandas as pd\n\nfrom PIL import Image\n\nimport matplotlib\nimport matplotlib.image as mpimg\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\nfrom .Motif import normalize_pwm\n\n# def imresize(arr, shape):\n# return np.array(Image.fromarray(arr).resize((shape[1], shape[0])))\n\ndef filter_heatmap(pwm, output_fname=None, save=False, fig_size=(10, 7), \n norm=True, cmap='hot_r', cbar_norm=True):\n \"\"\"visualize pwm represented Filter in heatmap.\"\"\"\n pwm_dim, pwm_len = pwm.shape\n\n plt.figure(figsize=fig_size)\n if norm:\n norm = matplotlib.colors.Normalize(vmin=0, vmax=1)\n else:\n norm = None\n cmap_reversed = matplotlib.cm.get_cmap(cmap)\n im = plt.imshow(pwm, cmap=cmap_reversed, norm=norm, aspect=\"auto\")\n\n #plt.axis('off')\n ax = plt.gca()\n ax.set_xticks(np.arange(-.5, pwm_len, 1.), minor=True)\n ax.set_yticks(np.arange(-.5, pwm_dim, 1.), minor=True)\n ax.grid(which='minor', color='k', linestyle='-', linewidth=2)\n plt.xticks(list(range(pwm_len)))\n if pwm.shape[0] == 4:\n plt.yticks([0, 1, 2, 3], ['A', 'C', 'G', 'T'], fontsize=16)\n else:\n plt.yticks(list(range(pwm_dim)), list(range(pwm_dim)), fontsize=16)\n\n #cbar = plt.colorbar()\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.2)\n cbar = plt.colorbar(im, cax=cax)\n cbar.ax.tick_params(labelsize=16)\n if cbar_norm:\n cbar.set_ticks([0.0, 0.5, 1.0])\n\n if save:\n plt.savefig(output_fname, format=\"pdf\")\n plt.show()\n plt.close()\n\n\ndef plot_filter_heatmap(W, factor=5, fig_size=(10,7), save=True):\n \"\"\"visualize pwm represented Filter in heatmap.\"\"\"\n for idx, pwm in enumerate(W):\n output_fname = \"Motif_\" + str(idx) +\".pdf\"\n pwm = normalize_pwm(pwm, factor=factor)\n filter_heatmap(pwm, output_fname=output_fname, save=save, fig_size=fig_size)\n\n\ndef plot_filter_logos(W, figsize=(10,7), height=25, nt_width=10, norm=0, alphabet='dna', norm_factor=3, num_rows=None):\n \"\"\"visualize pwm represented filter Logo.\"\"\"\n # W = np.squeeze(W.transpose([3, 2, 0, 1]))\n num_filters = W.shape[0]\n if not num_rows:\n num_rows = int(np.ceil(np.sqrt(num_filters)))\n num_cols = num_rows\n else:\n num_cols = int(np.ceil(num_filters//num_rows))\n grid = matplotlib.gridspec.GridSpec(num_rows, num_cols)\n grid.update(wspace=0.2, hspace=0.2, left=0.1, right=0.2, bottom=0.1, top=0.2)\n fig = plt.figure(figsize=figsize);\n if norm:\n MAX = np.max(W)\n else:\n MAX = None\n\n for i in range(num_filters):\n plt.subplot(grid[i]);\n if norm_factor:\n W_norm = normalize_pwm(W[i], factor=norm_factor, max=MAX)\n else:\n W_norm = W[i]\n logo = seq_logo(W_norm, height=height, nt_width=nt_width, norm=0, alphabet=alphabet)\n plot_seq_logo(logo, nt_width=nt_width, step_multiple=None)\n #if np.mod(i, num_rows) != 0:\n plt.yticks([])\n return fig\n\n\n# help functions\ndef plot_seq_logo(logo, nt_width=None, step_multiple=None):\n \"\"\"visualize sequence logo.\"\"\"\n plt.imshow(logo, interpolation='none')\n if nt_width:\n num_nt = logo.shape[1]/nt_width\n if step_multiple:\n step_size = int(num_nt/(step_multiple+1))\n nt_range = 
range(step_size, step_size*step_multiple)\n plt.xticks([step_size*nt_width, step_size*2*nt_width, step_size*3*nt_width, step_size*4*nt_width],\n [str(step_size), str(step_size*2), str(step_size*3), str(step_size*4)])\n else:\n plt.xticks([])\n plt.yticks([0, 50], ['2.0','0.0'])\n ax = plt.gca()\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.yaxis.set_ticks_position('none')\n ax.xaxis.set_ticks_position('none')\n else:\n plt.imshow(logo, interpolation='none')\n plt.axis('off');\n\n\ndef load_alphabet(char_path, alphabet, colormap='standard'):\n\n def load_char(char_path, char, color):\n colors = {}\n colors['green'] = [10, 151, 21]\n colors['red'] = [204, 0, 0]\n colors['orange'] = [255, 153, 51]\n colors['blue'] = [0, 0, 204]\n colors['cyan'] = [153, 204, 255]\n colors['purple'] = [178, 102, 255]\n colors['grey'] = [160, 160, 160]\n colors['black'] = [0, 0, 0]\n\n img = mpimg.imread(os.path.join(char_path, char+'.eps'))\n img = np.mean(img, axis=2)\n x_index, y_index = np.where(img != 255)\n y = np.ones((img.shape[0], img.shape[1], 3))*255\n for i in range(3):\n y[x_index, y_index, i] = colors[color][i]\n return y.astype(np.uint8)\n\n\n colors = ['green', 'blue', 'orange', 'red']\n if alphabet == 'dna':\n letters = 'ACGT'\n if colormap == 'standard':\n colors = ['green', 'blue', 'orange', 'red']\n chars = []\n for i, char in enumerate(letters):\n chars.append(load_char(char_path, char, colors[i]))\n\n elif alphabet == 'rna':\n letters = 'ACGU'\n if colormap == 'standard':\n colors = ['green', 'blue', 'orange', 'red']\n chars = []\n for i, char in enumerate(letters):\n chars.append(load_char(char_path, char, colors[i]))\n\n\n elif alphabet == 'structure': # structural profile\n\n letters = 'PHIME'\n if colormap == 'standard':\n colors = ['blue', 'green', 'orange', 'red', 'cyan']\n chars = []\n for i, char in enumerate(letters):\n chars.append(load_char(char_path, char, colors[i]))\n\n elif alphabet == 'pu': # structural profile\n\n letters = 'PU'\n if colormap == 'standard':\n colors = ['cyan', 'purple']\n elif colormap == 'bw':\n colors = ['black', 'grey']\n chars = []\n for i, char in enumerate(letters):\n chars.append(load_char(char_path, char, colors[i]))\n\n return chars\n\n\ndef seq_logo(pwm, height=30, nt_width=10, norm=0, alphabet='dna', colormap='standard'):\n \"\"\"generate sequence logo from pwm.\"\"\"\n def get_nt_height(pwm, height, norm):\n\n def entropy(p):\n s = 0\n for i in range(len(p)):\n if p[i] > 0:\n s -= p[i]*np.log2(p[i])\n return s\n\n num_nt, num_seq = pwm.shape\n heights = np.zeros((num_nt,num_seq));\n for i in range(num_seq):\n if norm == 1:\n total_height = height\n else:\n total_height = (np.log2(num_nt) - entropy(pwm[:, i]))*height;\n if alphabet == 'pu':\n heights[:,i] = np.floor(pwm[:,i]*np.minimum(total_height, height));\n else:\n heights[:,i] = np.floor(pwm[:,i]*np.minimum(total_height, height*2));\n\n return heights.astype(int)\n\n\n # get the alphabet images of each nucleotide\n package_directory = os.path.dirname(os.path.abspath(__file__))\n char_path = os.path.join(package_directory,'chars')\n chars = load_alphabet(char_path, alphabet, colormap)\n\n # get the heights of each nucleotide\n heights = get_nt_height(pwm, height, norm)\n\n # resize nucleotide images for each base of sequence and stack\n num_nt, num_seq = pwm.shape\n width = np.ceil(nt_width*num_seq).astype(int)\n\n if alphabet == 'pu':\n max_height = height\n else:\n max_height = height*2\n #total_height = np.sum(heights,axis=0) # 
np.minimum(np.sum(heights,axis=0), max_height)\n logo = np.ones((max_height, width, 3)).astype(int)*255;\n for i in range(num_seq):\n nt_height = np.sort(heights[:,i]);\n index = np.argsort(heights[:,i])\n remaining_height = np.sum(heights[:,i]);\n offset = max_height-remaining_height\n\n for j in range(num_nt):\n if nt_height[j] > 0:\n # resized dimensions of image\n nt_img = np.array(Image.fromarray(chars[index[j]]).resize((nt_width, nt_height[j])))\n # nt_img = imresize(chars[index[j]], (nt_height[j], nt_width))\n # determine location of image\n height_range = range(remaining_height-nt_height[j], remaining_height)\n width_range = range(i*nt_width, i*nt_width+nt_width)\n\n # 'annoying' way to broadcast resized nucleotide image\n if height_range:\n for k in range(3):\n for m in range(len(width_range)):\n logo[height_range+offset, width_range[m],k] = nt_img[:,m,k];\n\n remaining_height -= nt_height[j]\n\n return logo.astype(np.uint8)\n\n" ]
[ [ "matplotlib.pyplot.imshow", "numpy.minimum", "numpy.sqrt", "numpy.max", "numpy.mean", "numpy.where", "matplotlib.pyplot.gca", "numpy.arange", "numpy.ceil", "matplotlib.pyplot.subplot", "matplotlib.gridspec.GridSpec", "matplotlib.pyplot.close", "matplotlib.pyplot.axis", "numpy.zeros", "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "numpy.argsort", "matplotlib.pyplot.show", "numpy.sum", "matplotlib.pyplot.xticks", "numpy.log2", "matplotlib.colors.Normalize", "numpy.sort", "numpy.ones", "matplotlib.pyplot.colorbar", "matplotlib.cm.get_cmap", "matplotlib.pyplot.yticks" ] ]
dojinkimm/Object_Detection_Video_AllInOne
[ "ef2e3ca1ad5e731db43c7786a12f9f5ab42d52e3" ]
[ "p_utils/utils.py" ]
[ "from __future__ import division\nimport tqdm\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport cv2\n\n\ndef to_cpu(tensor):\n return tensor.detach().cpu()\n\n\ndef prep_image(img, inp_dim):\n \"\"\"\n Prepare image for inputting to the neural network.\n\n Returns a Variable\n \"\"\"\n orig_im = img\n dim = orig_im.shape[1], orig_im.shape[0]\n img = (letterbox_image(orig_im, (inp_dim, inp_dim)))\n img_ = img[:, :, ::-1].transpose((2, 0, 1)).copy()\n img_ = torch.from_numpy(img_).float().div(255.0).unsqueeze(0)\n return img_, orig_im, dim\n\n\ndef letterbox_image(img, inp_dim):\n # resize image with unchanged aspect ratio using padding\n img_w, img_h = img.shape[1], img.shape[0]\n w, h = inp_dim\n new_w = int(img_w * min(w / img_w, h / img_h))\n new_h = int(img_h * min(w / img_w, h / img_h))\n resized_image = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_CUBIC)\n\n canvas = np.full((inp_dim[1], inp_dim[0], 3), 128)\n\n canvas[(h - new_h) // 2:(h - new_h) // 2 + new_h, (w - new_w) // 2:(w - new_w) // 2 + new_w, :] = resized_image\n\n return canvas\n\n\ndef weights_init_normal(m):\n classname = m.__class__.__name__\n if classname.find(\"Conv\") != -1:\n torch.nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif classname.find(\"BatchNorm2d\") != -1:\n torch.nn.init.normal_(m.weight.data, 1.0, 0.02)\n torch.nn.init.constant_(m.bias.data, 0.0)\n\n\ndef rescale_boxes(boxes, current_dim, original_shape):\n \"\"\" Rescales bounding boxes to the original shape \"\"\"\n orig_h, orig_w = original_shape\n # The amount of padding that was added\n pad_x = max(orig_h - orig_w, 0) * (current_dim / max(original_shape))\n pad_y = max(orig_w - orig_h, 0) * (current_dim / max(original_shape))\n # Image height and width after padding is removed\n unpad_h = current_dim - pad_y\n unpad_w = current_dim - pad_x\n # Rescale bounding boxes to dimension of original image\n boxes[:, 0] = ((boxes[:, 0] - pad_x // 2) / unpad_w) * orig_w\n boxes[:, 1] = ((boxes[:, 1] - pad_y // 2) / unpad_h) * orig_h\n boxes[:, 2] = ((boxes[:, 2] - pad_x // 2) / unpad_w) * orig_w\n boxes[:, 3] = ((boxes[:, 3] - pad_y // 2) / unpad_h) * orig_h\n return boxes\n\n\ndef xywh2xyxy(x):\n y = x.new(x.shape)\n y[..., 0] = x[..., 0] - x[..., 2] / 2\n y[..., 1] = x[..., 1] - x[..., 3] / 2\n y[..., 2] = x[..., 0] + x[..., 2] / 2\n y[..., 3] = x[..., 1] + x[..., 3] / 2\n return y\n\n\ndef ap_per_class(tp, conf, pred_cls, target_cls):\n \"\"\" Compute the average precision, given the recall and precision curves.\n Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.\n # Arguments\n tp: True positives (list).\n conf: Objectness value from 0-1 (list).\n pred_cls: Predicted object classes (list).\n target_cls: True object classes (list).\n # Returns\n The average precision as computed in py-faster-rcnn.\n \"\"\"\n\n # Sort by objectness\n i = np.argsort(-conf)\n tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]\n\n # Find unique classes\n unique_classes = np.unique(target_cls)\n\n # Create Precision-Recall curve and compute AP for each class\n ap, p, r = [], [], []\n for c in tqdm.tqdm(unique_classes, desc=\"Computing AP\"):\n i = pred_cls == c\n n_gt = (target_cls == c).sum() # Number of ground truth objects\n n_p = i.sum() # Number of predicted objects\n\n if n_p == 0 and n_gt == 0:\n continue\n elif n_p == 0 or n_gt == 0:\n ap.append(0)\n r.append(0)\n p.append(0)\n else:\n # Accumulate FPs and TPs\n fpc = (1 - tp[i]).cumsum()\n tpc = (tp[i]).cumsum()\n\n # Recall\n recall_curve = tpc / (n_gt + 1e-16)\n 
r.append(recall_curve[-1])\n\n # Precision\n precision_curve = tpc / (tpc + fpc)\n p.append(precision_curve[-1])\n\n # AP from recall-precision curve\n ap.append(compute_ap(recall_curve, precision_curve))\n\n # Compute F1 score (harmonic mean of precision and recall)\n p, r, ap = np.array(p), np.array(r), np.array(ap)\n f1 = 2 * p * r / (p + r + 1e-16)\n\n return p, r, ap, f1, unique_classes.astype(\"int32\")\n\n\ndef compute_ap(recall, precision):\n \"\"\" Compute the average precision, given the recall and precision curves.\n Code originally from https://github.com/rbgirshick/py-faster-rcnn.\n\n # Arguments\n recall: The recall curve (list).\n precision: The precision curve (list).\n # Returns\n The average precision as computed in py-faster-rcnn.\n \"\"\"\n # correct AP calculation\n # first append sentinel values at the end\n mrec = np.concatenate(([0.0], recall, [1.0]))\n mpre = np.concatenate(([0.0], precision, [0.0]))\n\n # compute the precision envelope\n for i in range(mpre.size - 1, 0, -1):\n mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])\n\n # to calculate area under PR curve, look for points\n # where X axis (recall) changes value\n i = np.where(mrec[1:] != mrec[:-1])[0]\n\n # and sum (\\Delta recall) * prec\n ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n return ap\n\n\ndef get_batch_statistics(outputs, targets, iou_threshold):\n \"\"\" Compute true positives, predicted scores and predicted labels per sample \"\"\"\n batch_metrics = []\n for sample_i in range(len(outputs)):\n\n if outputs[sample_i] is None:\n continue\n\n output = outputs[sample_i]\n pred_boxes = output[:, :4]\n pred_scores = output[:, 4]\n pred_labels = output[:, -1]\n\n true_positives = np.zeros(pred_boxes.shape[0])\n\n annotations = targets[targets[:, 0] == sample_i][:, 1:]\n target_labels = annotations[:, 0] if len(annotations) else []\n if len(annotations):\n detected_boxes = []\n target_boxes = annotations[:, 1:]\n\n for pred_i, (pred_box, pred_label) in enumerate(zip(pred_boxes, pred_labels)):\n\n # If targets are found break\n if len(detected_boxes) == len(annotations):\n break\n\n # Ignore if label is not one of the target labels\n if pred_label not in target_labels:\n continue\n\n iou, box_index = bbox_iou(pred_box.unsqueeze(0), target_boxes).max(0)\n if iou >= iou_threshold and box_index not in detected_boxes:\n true_positives[pred_i] = 1\n detected_boxes += [box_index]\n batch_metrics.append([true_positives, pred_scores, pred_labels])\n return batch_metrics\n\n\ndef bbox_wh_iou(wh1, wh2):\n wh2 = wh2.t()\n w1, h1 = wh1[0], wh1[1]\n w2, h2 = wh2[0], wh2[1]\n inter_area = torch.min(w1, w2) * torch.min(h1, h2)\n union_area = (w1 * h1 + 1e-16) + w2 * h2 - inter_area\n return inter_area / union_area\n\n\ndef bbox_iou(box1, box2, x1y1x2y2=True):\n \"\"\"\n Returns the IoU of two bounding boxes\n \"\"\"\n if not x1y1x2y2:\n # Transform from center and width to exact coordinates\n b1_x1, b1_x2 = box1[:, 0] - box1[:, 2] / 2, box1[:, 0] + box1[:, 2] / 2\n b1_y1, b1_y2 = box1[:, 1] - box1[:, 3] / 2, box1[:, 1] + box1[:, 3] / 2\n b2_x1, b2_x2 = box2[:, 0] - box2[:, 2] / 2, box2[:, 0] + box2[:, 2] / 2\n b2_y1, b2_y2 = box2[:, 1] - box2[:, 3] / 2, box2[:, 1] + box2[:, 3] / 2\n else:\n # Get the coordinates of bounding boxes\n b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]\n b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]\n\n # get the corrdinates of the intersection rectangle\n inter_rect_x1 = torch.max(b1_x1, b2_x1)\n inter_rect_y1 = 
torch.max(b1_y1, b2_y1)\n inter_rect_x2 = torch.min(b1_x2, b2_x2)\n inter_rect_y2 = torch.min(b1_y2, b2_y2)\n # Intersection area\n inter_area = torch.clamp(inter_rect_x2 - inter_rect_x1 + 1, min=0) * torch.clamp(\n inter_rect_y2 - inter_rect_y1 + 1, min=0\n )\n # Union Area\n b1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1)\n b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1)\n\n iou = inter_area / (b1_area + b2_area - inter_area + 1e-16)\n\n return iou\n\n\ndef non_max_suppression(prediction, conf_thres=0.5, nms_thres=0.4):\n \"\"\"\n Removes detections with lower object confidence score than 'conf_thres' and performs\n Non-Maximum Suppression to further filter detections.\n Returns detections with shape:\n (x1, y1, x2, y2, object_conf, class_score, class_pred)\n \"\"\"\n\n # From (center x, center y, width, height) to (x1, y1, x2, y2)\n prediction[..., :4] = xywh2xyxy(prediction[..., :4])\n output = [None for _ in range(len(prediction))]\n for image_i, image_pred in enumerate(prediction):\n # Filter out confidence scores below threshold\n image_pred = image_pred[image_pred[:, 4] >= conf_thres]\n # If none are remaining => process next image\n if not image_pred.size(0):\n continue\n # Object confidence times class confidence\n score = image_pred[:, 4] * image_pred[:, 5:].max(1)[0]\n # Sort by it\n image_pred = image_pred[(-score).argsort()]\n class_confs, class_preds = image_pred[:, 5:].max(1, keepdim=True)\n detections = torch.cat((image_pred[:, :5], class_confs.float(), class_preds.float()), 1)\n # Perform non-maximum suppression\n keep_boxes = []\n while detections.size(0):\n large_overlap = bbox_iou(detections[0, :4].unsqueeze(0), detections[:, :4]) > nms_thres\n label_match = detections[0, -1] == detections[:, -1]\n # Indices of boxes with lower confidence scores, large IOUs and matching labels\n invalid = large_overlap & label_match\n weights = detections[invalid, 4:5]\n # Merge overlapping bboxes by order of confidence\n detections[0, :4] = (weights * detections[invalid, :4]).sum(0) / weights.sum()\n keep_boxes += [detections[0]]\n detections = detections[~invalid]\n if keep_boxes:\n output[image_i] = torch.stack(keep_boxes)\n\n return output\n\n\ndef build_targets(pred_boxes, pred_cls, target, anchors, ignore_thres):\n\n ByteTensor = torch.cuda.ByteTensor if pred_boxes.is_cuda else torch.ByteTensor\n FloatTensor = torch.cuda.FloatTensor if pred_boxes.is_cuda else torch.FloatTensor\n\n nB = pred_boxes.size(0)\n nA = pred_boxes.size(1)\n nC = pred_cls.size(-1)\n nG = pred_boxes.size(2)\n\n # Output tensors\n obj_mask = ByteTensor(nB, nA, nG, nG).fill_(0)\n noobj_mask = ByteTensor(nB, nA, nG, nG).fill_(1)\n class_mask = FloatTensor(nB, nA, nG, nG).fill_(0)\n iou_scores = FloatTensor(nB, nA, nG, nG).fill_(0)\n tx = FloatTensor(nB, nA, nG, nG).fill_(0)\n ty = FloatTensor(nB, nA, nG, nG).fill_(0)\n tw = FloatTensor(nB, nA, nG, nG).fill_(0)\n th = FloatTensor(nB, nA, nG, nG).fill_(0)\n tcls = FloatTensor(nB, nA, nG, nG, nC).fill_(0)\n\n # Convert to position relative to box\n target_boxes = target[:, 2:6] * nG\n gxy = target_boxes[:, :2]\n gwh = target_boxes[:, 2:]\n # Get anchors with best iou\n ious = torch.stack([bbox_wh_iou(anchor, gwh) for anchor in anchors])\n best_ious, best_n = ious.max(0)\n # Separate target values\n b, target_labels = target[:, :2].long().t()\n gx, gy = gxy.t()\n gw, gh = gwh.t()\n gi, gj = gxy.long().t()\n # Set masks\n obj_mask[b, best_n, gj, gi] = 1\n noobj_mask[b, best_n, gj, gi] = 0\n\n # Set noobj mask to zero where iou exceeds ignore threshold\n for 
i, anchor_ious in enumerate(ious.t()):\n noobj_mask[b[i], anchor_ious > ignore_thres, gj[i], gi[i]] = 0\n\n # Coordinates\n tx[b, best_n, gj, gi] = gx - gx.floor()\n ty[b, best_n, gj, gi] = gy - gy.floor()\n # Width and height\n tw[b, best_n, gj, gi] = torch.log(gw / anchors[best_n][:, 0] + 1e-16)\n th[b, best_n, gj, gi] = torch.log(gh / anchors[best_n][:, 1] + 1e-16)\n # One-hot encoding of label\n tcls[b, best_n, gj, gi, target_labels] = 1\n # Compute label correctness and iou at best anchor\n class_mask[b, best_n, gj, gi] = (pred_cls[b, best_n, gj, gi].argmax(-1) == target_labels).float()\n iou_scores[b, best_n, gj, gi] = bbox_iou(pred_boxes[b, best_n, gj, gi], target_boxes, x1y1x2y2=False)\n\n tconf = obj_mask.float()\n return iou_scores, class_mask, obj_mask, noobj_mask, tx, ty, tw, th, tcls, tconf\n" ]
[ [ "numpy.maximum", "torch.max", "numpy.unique", "torch.nn.init.constant_", "torch.min", "torch.from_numpy", "numpy.full", "numpy.concatenate", "torch.log", "torch.nn.init.normal_", "torch.stack", "numpy.argsort", "torch.clamp", "numpy.array", "numpy.where", "numpy.sum", "numpy.zeros" ] ]
seasker/current
[ "f040cec106e9758d4c3a04a1e9b0a4e384b3c7b4" ]
[ "src/train_yanan.py" ]
[ "import cv2\r\nimport sys\r\nimport time\r\nimport imageio\r\n\r\nimport tensorflow as tf\r\nimport scipy.misc as sm\r\nimport numpy as np\r\nimport scipy.io as sio\r\nimport math\r\n\r\nfrom mcnet import MCNET\r\nfrom utils import *\r\nfrom os import listdir, makedirs, system\r\nfrom os.path import exists\r\nfrom argparse import ArgumentParser\r\nfrom joblib import Parallel, delayed\r\n'''\r\nby seasker\r\nlr:学习率\r\nbatch_size:\r\n\r\n'''\r\ndef main(lr, batch_size, alpha, beta, image_size, K,\r\n T, num_iter, gpu):\r\n data_path = \"../data/traffic_data/\"\r\n data=load_data_from_days_mat('/home/seasker/CNN-master/dataset/Traffic-data-mat/trafficDayFSOdata.mat','trafficDayFSOdata')\r\n data_meta=compute_data_meta(data,(0,1,2))\r\n trans_data=nomalize(data,data_meta,'maxmin')\r\n train_num=(data.shape[0]-K-T+1)//10*9\r\n margin = 0.3 \r\n updateD = True\r\n updateG = True\r\n iters = 0\r\n prefix = (\"traffic_MCNET\"\r\n + \"_image_size=\"+str(image_size[0])\r\n +\"x\"+str(image_size[1])\r\n + \"_K=\"+str(K)\r\n + \"_T=\"+str(T)\r\n + \"_batch_size=\"+str(batch_size)\r\n + \"_alpha=\"+str(alpha)\r\n + \"_beta=\"+str(beta)\r\n + \"_lr=\"+str(lr))\r\n\r\n print(\"\\n\"+prefix+\"\\n\")\r\n checkpoint_dir = \"../models/\"+prefix+\"/\"\r\n samples_dir = \"../samples/\"+prefix+\"/\"\r\n summary_dir = \"../logs/\"+prefix+\"/\"\r\n\r\n if not exists(checkpoint_dir):\r\n makedirs(checkpoint_dir)\r\n if not exists(samples_dir):\r\n makedirs(samples_dir)\r\n if not exists(summary_dir):\r\n makedirs(summary_dir)\r\n\r\n with tf.device(\"/gpu:%d\"%gpu):\r\n model = MCNET(image_size=[image_size[0],image_size[1]], c_dim=3,\r\n K=K, batch_size=batch_size, T=T,\r\n checkpoint_dir=checkpoint_dir)\r\n d_optim = tf.train.AdamOptimizer(lr, beta1=0.5).minimize(\r\n model.d_loss, var_list=model.d_vars\r\n )\r\n g_optim = tf.train.AdamOptimizer(lr, beta1=0.5).minimize(\r\n alpha*model.L_img+beta*model.L_GAN, var_list=model.g_vars\r\n )\r\n\r\n # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1.0)\r\n with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,\r\n log_device_placement=False\r\n #gpu_options=gpu_options\r\n )) as sess:\r\n\r\n tf.global_variables_initializer().run()\r\n\r\n if model.load(sess, checkpoint_dir):\r\n print(\" [*] Load SUCCESS\")\r\n else:\r\n print(\" [!] 
Load failed...\")\r\n\r\n g_sum = tf.summary.merge([model.L_p_sum,\r\n model.L_gdl_sum, model.loss_sum,\r\n model.L_GAN_sum])\r\n d_sum = tf.summary.merge([model.d_loss_real_sum, model.d_loss_sum,\r\n model.d_loss_fake_sum])\r\n writer = tf.summary.FileWriter(summary_dir, sess.graph)\r\n\r\n counter = iters+1\r\n start_time = time.time()\r\n\r\n with Parallel(n_jobs=4) as parallel:\r\n while iters < 100:\r\n mini_batches = get_minibatches_idx(train_num, batch_size, shuffle=False)\r\n for _, batchidx in mini_batches:\r\n if len(batchidx) == batch_size:\r\n seq_batch = np.zeros((batch_size, image_size[0], image_size[1],\r\n K+T, 3), dtype=\"float32\")\r\n diff_batch = np.zeros((batch_size, image_size[0], image_size[1],\r\n K-1, 3), dtype=\"float32\")\r\n t0 = time.time()\r\n Ts = np.repeat(np.array([T]),batch_size,axis=0)\r\n Ks = np.repeat(np.array([K]),batch_size,axis=0)\r\n \r\n shapes = np.repeat(np.array([image_size]),batch_size,axis=0)\r\n #f: txt file, p\r\n output = parallel(delayed(get_sample_data)(trans_data,i, k, t)\r\n for i,k,t in zip(batchidx,Ks, Ts))\r\n print(type(output[0][1]))\r\n for i in range(batch_size): \r\n \r\n seq_batch[i] = output[i][0]\r\n diff_batch[i] = output[i][1]\r\n\r\n if updateD:\r\n _, summary_str = sess.run([d_optim, d_sum],\r\n feed_dict={model.diff_in: diff_batch,\r\n model.xt: seq_batch[:,:,:,K-1],\r\n model.target: seq_batch})\r\n writer.add_summary(summary_str, counter)\r\n\r\n if updateG:\r\n _, summary_str = sess.run([g_optim, g_sum],\r\n feed_dict={model.diff_in: diff_batch,\r\n model.xt: seq_batch[:,:,:,K-1],\r\n model.target: seq_batch})\r\n writer.add_summary(summary_str, counter)\r\n\r\n errD_fake = model.d_loss_fake.eval({model.diff_in: diff_batch,\r\n model.xt: seq_batch[:,:,:,K-1],\r\n model.target: seq_batch})\r\n errD_real = model.d_loss_real.eval({model.diff_in: diff_batch,\r\n model.xt: seq_batch[:,:,:,K-1],\r\n model.target: seq_batch})\r\n errG = model.L_GAN.eval({model.diff_in: diff_batch,\r\n model.xt: seq_batch[:,:,:,K-1],\r\n model.target: seq_batch})\r\n errG_img = model.L_img.eval({model.diff_in: diff_batch,\r\n model.xt: seq_batch[:,:,:,K-1],\r\n model.target: seq_batch}) \r\n\r\n pred=model.G.eval({model.diff_in: diff_batch,\r\n model.xt: seq_batch[:,:,:,K-1],\r\n model.target: seq_batch})\r\n\r\n print('real')\r\n print(seq_batch[0,:,:,K:]) \r\n print('pred')\r\n\r\n print(np.squeeze(pred[0])) \r\n\r\n\r\n print('diff')\r\n print(seq_batch[0,:,:,K]-np.squeeze(pred[0])) \r\n\r\n if errD_fake < margin or errD_real < margin:\r\n updateD = False\r\n if errD_fake > (1.-margin) or errD_real > (1.-margin):\r\n updateG = False\r\n if not updateD and not updateG:\r\n updateD = True\r\n updateG = True\r\n\r\n counter += 1\r\n \r\n print(\r\n \"Iters: [%2d] time: %4.4f, d_loss: %.8f, L_GAN: %.8f,L_img:%.8f\" \r\n % (iters, time.time() - start_time, errD_fake+errD_real,errG,errG_img)\r\n )\r\n\r\n if np.mod(counter, 50) == 1:\r\n batch_pred = sess.run([model.G],\r\n feed_dict={model.diff_in:diff_batch,\r\n model.xt: seq_batch[:,:,:,K-1],\r\n model.target: seq_batch})[0]\r\n batch_pred = batch_pred[0].swapaxes(0,2).swapaxes(1,2)\r\n batch_real = seq_batch[0,:,:,K:].swapaxes(0,2).swapaxes(1,2)\r\n pred=inverse_maxmin_normalize(batch_pred,data_meta)\r\n real=inverse_maxmin_normalize(batch_real,data_meta)\r\n print(pred)\r\n print('')\r\n print(real)\r\n mae,mre,rmse=compute_matrics(pred,real)\r\n print('mae:',mae,' ','mre:',mre,' ','rmse:',rmse)\r\n time.sleep(30)\r\n\r\n\r\n \r\n # samples = np.concatenate((samples,sbatch), axis=0)\r\n # 
print(\"Saving sample ...\")\r\n # save_images(samples[:,:,:,:], [2, T], \r\n # samples_dir+\"train_%s.png\" % (iters))\r\n if np.mod(counter, 500) == 2:\r\n model.save(sess, checkpoint_dir, counter)\r\n \r\n iters += 1\r\n\r\nif __name__ == \"__main__\":\r\n parser = ArgumentParser()\r\n parser.add_argument(\"--lr\", type=float, dest=\"lr\",\r\n default=0.0001, help=\"Base Learning Rate\")\r\n parser.add_argument(\"--batch_size\", type=int, dest=\"batch_size\",\r\n default=7, help=\"Mini-batch size\")\r\n parser.add_argument(\"--alpha\", type=float, dest=\"alpha\",\r\n default=1.0, help=\"Image loss weight\")\r\n parser.add_argument(\"--beta\", type=float, dest=\"beta\",\r\n default=0.02, help=\"GAN loss weight\")\r\n parser.add_argument(\"--image_size\", type=list, dest=\"image_size\",\r\n default=[35,168], help=\"Mini-batch size\")\r\n parser.add_argument(\"--K\", type=int, dest=\"K\",\r\n default=7, help=\"Number of steps to observe from the past\")\r\n parser.add_argument(\"--T\", type=int, dest=\"T\",\r\n default=1, help=\"Number of steps into the future\")\r\n parser.add_argument(\"--num_iter\", type=int, dest=\"num_iter\",\r\n default=100000, help=\"Number of iterations\")\r\n parser.add_argument(\"--gpu\", type=int, nargs=\"+\", dest=\"gpu\", default=0,\r\n help=\"GPU device id\")\r\n\r\n args = parser.parse_args()\r\n main(**vars(args))\r\n" ]
[ [ "tensorflow.device", "tensorflow.summary.FileWriter", "numpy.squeeze", "tensorflow.ConfigProto", "tensorflow.global_variables_initializer", "tensorflow.train.AdamOptimizer", "numpy.mod", "numpy.array", "numpy.zeros", "tensorflow.summary.merge" ] ]
XingyuGuUCSD/deep-speaker
[ "4f16612af09f414eaabb39eef352af9b14f8a1c8" ]
[ "models_train.py" ]
[ "import logging\nfrom time import time\n\nimport numpy as np\nimport sys\n\nimport constants as c\nfrom librispeech_wav_reader import read_librispeech_structure\nfrom models import convolutional_model\nfrom next_batch import stochastic_mini_batch\nfrom triplet_loss import deep_speaker_loss\nfrom utils import get_last_checkpoint_if_any, create_dir_and_delete_content\n\n\ndef main(libri_dir=c.DATASET_DIR):\n logging.info('Looking for audio [wav] files in {}.'.format(libri_dir))\n libri = read_librispeech_structure(libri_dir)\n\n if len(libri) == 0:\n logging.warning('Have you converted flac files to wav? If not, run audio/convert_flac_2_wav.sh')\n exit(1)\n\n batch = stochastic_mini_batch(libri, batch_size=c.BATCH_NUM_TRIPLETS)\n batch_size = c.BATCH_NUM_TRIPLETS * 3 # A triplet has 3 parts.\n x, y = batch.to_inputs()\n b = x[0]\n num_frames = b.shape[0]\n logging.info('num_frames = {}'.format(num_frames))\n\n batch_shape = [batch_size * num_frames] + list(b.shape[1:])\n logging.info('batch shape: {}'.format(batch_shape))\n logging.info('batch size: {}'.format(batch_size))\n model = convolutional_model(batch_input_shape=batch_shape,\n batch_size=batch_size, num_frames=num_frames)\n logging.info(model.summary())\n\n logging.info('Compiling the model...')\n model.compile(optimizer='adam', loss=deep_speaker_loss)\n logging.info('[DONE]')\n\n grad_steps = 0\n last_checkpoint = get_last_checkpoint_if_any(c.CHECKPOINT_FOLDER)\n if last_checkpoint is not None:\n logging.info('Found checkpoint [{}]. Resume from here...'.format(last_checkpoint))\n model.load_weights(last_checkpoint)\n grad_steps = int(last_checkpoint.split('_')[-2])\n logging.info('[DONE]')\n\n logging.info('Starting training...')\n orig_time = time()\n\n while True:\n grad_steps += 1\n batch = stochastic_mini_batch(libri, batch_size=c.BATCH_NUM_TRIPLETS)\n x, _ = batch.to_inputs()\n\n # output.shape = (3, 383, 32, 32, 3) something like this\n # explanation = (batch_size, num_frames, width, height, channels)\n logging.info('x.shape before reshape: {}'.format(x.shape))\n x = np.reshape(x, (batch_size * num_frames, b.shape[2], b.shape[2], b.shape[3]))\n logging.info('x.shape after reshape: {}'.format(x.shape))\n\n # we don't need to use the targets y, because we know by the convention that:\n # we have [anchors, positive examples, negative examples]. The loss only uses x and\n # can determine if a sample is an anchor, positive or negative sample.\n stub_targets = np.random.uniform(size=(x.shape[0], 1))\n # result = model.predict(x, batch_size=x.shape[0])\n # logging.info(result.shape)\n # np.set_printoptions(precision=2)\n # logging.info(result[0:20, 0:5])\n\n logging.info('-' * 80)\n logging.info('== Presenting batch #{0}'.format(grad_steps))\n logging.info(batch.libri_batch)\n loss = model.train_on_batch(x, stub_targets)\n logging.info('== Processed in {0:.2f}s by the network, training loss = {1}.'.format(time() - orig_time, loss))\n orig_time = time()\n\n # record training loss\n with open(c.LOSS_FILE, \"a\") as f:\n f.write(\"{0},{1}\\n\".format(grad_steps, loss))\n\n # checkpoints are really heavy so let's just keep the last one.\n create_dir_and_delete_content(c.CHECKPOINT_FOLDER)\n model.save_weights('{0}/model_{1}_{2:.5f}.h5'.format(c.CHECKPOINT_FOLDER, grad_steps, loss))\n\n\n\nif __name__ == '__main__':\n logging.basicConfig(handlers=[logging.StreamHandler(stream=sys.stdout)], level=logging.INFO,\n format='%(asctime)-15s [%(levelname)s] %(filename)s/%(funcName)s | %(message)s')\n main()\n" ]
[ [ "numpy.reshape", "numpy.random.uniform" ] ]
mraabo/Dissertation--Bayesian-Neural-Networks
[ "629b1c5f4bbdb80ef1d1037b4a0a1b7f95ac710b" ]
[ "Python_code/Boston_BNN_1hidden_hiera.py" ]
[ "# # ----------------------------- INFO ---------------------------\n# In this python script we implement and run a BNN for predicting house prices\n# in Boston. The sampler is based on the NUTS sampler\n\n# # ----------------------------- IMPORTS ---------------------------\nimport warnings\nimport tensorflow as tf\nimport seaborn as sns\nimport sys\nimport time\nfrom keras.datasets import boston_housing\nfrom sklearn import metrics\nimport numpy as np\nimport pymc3 as pm\nimport theano\nimport arviz as az\nfrom arviz.utils import Numba\nimport theano.tensor as tt\nNumba.disable_numba()\nNumba.numba_flag\nfloatX = theano.config.floatX\nsns.set_style(\"white\")\n# # ----------------------------- Print versions ---------------------------\n\nprint(\"Running on Python version %s\" % sys.version)\nprint(f\"Running on PyMC3 version{pm.__version__}\")\nprint(\"Running on Theano version %s\" % theano.__version__)\nprint(\"Running on Arviz version %s\" % az.__version__)\nprint(\"Running on Numpy version %s\" % np.__version__)\n\n# Ignore warnings - NUTS provide many runtimeWarning\nwarnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n\ntf.random.set_seed(42)\n# # ----------------------------- Loading Boston data ---------------------------\n(X_train, y_train), (X_test, y_test) = boston_housing.load_data(seed=3030)\n\n# pad Xs with 1's to add bias\nones_train = np.ones(X_train.shape[0])\nones_test = np.ones(X_test.shape[0])\nX_train = np.insert(X_train, 0, ones_train, axis=1)\nX_test = np.insert(X_test, 0, ones_test, axis=1)\n\n\n# # ----------------------------- Implementing a BNN function ---------------------------\n\ndef construct_bnn(ann_input, ann_output, n_hidden):\n # Initialize random weights between each layer\n init_1 = np.random.randn(X_train.shape[1], n_hidden).astype(floatX)*.1\n init_out = np.random.randn(n_hidden, 1).astype(floatX)*.1\n with pm.Model() as bayesian_neural_network:\n ann_input = pm.Data(\"ann_input\", X_train)\n ann_output = pm.Data(\"ann_output\", y_train)\n\n # prior on hyper parameters for weight 1\n mu1 = pm.Cauchy('mu1', shape=(\n X_train.shape[1], n_hidden), alpha=0, beta=1)\n sigma1 = pm.HalfNormal('sigma1', shape=(\n X_train.shape[1], n_hidden), sigma=1)\n\n # Input -> Layer 1\n weights_1 = pm.Normal('w_1', mu=mu1, sd=sigma1,\n shape=(X_train.shape[1], n_hidden),\n testval=init_1)\n acts_1 = pm.Deterministic(\n 'activations_1', tt.nnet.relu(tt.dot(ann_input, weights_1)))\n\n # prior on hyper parameters for weight_out\n mu_out = pm.Cauchy('mu_out', shape=(n_hidden, 1), alpha=0, beta=1)\n sigma_out = pm.HalfNormal('sigma_out', shape=(n_hidden, 1), sigma=1)\n\n # Layer 1 -> Output Layer\n weights_out = pm.Normal('w_out', mu=mu_out, sd=sigma_out,\n shape=(n_hidden, 1),\n testval=init_out)\n acts_out = pm.Deterministic(\n 'activations_out', tt.dot(acts_1, weights_out))\n\n # Define likelihood\n out = pm.Normal('out', mu=acts_out[:, 0], sd=1, observed=ann_output)\n\n return bayesian_neural_network\n\n\n# # ----------------------------- Sampling from posterior ---------------------------\n# Start time\ntic = time.perf_counter() # for timing\nbayesian_neural_network_NUTS = construct_bnn(X_train, y_train, n_hidden=10)\n\n# Sample from the posterior using the NUTS samplper\nwith bayesian_neural_network_NUTS:\n trace = pm.sample(draws=3000, tune=1000, chains=3, target_accept=.90)\n\n\n# # ----------------------------- Making predictions on training data ---------------------------\nppc1 = pm.sample_posterior_predictive(\n trace, 
model=bayesian_neural_network_NUTS)\n\n# Taking the mean over all samples to generate a prediction\ny_train_pred = ppc1['out'].mean(axis=0)\n\n\n# Replace shared variables with testing set\npm.set_data(new_data={\"ann_input\": X_test, \"ann_output\": y_test},\n model=bayesian_neural_network_NUTS)\n\n\n# # ----------------------------- Making predictions on test data ---------------------------\nppc2 = pm.sample_posterior_predictive(\n trace, model=bayesian_neural_network_NUTS)\n\n# Taking the mean over all samples to generate a prediction\ny_test_pred = ppc2['out'].mean(axis=0)\n\n# End time\ntoc = time.perf_counter()\nprint(f\"Run time {toc - tic:0.4f} seconds\")\n\n# Printing the performance measures\nprint('MSE (NUTS) on training data:',\n metrics.mean_squared_error(y_train, y_train_pred))\nprint('MSE (NUTS) on test data:', metrics.mean_squared_error(y_test, y_test_pred))\n" ]
[ [ "sklearn.metrics.mean_squared_error", "numpy.ones", "numpy.random.randn", "numpy.insert", "tensorflow.random.set_seed" ] ]
robosyn/TensorFlow2.0-Examples
[ "6b71ba04eae5e12cc0390ec48c95baf6a17d5765" ]
[ "4-Object_Detection/YOLOV3/test_model.py" ]
[ "import numpy as np\nimport tensorflow as tf\nimport cv2\nimport os\nimport time\nimport shutil\nimport numpy as np\nimport tensorflow as tf\nimport core.utils as utils\nfrom tqdm import tqdm\nfrom core.dataset import Dataset\nfrom core.yolov3 import YOLOv3, decode, compute_loss\nfrom core.config import cfg\n\n#print(\"OUTPUT\", output_details)\nimage = cv2.imread(\"/home/rick/Desktop/test.jpg\")\nimage = cv2.resize(image, (416, 416))\n\noryginal_image = image.astype(np.float32)\nimage = oryginal_image/255\nimage = np.reshape(image, (1, 416, 416, 3))\n\ninput_tensor = tf.keras.layers.Input([416, 416, 3])\nconv_tensors = YOLOv3(input_tensor)\n\noutput_tensors = []\nfor i, conv_tensor in enumerate(conv_tensors):\n pred_tensor = decode(conv_tensor, i)\n output_tensors.append(conv_tensor)\n output_tensors.append(pred_tensor)\n\nmodel = tf.keras.Model(input_tensor, output_tensors)\nmodel.load_weights(\"my_yolov3\")\npred_bbox = model.predict(image)\npred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]\npred_bbox = tf.concat(pred_bbox, axis=0)\nbboxes = utils.postprocess_boxes(pred_bbox, original_image_size, input_size, 0.3)\nbboxes = utils.nms(bboxes, 0.45, method='nms')\n\nimage = utils.draw_bbox(original_image, bboxes)\nimage = Image.fromarray(image)\nimage.show()" ]
[ [ "tensorflow.concat", "tensorflow.shape", "numpy.reshape", "tensorflow.keras.Model", "tensorflow.keras.layers.Input" ] ]
mathisme/scikit-learn
[ "8b23a61d87b97ec9445d8b151ce5b2ebc92ce555" ]
[ "sklearn/linear_model/_stochastic_gradient.py" ]
[ "# Authors: Peter Prettenhofer <[email protected]> (main author)\n# Mathieu Blondel (partial_fit support)\n#\n# License: BSD 3 clause\n\"\"\"Classification, regression and One-Class SVM using Stochastic Gradient\nDescent (SGD).\n\"\"\"\n\nimport numpy as np\nimport warnings\n\nfrom abc import ABCMeta, abstractmethod\n\nfrom joblib import Parallel\n\nfrom ..base import clone, is_classifier\nfrom ._base import LinearClassifierMixin, SparseCoefMixin\nfrom ._base import make_dataset\nfrom ..base import BaseEstimator, RegressorMixin, OutlierMixin\nfrom ..utils import check_random_state\nfrom ..utils.extmath import safe_sparse_dot\nfrom ..utils.multiclass import _check_partial_fit_first_call\nfrom ..utils.validation import check_is_fitted, _check_sample_weight\nfrom ..utils.fixes import delayed\nfrom ..exceptions import ConvergenceWarning\nfrom ..model_selection import StratifiedShuffleSplit, ShuffleSplit\n\nfrom ._sgd_fast import _plain_sgd\nfrom ..utils import compute_class_weight\nfrom ._sgd_fast import Hinge\nfrom ._sgd_fast import SquaredHinge\nfrom ._sgd_fast import Log\nfrom ._sgd_fast import ModifiedHuber\nfrom ._sgd_fast import SquaredLoss\nfrom ._sgd_fast import Huber\nfrom ._sgd_fast import EpsilonInsensitive\nfrom ._sgd_fast import SquaredEpsilonInsensitive\nfrom ..utils.fixes import _joblib_parallel_args\n\nLEARNING_RATE_TYPES = {\n \"constant\": 1,\n \"optimal\": 2,\n \"invscaling\": 3,\n \"adaptive\": 4,\n \"pa1\": 5,\n \"pa2\": 6,\n}\n\nPENALTY_TYPES = {\"none\": 0, \"l2\": 2, \"l1\": 1, \"elasticnet\": 3}\n\nDEFAULT_EPSILON = 0.1\n# Default value of ``epsilon`` parameter.\n\nMAX_INT = np.iinfo(np.int32).max\n\n\nclass _ValidationScoreCallback:\n \"\"\"Callback for early stopping based on validation score\"\"\"\n\n def __init__(self, estimator, X_val, y_val, sample_weight_val, classes=None):\n self.estimator = clone(estimator)\n self.estimator.t_ = 1 # to pass check_is_fitted\n if classes is not None:\n self.estimator.classes_ = classes\n self.X_val = X_val\n self.y_val = y_val\n self.sample_weight_val = sample_weight_val\n\n def __call__(self, coef, intercept):\n est = self.estimator\n est.coef_ = coef.reshape(1, -1)\n est.intercept_ = np.atleast_1d(intercept)\n return est.score(self.X_val, self.y_val, self.sample_weight_val)\n\n\nclass BaseSGD(SparseCoefMixin, BaseEstimator, metaclass=ABCMeta):\n \"\"\"Base class for SGD classification and regression.\"\"\"\n\n def __init__(\n self,\n loss,\n *,\n penalty=\"l2\",\n alpha=0.0001,\n C=1.0,\n l1_ratio=0.15,\n fit_intercept=True,\n max_iter=1000,\n tol=1e-3,\n shuffle=True,\n verbose=0,\n epsilon=0.1,\n random_state=None,\n learning_rate=\"optimal\",\n eta0=0.0,\n power_t=0.5,\n early_stopping=False,\n validation_fraction=0.1,\n n_iter_no_change=5,\n warm_start=False,\n average=False,\n ):\n self.loss = loss\n self.penalty = penalty\n self.learning_rate = learning_rate\n self.epsilon = epsilon\n self.alpha = alpha\n self.C = C\n self.l1_ratio = l1_ratio\n self.fit_intercept = fit_intercept\n self.shuffle = shuffle\n self.random_state = random_state\n self.verbose = verbose\n self.eta0 = eta0\n self.power_t = power_t\n self.early_stopping = early_stopping\n self.validation_fraction = validation_fraction\n self.n_iter_no_change = n_iter_no_change\n self.warm_start = warm_start\n self.average = average\n self.max_iter = max_iter\n self.tol = tol\n # current tests expect init to do parameter validation\n # but we are not allowed to set attributes\n self._validate_params()\n\n def set_params(self, **kwargs):\n \"\"\"Set and validate 
the parameters of estimator.\n\n Parameters\n ----------\n **kwargs : dict\n Estimator parameters.\n\n Returns\n -------\n self : object\n Estimator instance.\n \"\"\"\n super().set_params(**kwargs)\n self._validate_params()\n return self\n\n @abstractmethod\n def fit(self, X, y):\n \"\"\"Fit model.\"\"\"\n\n def _validate_params(self, for_partial_fit=False):\n \"\"\"Validate input params.\"\"\"\n if not isinstance(self.shuffle, bool):\n raise ValueError(\"shuffle must be either True or False\")\n if not isinstance(self.early_stopping, bool):\n raise ValueError(\"early_stopping must be either True or False\")\n if self.early_stopping and for_partial_fit:\n raise ValueError(\"early_stopping should be False with partial_fit\")\n if self.max_iter is not None and self.max_iter <= 0:\n raise ValueError(\"max_iter must be > zero. Got %f\" % self.max_iter)\n if not (0.0 <= self.l1_ratio <= 1.0):\n raise ValueError(\"l1_ratio must be in [0, 1]\")\n if not isinstance(self, SGDOneClassSVM) and self.alpha < 0.0:\n raise ValueError(\"alpha must be >= 0\")\n if self.n_iter_no_change < 1:\n raise ValueError(\"n_iter_no_change must be >= 1\")\n if not (0.0 < self.validation_fraction < 1.0):\n raise ValueError(\"validation_fraction must be in range (0, 1)\")\n if self.learning_rate in (\"constant\", \"invscaling\", \"adaptive\"):\n if self.eta0 <= 0.0:\n raise ValueError(\"eta0 must be > 0\")\n if self.learning_rate == \"optimal\" and self.alpha == 0:\n raise ValueError(\n \"alpha must be > 0 since \"\n \"learning_rate is 'optimal'. alpha is used \"\n \"to compute the optimal learning rate.\"\n )\n\n # raises ValueError if not registered\n self._get_penalty_type(self.penalty)\n self._get_learning_rate_type(self.learning_rate)\n\n if self.loss not in self.loss_functions:\n raise ValueError(\"The loss %s is not supported. \" % self.loss)\n\n if self.loss == \"squared_loss\":\n warnings.warn(\n \"The loss 'squared_loss' was deprecated in v1.0 and will be \"\n \"removed in version 1.2. Use `loss='squared_error'` which is \"\n \"equivalent.\",\n FutureWarning,\n )\n\n def _get_loss_function(self, loss):\n \"\"\"Get concrete ``LossFunction`` object for str ``loss``.\"\"\"\n try:\n loss_ = self.loss_functions[loss]\n loss_class, args = loss_[0], loss_[1:]\n if loss in (\"huber\", \"epsilon_insensitive\", \"squared_epsilon_insensitive\"):\n args = (self.epsilon,)\n return loss_class(*args)\n except KeyError as e:\n raise ValueError(\"The loss %s is not supported. \" % loss) from e\n\n def _get_learning_rate_type(self, learning_rate):\n try:\n return LEARNING_RATE_TYPES[learning_rate]\n except KeyError as e:\n raise ValueError(\n \"learning rate %s is not supported. \" % learning_rate\n ) from e\n\n def _get_penalty_type(self, penalty):\n penalty = str(penalty).lower()\n try:\n return PENALTY_TYPES[penalty]\n except KeyError as e:\n raise ValueError(\"Penalty %s is not supported. \" % penalty) from e\n\n def _allocate_parameter_mem(\n self, n_classes, n_features, coef_init=None, intercept_init=None, one_class=0\n ):\n \"\"\"Allocate mem for parameters; initialize if provided.\"\"\"\n if n_classes > 2:\n # allocate coef_ for multi-class\n if coef_init is not None:\n coef_init = np.asarray(coef_init, order=\"C\")\n if coef_init.shape != (n_classes, n_features):\n raise ValueError(\"Provided ``coef_`` does not match dataset. 
\")\n self.coef_ = coef_init\n else:\n self.coef_ = np.zeros(\n (n_classes, n_features), dtype=np.float64, order=\"C\"\n )\n\n # allocate intercept_ for multi-class\n if intercept_init is not None:\n intercept_init = np.asarray(intercept_init, order=\"C\")\n if intercept_init.shape != (n_classes,):\n raise ValueError(\"Provided intercept_init does not match dataset.\")\n self.intercept_ = intercept_init\n else:\n self.intercept_ = np.zeros(n_classes, dtype=np.float64, order=\"C\")\n else:\n # allocate coef_\n if coef_init is not None:\n coef_init = np.asarray(coef_init, dtype=np.float64, order=\"C\")\n coef_init = coef_init.ravel()\n if coef_init.shape != (n_features,):\n raise ValueError(\"Provided coef_init does not match dataset.\")\n self.coef_ = coef_init\n else:\n self.coef_ = np.zeros(n_features, dtype=np.float64, order=\"C\")\n\n # allocate intercept_\n if intercept_init is not None:\n intercept_init = np.asarray(intercept_init, dtype=np.float64)\n if intercept_init.shape != (1,) and intercept_init.shape != ():\n raise ValueError(\"Provided intercept_init does not match dataset.\")\n if one_class:\n self.offset_ = intercept_init.reshape(\n 1,\n )\n else:\n self.intercept_ = intercept_init.reshape(\n 1,\n )\n else:\n if one_class:\n self.offset_ = np.zeros(1, dtype=np.float64, order=\"C\")\n else:\n self.intercept_ = np.zeros(1, dtype=np.float64, order=\"C\")\n\n # initialize average parameters\n if self.average > 0:\n self._standard_coef = self.coef_\n self._average_coef = np.zeros(self.coef_.shape, dtype=np.float64, order=\"C\")\n if one_class:\n self._standard_intercept = 1 - self.offset_\n else:\n self._standard_intercept = self.intercept_\n\n self._average_intercept = np.zeros(\n self._standard_intercept.shape, dtype=np.float64, order=\"C\"\n )\n\n def _make_validation_split(self, y):\n \"\"\"Split the dataset between training set and validation set.\n\n Parameters\n ----------\n y : ndarray of shape (n_samples, )\n Target values.\n\n Returns\n -------\n validation_mask : ndarray of shape (n_samples, )\n Equal to 1 on the validation set, 0 on the training set.\n \"\"\"\n n_samples = y.shape[0]\n validation_mask = np.zeros(n_samples, dtype=np.uint8)\n if not self.early_stopping:\n # use the full set for training, with an empty validation set\n return validation_mask\n\n if is_classifier(self):\n splitter_type = StratifiedShuffleSplit\n else:\n splitter_type = ShuffleSplit\n cv = splitter_type(\n test_size=self.validation_fraction, random_state=self.random_state\n )\n idx_train, idx_val = next(cv.split(np.zeros(shape=(y.shape[0], 1)), y))\n if idx_train.shape[0] == 0 or idx_val.shape[0] == 0:\n raise ValueError(\n \"Splitting %d samples into a train set and a validation set \"\n \"with validation_fraction=%r led to an empty set (%d and %d \"\n \"samples). 
Please either change validation_fraction, increase \"\n \"number of samples, or disable early_stopping.\"\n % (\n n_samples,\n self.validation_fraction,\n idx_train.shape[0],\n idx_val.shape[0],\n )\n )\n\n validation_mask[idx_val] = 1\n return validation_mask\n\n def _make_validation_score_cb(\n self, validation_mask, X, y, sample_weight, classes=None\n ):\n if not self.early_stopping:\n return None\n\n return _ValidationScoreCallback(\n self,\n X[validation_mask],\n y[validation_mask],\n sample_weight[validation_mask],\n classes=classes,\n )\n\n\ndef _prepare_fit_binary(est, y, i):\n \"\"\"Initialization for fit_binary.\n\n Returns y, coef, intercept, average_coef, average_intercept.\n \"\"\"\n y_i = np.ones(y.shape, dtype=np.float64, order=\"C\")\n y_i[y != est.classes_[i]] = -1.0\n average_intercept = 0\n average_coef = None\n\n if len(est.classes_) == 2:\n if not est.average:\n coef = est.coef_.ravel()\n intercept = est.intercept_[0]\n else:\n coef = est._standard_coef.ravel()\n intercept = est._standard_intercept[0]\n average_coef = est._average_coef.ravel()\n average_intercept = est._average_intercept[0]\n else:\n if not est.average:\n coef = est.coef_[i]\n intercept = est.intercept_[i]\n else:\n coef = est._standard_coef[i]\n intercept = est._standard_intercept[i]\n average_coef = est._average_coef[i]\n average_intercept = est._average_intercept[i]\n\n return y_i, coef, intercept, average_coef, average_intercept\n\n\ndef fit_binary(\n est,\n i,\n X,\n y,\n alpha,\n C,\n learning_rate,\n max_iter,\n pos_weight,\n neg_weight,\n sample_weight,\n validation_mask=None,\n random_state=None,\n):\n \"\"\"Fit a single binary classifier.\n\n The i'th class is considered the \"positive\" class.\n\n Parameters\n ----------\n est : Estimator object\n The estimator to fit\n\n i : int\n Index of the positive class\n\n X : numpy array or sparse matrix of shape [n_samples,n_features]\n Training data\n\n y : numpy array of shape [n_samples, ]\n Target values\n\n alpha : float\n The regularization parameter\n\n C : float\n Maximum step size for passive aggressive\n\n learning_rate : string\n The learning rate. 
Accepted values are 'constant', 'optimal',\n 'invscaling', 'pa1' and 'pa2'.\n\n max_iter : int\n The maximum number of iterations (epochs)\n\n pos_weight : float\n The weight of the positive class\n\n neg_weight : float\n The weight of the negative class\n\n sample_weight : numpy array of shape [n_samples, ]\n The weight of each sample\n\n validation_mask : numpy array of shape [n_samples, ], default=None\n Precomputed validation mask in case _fit_binary is called in the\n context of a one-vs-rest reduction.\n\n random_state : int, RandomState instance, default=None\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used\n by `np.random`.\n \"\"\"\n # if average is not true, average_coef, and average_intercept will be\n # unused\n y_i, coef, intercept, average_coef, average_intercept = _prepare_fit_binary(\n est, y, i\n )\n assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]\n\n random_state = check_random_state(random_state)\n dataset, intercept_decay = make_dataset(\n X, y_i, sample_weight, random_state=random_state\n )\n\n penalty_type = est._get_penalty_type(est.penalty)\n learning_rate_type = est._get_learning_rate_type(learning_rate)\n\n if validation_mask is None:\n validation_mask = est._make_validation_split(y_i)\n classes = np.array([-1, 1], dtype=y_i.dtype)\n validation_score_cb = est._make_validation_score_cb(\n validation_mask, X, y_i, sample_weight, classes=classes\n )\n\n # numpy mtrand expects a C long which is a signed 32 bit integer under\n # Windows\n seed = random_state.randint(MAX_INT)\n\n tol = est.tol if est.tol is not None else -np.inf\n\n coef, intercept, average_coef, average_intercept, n_iter_ = _plain_sgd(\n coef,\n intercept,\n average_coef,\n average_intercept,\n est.loss_function_,\n penalty_type,\n alpha,\n C,\n est.l1_ratio,\n dataset,\n validation_mask,\n est.early_stopping,\n validation_score_cb,\n int(est.n_iter_no_change),\n max_iter,\n tol,\n int(est.fit_intercept),\n int(est.verbose),\n int(est.shuffle),\n seed,\n pos_weight,\n neg_weight,\n learning_rate_type,\n est.eta0,\n est.power_t,\n 0,\n est.t_,\n intercept_decay,\n est.average,\n )\n\n if est.average:\n if len(est.classes_) == 2:\n est._average_intercept[0] = average_intercept\n else:\n est._average_intercept[i] = average_intercept\n\n return coef, intercept, n_iter_\n\n\nclass BaseSGDClassifier(LinearClassifierMixin, BaseSGD, metaclass=ABCMeta):\n\n # TODO: Remove squared_loss in v1.2\n loss_functions = {\n \"hinge\": (Hinge, 1.0),\n \"squared_hinge\": (SquaredHinge, 1.0),\n \"perceptron\": (Hinge, 0.0),\n \"log\": (Log,),\n \"modified_huber\": (ModifiedHuber,),\n \"squared_error\": (SquaredLoss,),\n \"squared_loss\": (SquaredLoss,),\n \"huber\": (Huber, DEFAULT_EPSILON),\n \"epsilon_insensitive\": (EpsilonInsensitive, DEFAULT_EPSILON),\n \"squared_epsilon_insensitive\": (SquaredEpsilonInsensitive, DEFAULT_EPSILON),\n }\n\n @abstractmethod\n def __init__(\n self,\n loss=\"hinge\",\n *,\n penalty=\"l2\",\n alpha=0.0001,\n l1_ratio=0.15,\n fit_intercept=True,\n max_iter=1000,\n tol=1e-3,\n shuffle=True,\n verbose=0,\n epsilon=DEFAULT_EPSILON,\n n_jobs=None,\n random_state=None,\n learning_rate=\"optimal\",\n eta0=0.0,\n power_t=0.5,\n early_stopping=False,\n validation_fraction=0.1,\n n_iter_no_change=5,\n class_weight=None,\n warm_start=False,\n average=False,\n ):\n\n super().__init__(\n loss=loss,\n penalty=penalty,\n 
alpha=alpha,\n l1_ratio=l1_ratio,\n fit_intercept=fit_intercept,\n max_iter=max_iter,\n tol=tol,\n shuffle=shuffle,\n verbose=verbose,\n epsilon=epsilon,\n random_state=random_state,\n learning_rate=learning_rate,\n eta0=eta0,\n power_t=power_t,\n early_stopping=early_stopping,\n validation_fraction=validation_fraction,\n n_iter_no_change=n_iter_no_change,\n warm_start=warm_start,\n average=average,\n )\n self.class_weight = class_weight\n self.n_jobs = n_jobs\n\n def _partial_fit(\n self,\n X,\n y,\n alpha,\n C,\n loss,\n learning_rate,\n max_iter,\n classes,\n sample_weight,\n coef_init,\n intercept_init,\n ):\n first_call = not hasattr(self, \"classes_\")\n X, y = self._validate_data(\n X,\n y,\n accept_sparse=\"csr\",\n dtype=np.float64,\n order=\"C\",\n accept_large_sparse=False,\n reset=first_call,\n )\n\n n_samples, n_features = X.shape\n\n _check_partial_fit_first_call(self, classes)\n\n n_classes = self.classes_.shape[0]\n\n # Allocate datastructures from input arguments\n self._expanded_class_weight = compute_class_weight(\n self.class_weight, classes=self.classes_, y=y\n )\n sample_weight = _check_sample_weight(sample_weight, X)\n\n if getattr(self, \"coef_\", None) is None or coef_init is not None:\n self._allocate_parameter_mem(\n n_classes, n_features, coef_init, intercept_init\n )\n elif n_features != self.coef_.shape[-1]:\n raise ValueError(\n \"Number of features %d does not match previous data %d.\"\n % (n_features, self.coef_.shape[-1])\n )\n\n self.loss_function_ = self._get_loss_function(loss)\n if not hasattr(self, \"t_\"):\n self.t_ = 1.0\n\n # delegate to concrete training procedure\n if n_classes > 2:\n self._fit_multiclass(\n X,\n y,\n alpha=alpha,\n C=C,\n learning_rate=learning_rate,\n sample_weight=sample_weight,\n max_iter=max_iter,\n )\n elif n_classes == 2:\n self._fit_binary(\n X,\n y,\n alpha=alpha,\n C=C,\n learning_rate=learning_rate,\n sample_weight=sample_weight,\n max_iter=max_iter,\n )\n else:\n raise ValueError(\n \"The number of classes has to be greater than one; got %d class\"\n % n_classes\n )\n\n return self\n\n def _fit(\n self,\n X,\n y,\n alpha,\n C,\n loss,\n learning_rate,\n coef_init=None,\n intercept_init=None,\n sample_weight=None,\n ):\n self._validate_params()\n if hasattr(self, \"classes_\"):\n self.classes_ = None\n\n X, y = self._validate_data(\n X,\n y,\n accept_sparse=\"csr\",\n dtype=np.float64,\n order=\"C\",\n accept_large_sparse=False,\n )\n\n # labels can be encoded as float, int, or string literals\n # np.unique sorts in asc order; largest class id is positive class\n classes = np.unique(y)\n\n if self.warm_start and hasattr(self, \"coef_\"):\n if coef_init is None:\n coef_init = self.coef_\n if intercept_init is None:\n intercept_init = self.intercept_\n else:\n self.coef_ = None\n self.intercept_ = None\n\n if self.average > 0:\n self._standard_coef = self.coef_\n self._standard_intercept = self.intercept_\n self._average_coef = None\n self._average_intercept = None\n\n # Clear iteration count for multiple call to fit.\n self.t_ = 1.0\n\n self._partial_fit(\n X,\n y,\n alpha,\n C,\n loss,\n learning_rate,\n self.max_iter,\n classes,\n sample_weight,\n coef_init,\n intercept_init,\n )\n\n if (\n self.tol is not None\n and self.tol > -np.inf\n and self.n_iter_ == self.max_iter\n ):\n warnings.warn(\n \"Maximum number of iteration reached before \"\n \"convergence. 
Consider increasing max_iter to \"\n \"improve the fit.\",\n ConvergenceWarning,\n )\n return self\n\n def _fit_binary(self, X, y, alpha, C, sample_weight, learning_rate, max_iter):\n \"\"\"Fit a binary classifier on X and y.\"\"\"\n coef, intercept, n_iter_ = fit_binary(\n self,\n 1,\n X,\n y,\n alpha,\n C,\n learning_rate,\n max_iter,\n self._expanded_class_weight[1],\n self._expanded_class_weight[0],\n sample_weight,\n random_state=self.random_state,\n )\n\n self.t_ += n_iter_ * X.shape[0]\n self.n_iter_ = n_iter_\n\n # need to be 2d\n if self.average > 0:\n if self.average <= self.t_ - 1:\n self.coef_ = self._average_coef.reshape(1, -1)\n self.intercept_ = self._average_intercept\n else:\n self.coef_ = self._standard_coef.reshape(1, -1)\n self._standard_intercept = np.atleast_1d(intercept)\n self.intercept_ = self._standard_intercept\n else:\n self.coef_ = coef.reshape(1, -1)\n # intercept is a float, need to convert it to an array of length 1\n self.intercept_ = np.atleast_1d(intercept)\n\n def _fit_multiclass(self, X, y, alpha, C, learning_rate, sample_weight, max_iter):\n \"\"\"Fit a multi-class classifier by combining binary classifiers\n\n Each binary classifier predicts one class versus all others. This\n strategy is called OvA (One versus All) or OvR (One versus Rest).\n \"\"\"\n # Precompute the validation split using the multiclass labels\n # to ensure proper balancing of the classes.\n validation_mask = self._make_validation_split(y)\n\n # Use joblib to fit OvA in parallel.\n # Pick the random seed for each job outside of fit_binary to avoid\n # sharing the estimator random state between threads which could lead\n # to non-deterministic behavior\n random_state = check_random_state(self.random_state)\n seeds = random_state.randint(MAX_INT, size=len(self.classes_))\n result = Parallel(\n n_jobs=self.n_jobs,\n verbose=self.verbose,\n **_joblib_parallel_args(require=\"sharedmem\"),\n )(\n delayed(fit_binary)(\n self,\n i,\n X,\n y,\n alpha,\n C,\n learning_rate,\n max_iter,\n self._expanded_class_weight[i],\n 1.0,\n sample_weight,\n validation_mask=validation_mask,\n random_state=seed,\n )\n for i, seed in enumerate(seeds)\n )\n\n # take the maximum of n_iter_ over every binary fit\n n_iter_ = 0.0\n for i, (_, intercept, n_iter_i) in enumerate(result):\n self.intercept_[i] = intercept\n n_iter_ = max(n_iter_, n_iter_i)\n\n self.t_ += n_iter_ * X.shape[0]\n self.n_iter_ = n_iter_\n\n if self.average > 0:\n if self.average <= self.t_ - 1.0:\n self.coef_ = self._average_coef\n self.intercept_ = self._average_intercept\n else:\n self.coef_ = self._standard_coef\n self._standard_intercept = np.atleast_1d(self.intercept_)\n self.intercept_ = self._standard_intercept\n\n def partial_fit(self, X, y, classes=None, sample_weight=None):\n \"\"\"Perform one epoch of stochastic gradient descent on given samples.\n\n Internally, this method uses ``max_iter = 1``. Therefore, it is not\n guaranteed that a minimum of the cost function is reached after calling\n it once. 
Matters such as objective convergence, early stopping, and\n learning rate adjustments should be handled by the user.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n Subset of the training data.\n\n y : ndarray of shape (n_samples,)\n Subset of the target values.\n\n classes : ndarray of shape (n_classes,), default=None\n Classes across all calls to partial_fit.\n Can be obtained by via `np.unique(y_all)`, where y_all is the\n target vector of the entire dataset.\n This argument is required for the first call to partial_fit\n and can be omitted in the subsequent calls.\n Note that y doesn't need to contain all labels in `classes`.\n\n sample_weight : array-like, shape (n_samples,), default=None\n Weights applied to individual samples.\n If not provided, uniform weights are assumed.\n\n Returns\n -------\n self : object\n Returns an instance of self.\n \"\"\"\n self._validate_params(for_partial_fit=True)\n if self.class_weight in [\"balanced\"]:\n raise ValueError(\n \"class_weight '{0}' is not supported for \"\n \"partial_fit. In order to use 'balanced' weights,\"\n \" use compute_class_weight('{0}', \"\n \"classes=classes, y=y). \"\n \"In place of y you can us a large enough sample \"\n \"of the full training set target to properly \"\n \"estimate the class frequency distributions. \"\n \"Pass the resulting weights as the class_weight \"\n \"parameter.\".format(self.class_weight)\n )\n return self._partial_fit(\n X,\n y,\n alpha=self.alpha,\n C=1.0,\n loss=self.loss,\n learning_rate=self.learning_rate,\n max_iter=1,\n classes=classes,\n sample_weight=sample_weight,\n coef_init=None,\n intercept_init=None,\n )\n\n def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None):\n \"\"\"Fit linear model with Stochastic Gradient Descent.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n Training data.\n\n y : ndarray of shape (n_samples,)\n Target values.\n\n coef_init : ndarray of shape (n_classes, n_features), default=None\n The initial coefficients to warm-start the optimization.\n\n intercept_init : ndarray of shape (n_classes,), default=None\n The initial intercept to warm-start the optimization.\n\n sample_weight : array-like, shape (n_samples,), default=None\n Weights applied to individual samples.\n If not provided, uniform weights are assumed. These weights will\n be multiplied with class_weight (passed through the\n constructor) if class_weight is specified.\n\n Returns\n -------\n self : object\n Returns an instance of self.\n \"\"\"\n return self._fit(\n X,\n y,\n alpha=self.alpha,\n C=1.0,\n loss=self.loss,\n learning_rate=self.learning_rate,\n coef_init=coef_init,\n intercept_init=intercept_init,\n sample_weight=sample_weight,\n )\n\n\nclass SGDClassifier(BaseSGDClassifier):\n \"\"\"Linear classifiers (SVM, logistic regression, etc.) with SGD training.\n\n This estimator implements regularized linear models with stochastic\n gradient descent (SGD) learning: the gradient of the loss is estimated\n each sample at a time and the model is updated along the way with a\n decreasing strength schedule (aka learning rate). SGD allows minibatch\n (online/out-of-core) learning via the `partial_fit` method.\n For best results using the default learning rate schedule, the data should\n have zero mean and unit variance.\n\n This implementation works with data represented as dense or sparse arrays\n of floating point values for the features. 
The model it fits can be\n controlled with the loss parameter; by default, it fits a linear support\n vector machine (SVM).\n\n The regularizer is a penalty added to the loss function that shrinks model\n parameters towards the zero vector using either the squared euclidean norm\n L2 or the absolute norm L1 or a combination of both (Elastic Net). If the\n parameter update crosses the 0.0 value because of the regularizer, the\n update is truncated to 0.0 to allow for learning sparse models and achieve\n online feature selection.\n\n Read more in the :ref:`User Guide <sgd>`.\n\n Parameters\n ----------\n loss : str, default='hinge'\n The loss function to be used. Defaults to 'hinge', which gives a\n linear SVM.\n\n The possible options are 'hinge', 'log', 'modified_huber',\n 'squared_hinge', 'perceptron', or a regression loss: 'squared_error',\n 'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.\n\n The 'log' loss gives logistic regression, a probabilistic classifier.\n 'modified_huber' is another smooth loss that brings tolerance to\n outliers as well as probability estimates.\n 'squared_hinge' is like hinge but is quadratically penalized.\n 'perceptron' is the linear loss used by the perceptron algorithm.\n The other losses are designed for regression but can be useful in\n classification as well; see\n :class:`~sklearn.linear_model.SGDRegressor` for a description.\n\n More details about the losses formulas can be found in the\n :ref:`User Guide <sgd_mathematical_formulation>`.\n\n .. deprecated:: 1.0\n The loss 'squared_loss' was deprecated in v1.0 and will be removed\n in version 1.2. Use `loss='squared_error'` which is equivalent.\n\n penalty : {'l2', 'l1', 'elasticnet'}, default='l2'\n The penalty (aka regularization term) to be used. Defaults to 'l2'\n which is the standard regularizer for linear SVM models. 'l1' and\n 'elasticnet' might bring sparsity to the model (feature selection)\n not achievable with 'l2'.\n\n alpha : float, default=0.0001\n Constant that multiplies the regularization term. The higher the\n value, the stronger the regularization.\n Also used to compute the learning rate when set to `learning_rate` is\n set to 'optimal'.\n\n l1_ratio : float, default=0.15\n The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.\n l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.\n Only used if `penalty` is 'elasticnet'.\n\n fit_intercept : bool, default=True\n Whether the intercept should be estimated or not. If False, the\n data is assumed to be already centered.\n\n max_iter : int, default=1000\n The maximum number of passes over the training data (aka epochs).\n It only impacts the behavior in the ``fit`` method, and not the\n :meth:`partial_fit` method.\n\n .. versionadded:: 0.19\n\n tol : float, default=1e-3\n The stopping criterion. If it is not None, training will stop\n when (loss > best_loss - tol) for ``n_iter_no_change`` consecutive\n epochs.\n Convergence is checked against the training loss or the\n validation loss depending on the `early_stopping` parameter.\n\n .. 
versionadded:: 0.19\n\n shuffle : bool, default=True\n Whether or not the training data should be shuffled after each epoch.\n\n verbose : int, default=0\n The verbosity level.\n\n epsilon : float, default=0.1\n Epsilon in the epsilon-insensitive loss functions; only if `loss` is\n 'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.\n For 'huber', determines the threshold at which it becomes less\n important to get the prediction exactly right.\n For epsilon-insensitive, any differences between the current prediction\n and the correct label are ignored if they are less than this threshold.\n\n n_jobs : int, default=None\n The number of CPUs to use to do the OVA (One Versus All, for\n multi-class problems) computation.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\n for more details.\n\n random_state : int, RandomState instance, default=None\n Used for shuffling the data, when ``shuffle`` is set to ``True``.\n Pass an int for reproducible output across multiple function calls.\n See :term:`Glossary <random_state>`.\n\n learning_rate : str, default='optimal'\n The learning rate schedule:\n\n - 'constant': `eta = eta0`\n - 'optimal': `eta = 1.0 / (alpha * (t + t0))`\n where t0 is chosen by a heuristic proposed by Leon Bottou.\n - 'invscaling': `eta = eta0 / pow(t, power_t)`\n - 'adaptive': eta = eta0, as long as the training keeps decreasing.\n Each time n_iter_no_change consecutive epochs fail to decrease the\n training loss by tol or fail to increase validation score by tol if\n early_stopping is True, the current learning rate is divided by 5.\n\n .. versionadded:: 0.20\n Added 'adaptive' option\n\n eta0 : double, default=0.0\n The initial learning rate for the 'constant', 'invscaling' or\n 'adaptive' schedules. The default value is 0.0 as eta0 is not used by\n the default schedule 'optimal'.\n\n power_t : double, default=0.5\n The exponent for inverse scaling learning rate [default 0.5].\n\n early_stopping : bool, default=False\n Whether to use early stopping to terminate training when validation\n score is not improving. If set to True, it will automatically set aside\n a stratified fraction of training data as validation and terminate\n training when validation score returned by the `score` method is not\n improving by at least tol for n_iter_no_change consecutive epochs.\n\n .. versionadded:: 0.20\n Added 'early_stopping' option\n\n validation_fraction : float, default=0.1\n The proportion of training data to set aside as validation set for\n early stopping. Must be between 0 and 1.\n Only used if `early_stopping` is True.\n\n .. versionadded:: 0.20\n Added 'validation_fraction' option\n\n n_iter_no_change : int, default=5\n Number of iterations with no improvement to wait before stopping\n fitting.\n Convergence is checked against the training loss or the\n validation loss depending on the `early_stopping` parameter.\n\n .. versionadded:: 0.20\n Added 'n_iter_no_change' option\n\n class_weight : dict, {class_label: weight} or \"balanced\", default=None\n Preset for the class_weight fit parameter.\n\n Weights associated with classes. 
If not given, all classes\n are supposed to have weight one.\n\n The \"balanced\" mode uses the values of y to automatically adjust\n weights inversely proportional to class frequencies in the input data\n as ``n_samples / (n_classes * np.bincount(y))``.\n\n warm_start : bool, default=False\n When set to True, reuse the solution of the previous call to fit as\n initialization, otherwise, just erase the previous solution.\n See :term:`the Glossary <warm_start>`.\n\n Repeatedly calling fit or partial_fit when warm_start is True can\n result in a different solution than when calling fit a single time\n because of the way the data is shuffled.\n If a dynamic learning rate is used, the learning rate is adapted\n depending on the number of samples already seen. Calling ``fit`` resets\n this counter, while ``partial_fit`` will result in increasing the\n existing counter.\n\n average : bool or int, default=False\n When set to True, computes the averaged SGD weights accross all\n updates and stores the result in the ``coef_`` attribute. If set to\n an int greater than 1, averaging will begin once the total number of\n samples seen reaches `average`. So ``average=10`` will begin\n averaging after seeing 10 samples.\n\n Attributes\n ----------\n coef_ : ndarray of shape (1, n_features) if n_classes == 2 else \\\n (n_classes, n_features)\n Weights assigned to the features.\n\n intercept_ : ndarray of shape (1,) if n_classes == 2 else (n_classes,)\n Constants in decision function.\n\n n_iter_ : int\n The actual number of iterations before reaching the stopping criterion.\n For multiclass fits, it is the maximum over every binary fit.\n\n loss_function_ : concrete ``LossFunction``\n\n classes_ : array of shape (n_classes,)\n\n t_ : int\n Number of weight updates performed during training.\n Same as ``(n_iter_ * n_samples)``.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n See Also\n --------\n sklearn.svm.LinearSVC : Linear support vector classification.\n LogisticRegression : Logistic regression.\n Perceptron : Inherits from SGDClassifier. ``Perceptron()`` is equivalent to\n ``SGDClassifier(loss=\"perceptron\", eta0=1, learning_rate=\"constant\",\n penalty=None)``.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.linear_model import SGDClassifier\n >>> from sklearn.preprocessing import StandardScaler\n >>> from sklearn.pipeline import make_pipeline\n >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])\n >>> Y = np.array([1, 1, 2, 2])\n >>> # Always scale the input. The most convenient way is to use a pipeline.\n >>> clf = make_pipeline(StandardScaler(),\n ... 
SGDClassifier(max_iter=1000, tol=1e-3))\n >>> clf.fit(X, Y)\n Pipeline(steps=[('standardscaler', StandardScaler()),\n ('sgdclassifier', SGDClassifier())])\n >>> print(clf.predict([[-0.8, -1]]))\n [1]\n \"\"\"\n\n def __init__(\n self,\n loss=\"hinge\",\n *,\n penalty=\"l2\",\n alpha=0.0001,\n l1_ratio=0.15,\n fit_intercept=True,\n max_iter=1000,\n tol=1e-3,\n shuffle=True,\n verbose=0,\n epsilon=DEFAULT_EPSILON,\n n_jobs=None,\n random_state=None,\n learning_rate=\"optimal\",\n eta0=0.0,\n power_t=0.5,\n early_stopping=False,\n validation_fraction=0.1,\n n_iter_no_change=5,\n class_weight=None,\n warm_start=False,\n average=False,\n ):\n super().__init__(\n loss=loss,\n penalty=penalty,\n alpha=alpha,\n l1_ratio=l1_ratio,\n fit_intercept=fit_intercept,\n max_iter=max_iter,\n tol=tol,\n shuffle=shuffle,\n verbose=verbose,\n epsilon=epsilon,\n n_jobs=n_jobs,\n random_state=random_state,\n learning_rate=learning_rate,\n eta0=eta0,\n power_t=power_t,\n early_stopping=early_stopping,\n validation_fraction=validation_fraction,\n n_iter_no_change=n_iter_no_change,\n class_weight=class_weight,\n warm_start=warm_start,\n average=average,\n )\n\n def _check_proba(self):\n if self.loss not in (\"log\", \"modified_huber\"):\n raise AttributeError(\n \"probability estimates are not available for loss=%r\" % self.loss\n )\n\n @property\n def predict_proba(self):\n \"\"\"Probability estimates.\n\n This method is only available for log loss and modified Huber loss.\n\n Multiclass probability estimates are derived from binary (one-vs.-rest)\n estimates by simple normalization, as recommended by Zadrozny and\n Elkan.\n\n Binary probability estimates for loss=\"modified_huber\" are given by\n (clip(decision_function(X), -1, 1) + 1) / 2. For other loss functions\n it is necessary to perform proper probability calibration by wrapping\n the classifier with\n :class:`~sklearn.calibration.CalibratedClassifierCV` instead.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n Input data for prediction.\n\n Returns\n -------\n ndarray of shape (n_samples, n_classes)\n Returns the probability of the sample for each class in the model,\n where classes are ordered as they are in `self.classes_`.\n\n References\n ----------\n Zadrozny and Elkan, \"Transforming classifier scores into multiclass\n probability estimates\", SIGKDD'02,\n http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf\n\n The justification for the formula in the loss=\"modified_huber\"\n case is in the appendix B in:\n http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf\n \"\"\"\n self._check_proba()\n return self._predict_proba\n\n def _predict_proba(self, X):\n check_is_fitted(self)\n\n if self.loss == \"log\":\n return self._predict_proba_lr(X)\n\n elif self.loss == \"modified_huber\":\n binary = len(self.classes_) == 2\n scores = self.decision_function(X)\n\n if binary:\n prob2 = np.ones((scores.shape[0], 2))\n prob = prob2[:, 1]\n else:\n prob = scores\n\n np.clip(scores, -1, 1, prob)\n prob += 1.0\n prob /= 2.0\n\n if binary:\n prob2[:, 0] -= prob\n prob = prob2\n else:\n # the above might assign zero to all classes, which doesn't\n # normalize neatly; work around this to produce uniform\n # probabilities\n prob_sum = prob.sum(axis=1)\n all_zero = prob_sum == 0\n if np.any(all_zero):\n prob[all_zero, :] = 1\n prob_sum[all_zero] = len(self.classes_)\n\n # normalize\n prob /= prob_sum.reshape((prob.shape[0], -1))\n\n return prob\n\n else:\n raise NotImplementedError(\n 
\"predict_(log_)proba only supported when\"\n \" loss='log' or loss='modified_huber' \"\n \"(%r given)\"\n % self.loss\n )\n\n @property\n def predict_log_proba(self):\n \"\"\"Log of probability estimates.\n\n This method is only available for log loss and modified Huber loss.\n\n When loss=\"modified_huber\", probability estimates may be hard zeros\n and ones, so taking the logarithm is not possible.\n\n See ``predict_proba`` for details.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Input data for prediction.\n\n Returns\n -------\n T : array-like, shape (n_samples, n_classes)\n Returns the log-probability of the sample for each class in the\n model, where classes are ordered as they are in\n `self.classes_`.\n \"\"\"\n self._check_proba()\n return self._predict_log_proba\n\n def _predict_log_proba(self, X):\n return np.log(self.predict_proba(X))\n\n def _more_tags(self):\n return {\n \"_xfail_checks\": {\n \"check_sample_weights_invariance\": (\n \"zero sample_weight is not equivalent to removing samples\"\n ),\n }\n }\n\n\nclass BaseSGDRegressor(RegressorMixin, BaseSGD):\n\n # TODO: Remove squared_loss in v1.2\n loss_functions = {\n \"squared_error\": (SquaredLoss,),\n \"squared_loss\": (SquaredLoss,),\n \"huber\": (Huber, DEFAULT_EPSILON),\n \"epsilon_insensitive\": (EpsilonInsensitive, DEFAULT_EPSILON),\n \"squared_epsilon_insensitive\": (SquaredEpsilonInsensitive, DEFAULT_EPSILON),\n }\n\n @abstractmethod\n def __init__(\n self,\n loss=\"squared_error\",\n *,\n penalty=\"l2\",\n alpha=0.0001,\n l1_ratio=0.15,\n fit_intercept=True,\n max_iter=1000,\n tol=1e-3,\n shuffle=True,\n verbose=0,\n epsilon=DEFAULT_EPSILON,\n random_state=None,\n learning_rate=\"invscaling\",\n eta0=0.01,\n power_t=0.25,\n early_stopping=False,\n validation_fraction=0.1,\n n_iter_no_change=5,\n warm_start=False,\n average=False,\n ):\n super().__init__(\n loss=loss,\n penalty=penalty,\n alpha=alpha,\n l1_ratio=l1_ratio,\n fit_intercept=fit_intercept,\n max_iter=max_iter,\n tol=tol,\n shuffle=shuffle,\n verbose=verbose,\n epsilon=epsilon,\n random_state=random_state,\n learning_rate=learning_rate,\n eta0=eta0,\n power_t=power_t,\n early_stopping=early_stopping,\n validation_fraction=validation_fraction,\n n_iter_no_change=n_iter_no_change,\n warm_start=warm_start,\n average=average,\n )\n\n def _partial_fit(\n self,\n X,\n y,\n alpha,\n C,\n loss,\n learning_rate,\n max_iter,\n sample_weight,\n coef_init,\n intercept_init,\n ):\n first_call = getattr(self, \"coef_\", None) is None\n X, y = self._validate_data(\n X,\n y,\n accept_sparse=\"csr\",\n copy=False,\n order=\"C\",\n dtype=np.float64,\n accept_large_sparse=False,\n reset=first_call,\n )\n y = y.astype(np.float64, copy=False)\n\n n_samples, n_features = X.shape\n\n sample_weight = _check_sample_weight(sample_weight, X)\n\n # Allocate datastructures from input arguments\n if first_call:\n self._allocate_parameter_mem(1, n_features, coef_init, intercept_init)\n if self.average > 0 and getattr(self, \"_average_coef\", None) is None:\n self._average_coef = np.zeros(n_features, dtype=np.float64, order=\"C\")\n self._average_intercept = np.zeros(1, dtype=np.float64, order=\"C\")\n\n self._fit_regressor(\n X, y, alpha, C, loss, learning_rate, sample_weight, max_iter\n )\n\n return self\n\n def partial_fit(self, X, y, sample_weight=None):\n \"\"\"Perform one epoch of stochastic gradient descent on given samples.\n\n Internally, this method uses ``max_iter = 1``. 
Therefore, it is not\n guaranteed that a minimum of the cost function is reached after calling\n it once. Matters such as objective convergence and early stopping\n should be handled by the user.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n Subset of training data\n\n y : numpy array of shape (n_samples,)\n Subset of target values\n\n sample_weight : array-like, shape (n_samples,), default=None\n Weights applied to individual samples.\n If not provided, uniform weights are assumed.\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n self._validate_params(for_partial_fit=True)\n return self._partial_fit(\n X,\n y,\n self.alpha,\n C=1.0,\n loss=self.loss,\n learning_rate=self.learning_rate,\n max_iter=1,\n sample_weight=sample_weight,\n coef_init=None,\n intercept_init=None,\n )\n\n def _fit(\n self,\n X,\n y,\n alpha,\n C,\n loss,\n learning_rate,\n coef_init=None,\n intercept_init=None,\n sample_weight=None,\n ):\n self._validate_params()\n if self.warm_start and getattr(self, \"coef_\", None) is not None:\n if coef_init is None:\n coef_init = self.coef_\n if intercept_init is None:\n intercept_init = self.intercept_\n else:\n self.coef_ = None\n self.intercept_ = None\n\n # Clear iteration count for multiple call to fit.\n self.t_ = 1.0\n\n self._partial_fit(\n X,\n y,\n alpha,\n C,\n loss,\n learning_rate,\n self.max_iter,\n sample_weight,\n coef_init,\n intercept_init,\n )\n\n if (\n self.tol is not None\n and self.tol > -np.inf\n and self.n_iter_ == self.max_iter\n ):\n warnings.warn(\n \"Maximum number of iteration reached before \"\n \"convergence. Consider increasing max_iter to \"\n \"improve the fit.\",\n ConvergenceWarning,\n )\n\n return self\n\n def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None):\n \"\"\"Fit linear model with Stochastic Gradient Descent.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n Training data\n\n y : ndarray of shape (n_samples,)\n Target values\n\n coef_init : ndarray of shape (n_features,), default=None\n The initial coefficients to warm-start the optimization.\n\n intercept_init : ndarray of shape (1,), default=None\n The initial intercept to warm-start the optimization.\n\n sample_weight : array-like, shape (n_samples,), default=None\n Weights applied to individual samples (1. 
for unweighted).\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n return self._fit(\n X,\n y,\n alpha=self.alpha,\n C=1.0,\n loss=self.loss,\n learning_rate=self.learning_rate,\n coef_init=coef_init,\n intercept_init=intercept_init,\n sample_weight=sample_weight,\n )\n\n def _decision_function(self, X):\n \"\"\"Predict using the linear model\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n\n Returns\n -------\n ndarray of shape (n_samples,)\n Predicted target values per element in X.\n \"\"\"\n check_is_fitted(self)\n\n X = self._validate_data(X, accept_sparse=\"csr\", reset=False)\n\n scores = safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_\n return scores.ravel()\n\n def predict(self, X):\n \"\"\"Predict using the linear model\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n\n Returns\n -------\n ndarray of shape (n_samples,)\n Predicted target values per element in X.\n \"\"\"\n return self._decision_function(X)\n\n def _fit_regressor(\n self, X, y, alpha, C, loss, learning_rate, sample_weight, max_iter\n ):\n dataset, intercept_decay = make_dataset(X, y, sample_weight)\n\n loss_function = self._get_loss_function(loss)\n penalty_type = self._get_penalty_type(self.penalty)\n learning_rate_type = self._get_learning_rate_type(learning_rate)\n\n if not hasattr(self, \"t_\"):\n self.t_ = 1.0\n\n validation_mask = self._make_validation_split(y)\n validation_score_cb = self._make_validation_score_cb(\n validation_mask, X, y, sample_weight\n )\n\n random_state = check_random_state(self.random_state)\n # numpy mtrand expects a C long which is a signed 32 bit integer under\n # Windows\n seed = random_state.randint(0, np.iinfo(np.int32).max)\n\n tol = self.tol if self.tol is not None else -np.inf\n\n if self.average:\n coef = self._standard_coef\n intercept = self._standard_intercept\n average_coef = self._average_coef\n average_intercept = self._average_intercept\n else:\n coef = self.coef_\n intercept = self.intercept_\n average_coef = None # Not used\n average_intercept = [0] # Not used\n\n coef, intercept, average_coef, average_intercept, self.n_iter_ = _plain_sgd(\n coef,\n intercept[0],\n average_coef,\n average_intercept[0],\n loss_function,\n penalty_type,\n alpha,\n C,\n self.l1_ratio,\n dataset,\n validation_mask,\n self.early_stopping,\n validation_score_cb,\n int(self.n_iter_no_change),\n max_iter,\n tol,\n int(self.fit_intercept),\n int(self.verbose),\n int(self.shuffle),\n seed,\n 1.0,\n 1.0,\n learning_rate_type,\n self.eta0,\n self.power_t,\n 0,\n self.t_,\n intercept_decay,\n self.average,\n )\n\n self.t_ += self.n_iter_ * X.shape[0]\n\n if self.average > 0:\n self._average_intercept = np.atleast_1d(average_intercept)\n self._standard_intercept = np.atleast_1d(intercept)\n\n if self.average <= self.t_ - 1.0:\n # made enough updates for averaging to be taken into account\n self.coef_ = average_coef\n self.intercept_ = np.atleast_1d(average_intercept)\n else:\n self.coef_ = coef\n self.intercept_ = np.atleast_1d(intercept)\n\n else:\n self.intercept_ = np.atleast_1d(intercept)\n\n\nclass SGDRegressor(BaseSGDRegressor):\n \"\"\"Linear model fitted by minimizing a regularized empirical loss with SGD\n\n SGD stands for Stochastic Gradient Descent: the gradient of the loss is\n estimated each sample at a time and the model is updated along the way with\n a decreasing strength schedule (aka learning rate).\n\n The regularizer is a penalty added to the 
loss function that shrinks model\n parameters towards the zero vector using either the squared euclidean norm\n L2 or the absolute norm L1 or a combination of both (Elastic Net). If the\n parameter update crosses the 0.0 value because of the regularizer, the\n update is truncated to 0.0 to allow for learning sparse models and achieve\n online feature selection.\n\n This implementation works with data represented as dense numpy arrays of\n floating point values for the features.\n\n Read more in the :ref:`User Guide <sgd>`.\n\n Parameters\n ----------\n loss : str, default='squared_error'\n The loss function to be used. The possible values are 'squared_error',\n 'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'\n\n The 'squared_error' refers to the ordinary least squares fit.\n 'huber' modifies 'squared_error' to focus less on getting outliers\n correct by switching from squared to linear loss past a distance of\n epsilon. 'epsilon_insensitive' ignores errors less than epsilon and is\n linear past that; this is the loss function used in SVR.\n 'squared_epsilon_insensitive' is the same but becomes squared loss past\n a tolerance of epsilon.\n\n More details about the losses formulas can be found in the\n :ref:`User Guide <sgd_mathematical_formulation>`.\n\n .. deprecated:: 1.0\n The loss 'squared_loss' was deprecated in v1.0 and will be removed\n in version 1.2. Use `loss='squared_error'` which is equivalent.\n\n penalty : {'l2', 'l1', 'elasticnet'}, default='l2'\n The penalty (aka regularization term) to be used. Defaults to 'l2'\n which is the standard regularizer for linear SVM models. 'l1' and\n 'elasticnet' might bring sparsity to the model (feature selection)\n not achievable with 'l2'.\n\n alpha : float, default=0.0001\n Constant that multiplies the regularization term. The higher the\n value, the stronger the regularization.\n Also used to compute the learning rate when set to `learning_rate` is\n set to 'optimal'.\n\n l1_ratio : float, default=0.15\n The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.\n l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.\n Only used if `penalty` is 'elasticnet'.\n\n fit_intercept : bool, default=True\n Whether the intercept should be estimated or not. If False, the\n data is assumed to be already centered.\n\n max_iter : int, default=1000\n The maximum number of passes over the training data (aka epochs).\n It only impacts the behavior in the ``fit`` method, and not the\n :meth:`partial_fit` method.\n\n .. versionadded:: 0.19\n\n tol : float, default=1e-3\n The stopping criterion. If it is not None, training will stop\n when (loss > best_loss - tol) for ``n_iter_no_change`` consecutive\n epochs.\n Convergence is checked against the training loss or the\n validation loss depending on the `early_stopping` parameter.\n\n .. 
versionadded:: 0.19\n\n shuffle : bool, default=True\n Whether or not the training data should be shuffled after each epoch.\n\n verbose : int, default=0\n The verbosity level.\n\n epsilon : float, default=0.1\n Epsilon in the epsilon-insensitive loss functions; only if `loss` is\n 'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.\n For 'huber', determines the threshold at which it becomes less\n important to get the prediction exactly right.\n For epsilon-insensitive, any differences between the current prediction\n and the correct label are ignored if they are less than this threshold.\n\n random_state : int, RandomState instance, default=None\n Used for shuffling the data, when ``shuffle`` is set to ``True``.\n Pass an int for reproducible output across multiple function calls.\n See :term:`Glossary <random_state>`.\n\n learning_rate : string, default='invscaling'\n The learning rate schedule:\n\n - 'constant': `eta = eta0`\n - 'optimal': `eta = 1.0 / (alpha * (t + t0))`\n where t0 is chosen by a heuristic proposed by Leon Bottou.\n - 'invscaling': `eta = eta0 / pow(t, power_t)`\n - 'adaptive': eta = eta0, as long as the training keeps decreasing.\n Each time n_iter_no_change consecutive epochs fail to decrease the\n training loss by tol or fail to increase validation score by tol if\n early_stopping is True, the current learning rate is divided by 5.\n\n .. versionadded:: 0.20\n Added 'adaptive' option\n\n eta0 : double, default=0.01\n The initial learning rate for the 'constant', 'invscaling' or\n 'adaptive' schedules. The default value is 0.01.\n\n power_t : double, default=0.25\n The exponent for inverse scaling learning rate.\n\n early_stopping : bool, default=False\n Whether to use early stopping to terminate training when validation\n score is not improving. If set to True, it will automatically set aside\n a fraction of training data as validation and terminate\n training when validation score returned by the `score` method is not\n improving by at least `tol` for `n_iter_no_change` consecutive\n epochs.\n\n .. versionadded:: 0.20\n Added 'early_stopping' option\n\n validation_fraction : float, default=0.1\n The proportion of training data to set aside as validation set for\n early stopping. Must be between 0 and 1.\n Only used if `early_stopping` is True.\n\n .. versionadded:: 0.20\n Added 'validation_fraction' option\n\n n_iter_no_change : int, default=5\n Number of iterations with no improvement to wait before stopping\n fitting.\n Convergence is checked against the training loss or the\n validation loss depending on the `early_stopping` parameter.\n\n .. versionadded:: 0.20\n Added 'n_iter_no_change' option\n\n warm_start : bool, default=False\n When set to True, reuse the solution of the previous call to fit as\n initialization, otherwise, just erase the previous solution.\n See :term:`the Glossary <warm_start>`.\n\n Repeatedly calling fit or partial_fit when warm_start is True can\n result in a different solution than when calling fit a single time\n because of the way the data is shuffled.\n If a dynamic learning rate is used, the learning rate is adapted\n depending on the number of samples already seen. Calling ``fit`` resets\n this counter, while ``partial_fit`` will result in increasing the\n existing counter.\n\n average : bool or int, default=False\n When set to True, computes the averaged SGD weights accross all\n updates and stores the result in the ``coef_`` attribute. 
If set to\n an int greater than 1, averaging will begin once the total number of\n samples seen reaches `average`. So ``average=10`` will begin\n averaging after seeing 10 samples.\n\n Attributes\n ----------\n coef_ : ndarray of shape (n_features,)\n Weights assigned to the features.\n\n intercept_ : ndarray of shape (1,)\n The intercept term.\n\n n_iter_ : int\n The actual number of iterations before reaching the stopping criterion.\n\n t_ : int\n Number of weight updates performed during training.\n Same as ``(n_iter_ * n_samples)``.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.linear_model import SGDRegressor\n >>> from sklearn.pipeline import make_pipeline\n >>> from sklearn.preprocessing import StandardScaler\n >>> n_samples, n_features = 10, 5\n >>> rng = np.random.RandomState(0)\n >>> y = rng.randn(n_samples)\n >>> X = rng.randn(n_samples, n_features)\n >>> # Always scale the input. The most convenient way is to use a pipeline.\n >>> reg = make_pipeline(StandardScaler(),\n ... SGDRegressor(max_iter=1000, tol=1e-3))\n >>> reg.fit(X, y)\n Pipeline(steps=[('standardscaler', StandardScaler()),\n ('sgdregressor', SGDRegressor())])\n\n See Also\n --------\n Ridge, ElasticNet, Lasso, sklearn.svm.SVR\n\n \"\"\"\n\n def __init__(\n self,\n loss=\"squared_error\",\n *,\n penalty=\"l2\",\n alpha=0.0001,\n l1_ratio=0.15,\n fit_intercept=True,\n max_iter=1000,\n tol=1e-3,\n shuffle=True,\n verbose=0,\n epsilon=DEFAULT_EPSILON,\n random_state=None,\n learning_rate=\"invscaling\",\n eta0=0.01,\n power_t=0.25,\n early_stopping=False,\n validation_fraction=0.1,\n n_iter_no_change=5,\n warm_start=False,\n average=False,\n ):\n super().__init__(\n loss=loss,\n penalty=penalty,\n alpha=alpha,\n l1_ratio=l1_ratio,\n fit_intercept=fit_intercept,\n max_iter=max_iter,\n tol=tol,\n shuffle=shuffle,\n verbose=verbose,\n epsilon=epsilon,\n random_state=random_state,\n learning_rate=learning_rate,\n eta0=eta0,\n power_t=power_t,\n early_stopping=early_stopping,\n validation_fraction=validation_fraction,\n n_iter_no_change=n_iter_no_change,\n warm_start=warm_start,\n average=average,\n )\n\n def _more_tags(self):\n return {\n \"_xfail_checks\": {\n \"check_sample_weights_invariance\": (\n \"zero sample_weight is not equivalent to removing samples\"\n ),\n }\n }\n\n\nclass SGDOneClassSVM(BaseSGD, OutlierMixin):\n \"\"\"Solves linear One-Class SVM using Stochastic Gradient Descent.\n\n This implementation is meant to be used with a kernel approximation\n technique (e.g. `sklearn.kernel_approximation.Nystroem`) to obtain results\n similar to `sklearn.svm.OneClassSVM` which uses a Gaussian kernel by\n default.\n\n Read more in the :ref:`User Guide <sgd_online_one_class_svm>`.\n\n .. versionadded:: 1.0\n\n Parameters\n ----------\n nu : float, optional\n The nu parameter of the One Class SVM: an upper bound on the\n fraction of training errors and a lower bound of the fraction of\n support vectors. Should be in the interval (0, 1]. By default 0.5\n will be taken.\n\n fit_intercept : bool\n Whether the intercept should be estimated or not. Defaults to True.\n\n max_iter : int, optional\n The maximum number of passes over the training data (aka epochs).\n It only impacts the behavior in the ``fit`` method, and not the\n `partial_fit`. Defaults to 1000.\n\n tol : float or None, optional\n The stopping criterion. If it is not None, the iterations will stop\n when (loss > previous_loss - tol). 
Defaults to 1e-3.\n\n shuffle : bool, optional\n Whether or not the training data should be shuffled after each epoch.\n Defaults to True.\n\n verbose : integer, optional\n The verbosity level\n\n random_state : int, RandomState instance or None, optional (default=None)\n The seed of the pseudo random number generator to use when shuffling\n the data. If int, random_state is the seed used by the random number\n generator; If RandomState instance, random_state is the random number\n generator; If None, the random number generator is the RandomState\n instance used by `np.random`.\n\n learning_rate : string, optional\n The learning rate schedule to use with `fit`. (If using `partial_fit`,\n learning rate must be controlled directly).\n\n 'constant':\n eta = eta0\n 'optimal': [default]\n eta = 1.0 / (alpha * (t + t0))\n where t0 is chosen by a heuristic proposed by Leon Bottou.\n 'invscaling':\n eta = eta0 / pow(t, power_t)\n 'adaptive':\n eta = eta0, as long as the training keeps decreasing.\n Each time n_iter_no_change consecutive epochs fail to decrease the\n training loss by tol or fail to increase validation score by tol if\n early_stopping is True, the current learning rate is divided by 5.\n\n eta0 : double\n The initial learning rate for the 'constant', 'invscaling' or\n 'adaptive' schedules. The default value is 0.0 as eta0 is not used by\n the default schedule 'optimal'.\n\n power_t : double\n The exponent for inverse scaling learning rate [default 0.5].\n\n warm_start : bool, optional\n When set to True, reuse the solution of the previous call to fit as\n initialization, otherwise, just erase the previous solution.\n See :term:`the Glossary <warm_start>`.\n\n Repeatedly calling fit or partial_fit when warm_start is True can\n result in a different solution than when calling fit a single time\n because of the way the data is shuffled.\n If a dynamic learning rate is used, the learning rate is adapted\n depending on the number of samples already seen. Calling ``fit`` resets\n this counter, while ``partial_fit`` will result in increasing the\n existing counter.\n\n average : bool or int, optional\n When set to True, computes the averaged SGD weights and stores the\n result in the ``coef_`` attribute. If set to an int greater than 1,\n averaging will begin once the total number of samples seen reaches\n average. So ``average=10`` will begin averaging after seeing 10\n samples.\n\n Attributes\n ----------\n coef_ : array, shape (1, n_features)\n Weights assigned to the features.\n\n offset_ : array, shape (1,)\n Offset used to define the decision function from the raw scores.\n We have the relation: decision_function = score_samples - offset.\n\n n_iter_ : int\n The actual number of iterations to reach the stopping criterion.\n\n t_ : int\n Number of weight updates performed during training.\n Same as ``(n_iter_ * n_samples)``.\n\n loss_function_ : concrete ``LossFunction``\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. 
versionadded:: 0.24\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn import linear_model\n >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])\n >>> clf = linear_model.SGDOneClassSVM(random_state=42)\n >>> clf.fit(X)\n SGDOneClassSVM(random_state=42)\n\n >>> print(clf.predict([[4, 4]]))\n [1]\n\n See also\n --------\n sklearn.svm.OneClassSVM\n\n Notes\n -----\n This estimator has a linear complexity in the number of training samples\n and is thus better suited than the `sklearn.svm.OneClassSVM`\n implementation for datasets with a large number of training samples (say\n > 10,000).\n \"\"\"\n\n loss_functions = {\"hinge\": (Hinge, 1.0)}\n\n def __init__(\n self,\n nu=0.5,\n fit_intercept=True,\n max_iter=1000,\n tol=1e-3,\n shuffle=True,\n verbose=0,\n random_state=None,\n learning_rate=\"optimal\",\n eta0=0.0,\n power_t=0.5,\n warm_start=False,\n average=False,\n ):\n\n alpha = nu / 2\n self.nu = nu\n super(SGDOneClassSVM, self).__init__(\n loss=\"hinge\",\n penalty=\"l2\",\n alpha=alpha,\n C=1.0,\n l1_ratio=0,\n fit_intercept=fit_intercept,\n max_iter=max_iter,\n tol=tol,\n shuffle=shuffle,\n verbose=verbose,\n epsilon=DEFAULT_EPSILON,\n random_state=random_state,\n learning_rate=learning_rate,\n eta0=eta0,\n power_t=power_t,\n early_stopping=False,\n validation_fraction=0.1,\n n_iter_no_change=5,\n warm_start=warm_start,\n average=average,\n )\n\n def _validate_params(self, for_partial_fit=False):\n \"\"\"Validate input params.\"\"\"\n if not (0 < self.nu <= 1):\n raise ValueError(\"nu must be in (0, 1], got nu=%f\" % self.nu)\n\n super(SGDOneClassSVM, self)._validate_params(for_partial_fit=for_partial_fit)\n\n def _fit_one_class(self, X, alpha, C, sample_weight, learning_rate, max_iter):\n \"\"\"Uses SGD implementation with X and y=np.ones(n_samples).\"\"\"\n\n # The One-Class SVM uses the SGD implementation with\n # y=np.ones(n_samples).\n n_samples = X.shape[0]\n y = np.ones(n_samples, dtype=np.float64, order=\"C\")\n\n dataset, offset_decay = make_dataset(X, y, sample_weight)\n\n penalty_type = self._get_penalty_type(self.penalty)\n learning_rate_type = self._get_learning_rate_type(learning_rate)\n\n # early stopping is set to False for the One-Class SVM. 
thus\n # validation_mask and validation_score_cb will be set to values\n # associated to early_stopping=False in _make_validation_split and\n # _make_validation_score_cb respectively.\n validation_mask = self._make_validation_split(y)\n validation_score_cb = self._make_validation_score_cb(\n validation_mask, X, y, sample_weight\n )\n\n random_state = check_random_state(self.random_state)\n # numpy mtrand expects a C long which is a signed 32 bit integer under\n # Windows\n seed = random_state.randint(0, np.iinfo(np.int32).max)\n\n tol = self.tol if self.tol is not None else -np.inf\n\n one_class = 1\n # There are no class weights for the One-Class SVM and they are\n # therefore set to 1.\n pos_weight = 1\n neg_weight = 1\n\n if self.average:\n coef = self._standard_coef\n intercept = self._standard_intercept\n average_coef = self._average_coef\n average_intercept = self._average_intercept\n else:\n coef = self.coef_\n intercept = 1 - self.offset_\n average_coef = None # Not used\n average_intercept = [0] # Not used\n\n coef, intercept, average_coef, average_intercept, self.n_iter_ = _plain_sgd(\n coef,\n intercept[0],\n average_coef,\n average_intercept[0],\n self.loss_function_,\n penalty_type,\n alpha,\n C,\n self.l1_ratio,\n dataset,\n validation_mask,\n self.early_stopping,\n validation_score_cb,\n int(self.n_iter_no_change),\n max_iter,\n tol,\n int(self.fit_intercept),\n int(self.verbose),\n int(self.shuffle),\n seed,\n neg_weight,\n pos_weight,\n learning_rate_type,\n self.eta0,\n self.power_t,\n one_class,\n self.t_,\n offset_decay,\n self.average,\n )\n\n self.t_ += self.n_iter_ * n_samples\n\n if self.average > 0:\n\n self._average_intercept = np.atleast_1d(average_intercept)\n self._standard_intercept = np.atleast_1d(intercept)\n\n if self.average <= self.t_ - 1.0:\n # made enough updates for averaging to be taken into account\n self.coef_ = average_coef\n self.offset_ = 1 - np.atleast_1d(average_intercept)\n else:\n self.coef_ = coef\n self.offset_ = 1 - np.atleast_1d(intercept)\n\n else:\n self.offset_ = 1 - np.atleast_1d(intercept)\n\n def _partial_fit(\n self,\n X,\n alpha,\n C,\n loss,\n learning_rate,\n max_iter,\n sample_weight,\n coef_init,\n offset_init,\n ):\n first_call = getattr(self, \"coef_\", None) is None\n X = self._validate_data(\n X,\n None,\n accept_sparse=\"csr\",\n dtype=np.float64,\n order=\"C\",\n accept_large_sparse=False,\n reset=first_call,\n )\n\n n_features = X.shape[1]\n\n # Allocate datastructures from input arguments\n sample_weight = _check_sample_weight(sample_weight, X)\n\n # We use intercept = 1 - offset where intercept is the intercept of\n # the SGD implementation and offset is the offset of the One-Class SVM\n # optimization problem.\n if getattr(self, \"coef_\", None) is None or coef_init is not None:\n self._allocate_parameter_mem(1, n_features, coef_init, offset_init, 1)\n elif n_features != self.coef_.shape[-1]:\n raise ValueError(\n \"Number of features %d does not match previous data %d.\"\n % (n_features, self.coef_.shape[-1])\n )\n\n if self.average and getattr(self, \"_average_coef\", None) is None:\n self._average_coef = np.zeros(n_features, dtype=np.float64, order=\"C\")\n self._average_intercept = np.zeros(1, dtype=np.float64, order=\"C\")\n\n self.loss_function_ = self._get_loss_function(loss)\n if not hasattr(self, \"t_\"):\n self.t_ = 1.0\n\n # delegate to concrete training procedure\n self._fit_one_class(\n X,\n alpha=alpha,\n C=C,\n learning_rate=learning_rate,\n sample_weight=sample_weight,\n max_iter=max_iter,\n )\n\n 
return self\n\n def partial_fit(self, X, y=None, sample_weight=None):\n \"\"\"Fit linear One-Class SVM with Stochastic Gradient Descent.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n Subset of the training data.\n\n sample_weight : array-like, shape (n_samples,), optional\n Weights applied to individual samples.\n If not provided, uniform weights are assumed.\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n\n alpha = self.nu / 2\n self._validate_params(for_partial_fit=True)\n\n return self._partial_fit(\n X,\n alpha,\n C=1.0,\n loss=self.loss,\n learning_rate=self.learning_rate,\n max_iter=1,\n sample_weight=sample_weight,\n coef_init=None,\n offset_init=None,\n )\n\n def _fit(\n self,\n X,\n alpha,\n C,\n loss,\n learning_rate,\n coef_init=None,\n offset_init=None,\n sample_weight=None,\n ):\n self._validate_params()\n\n if self.warm_start and hasattr(self, \"coef_\"):\n if coef_init is None:\n coef_init = self.coef_\n if offset_init is None:\n offset_init = self.offset_\n else:\n self.coef_ = None\n self.offset_ = None\n\n # Clear iteration count for multiple call to fit.\n self.t_ = 1.0\n\n self._partial_fit(\n X,\n alpha,\n C,\n loss,\n learning_rate,\n self.max_iter,\n sample_weight,\n coef_init,\n offset_init,\n )\n\n if (\n self.tol is not None\n and self.tol > -np.inf\n and self.n_iter_ == self.max_iter\n ):\n warnings.warn(\n \"Maximum number of iteration reached before \"\n \"convergence. Consider increasing max_iter to \"\n \"improve the fit.\",\n ConvergenceWarning,\n )\n\n return self\n\n def fit(self, X, y=None, coef_init=None, offset_init=None, sample_weight=None):\n \"\"\"Fit linear One-Class SVM with Stochastic Gradient Descent.\n\n This solves an equivalent optimization problem of the\n One-Class SVM primal optimization problem and returns a weight vector\n w and an offset rho such that the decision function is given by\n <w, x> - rho.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n Training data.\n\n coef_init : array, shape (n_classes, n_features)\n The initial coefficients to warm-start the optimization.\n\n offset_init : array, shape (n_classes,)\n The initial offset to warm-start the optimization.\n\n sample_weight : array-like, shape (n_samples,), optional\n Weights applied to individual samples.\n If not provided, uniform weights are assumed. 
These weights will\n be multiplied with class_weight (passed through the\n constructor) if class_weight is specified.\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n\n alpha = self.nu / 2\n self._fit(\n X,\n alpha=alpha,\n C=1.0,\n loss=self.loss,\n learning_rate=self.learning_rate,\n coef_init=coef_init,\n offset_init=offset_init,\n sample_weight=sample_weight,\n )\n\n return self\n\n def decision_function(self, X):\n \"\"\"Signed distance to the separating hyperplane.\n\n Signed distance is positive for an inlier and negative for an\n outlier.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n Testing data.\n\n Returns\n -------\n dec : array-like, shape (n_samples,)\n Decision function values of the samples.\n \"\"\"\n\n check_is_fitted(self, \"coef_\")\n\n X = self._validate_data(X, accept_sparse=\"csr\", reset=False)\n decisions = safe_sparse_dot(X, self.coef_.T, dense_output=True) - self.offset_\n\n return decisions.ravel()\n\n def score_samples(self, X):\n \"\"\"Raw scoring function of the samples.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n Testing data.\n\n Returns\n -------\n score_samples : array-like, shape (n_samples,)\n Unshiffted scoring function values of the samples.\n \"\"\"\n score_samples = self.decision_function(X) + self.offset_\n return score_samples\n\n def predict(self, X):\n \"\"\"Return labels (1 inlier, -1 outlier) of the samples.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n Testing data.\n\n Returns\n -------\n y : array, shape (n_samples,)\n Labels of the samples.\n \"\"\"\n y = (self.decision_function(X) >= 0).astype(np.int32)\n y[y == 0] = -1 # for consistency with outlier detectors\n return y\n\n def _more_tags(self):\n return {\n \"_xfail_checks\": {\n \"check_sample_weights_invariance\": (\n \"zero sample_weight is not equivalent to removing samples\"\n )\n }\n }\n" ]
[ [ "numpy.unique", "numpy.asarray", "numpy.clip", "numpy.ones", "numpy.atleast_1d", "numpy.iinfo", "numpy.any", "numpy.array", "numpy.zeros" ] ]
alexmalins/actions-ci-test
[ "a83bfc42aecce58a3cef54db2aad46c3f7532b19" ]
[ "tests/test_somecode.py" ]
[ "\"\"\"Unit tests for functions exported from actionscicd package\"\"\"\r\n\r\nimport unittest\r\nimport numpy as np\r\nfrom actionscicd import add_arrays, load_datafile\r\n\r\n\r\nclass TestSomeCode(unittest.TestCase):\r\n \"\"\"Unit tests for functions in somecode.py\"\"\"\r\n\r\n def test_add_arrays(self) -> None:\r\n \"\"\"Test addition of two numpy arrays\"\"\"\r\n\r\n arr1 = np.array([1, 2])\r\n arr2 = np.array([3, 4])\r\n self.assertTrue((add_arrays(arr1, arr2) == np.array([4, 6])).all())\r\n\r\n def test_load_datafile(self) -> None:\r\n \"\"\"Test loading of people's names and ages from TSV file\"\"\"\r\n\r\n names = [\"Bob\", \"Bill\", \"Jenny\"]\r\n ages = [1, 2, 3]\r\n self.assertEqual(load_datafile(\"mogi_data.tsv\"), (names, ages))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n unittest.main()\r\n" ]
[ [ "numpy.array" ] ]
arthpatel573/google-research
[ "eee881ac0ca58299cf6540618a34fc6f6924d268" ]
[ "tft/libs/tft_model.py" ]
[ "# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Temporal Fusion Transformer Model.\n\nContains the full TFT architecture and associated components. Defines functions\nfor training, evaluation and prediction using simple Pandas Dataframe inputs.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport gc\nimport json\nimport os\nimport shutil\n\nimport data_formatters.base\nimport libs.utils as utils\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\n\n# Layer definitions.\nconcat = tf.keras.backend.concatenate\nstack = tf.keras.backend.stack\nK = tf.keras.backend\nAdd = tf.keras.layers.Add\nLayerNorm = tf.keras.layers.LayerNormalization\nDense = tf.keras.layers.Dense\nMultiply = tf.keras.layers.Multiply\nDropout = tf.keras.layers.Dropout\nActivation = tf.keras.layers.Activation\nLambda = tf.keras.layers.Lambda\n\n# Default input types.\nInputTypes = data_formatters.base.InputTypes\n\n\n# Layer utility functions.\ndef linear_layer(size, activation=None, use_time_distributed=False, use_bias=True):\n \"\"\"Returns simple Keras linear layer.\n\n Args:\n size: Output size\n activation: Activation function to apply if required\n use_time_distributed: Whether to apply layer across time\n use_bias: Whether bias should be included in layer\n \"\"\"\n linear = tf.keras.layers.Dense(size, activation=activation, use_bias=use_bias)\n if use_time_distributed:\n linear = tf.keras.layers.TimeDistributed(linear)\n return linear\n\n\ndef apply_mlp(\n inputs,\n hidden_size,\n output_size,\n output_activation=None,\n hidden_activation=\"tanh\",\n use_time_distributed=False,\n):\n \"\"\"Applies simple feed-forward network to an input.\n\n Args:\n inputs: MLP inputs\n hidden_size: Hidden state size\n output_size: Output size of MLP\n output_activation: Activation function to apply on output\n hidden_activation: Activation function to apply on input\n use_time_distributed: Whether to apply across time\n\n Returns:\n Tensor for MLP outputs.\n \"\"\"\n if use_time_distributed:\n hidden = tf.keras.layers.TimeDistributed(\n tf.keras.layers.Dense(hidden_size, activation=hidden_activation)\n )(inputs)\n return tf.keras.layers.TimeDistributed(\n tf.keras.layers.Dense(output_size, activation=output_activation)\n )(hidden)\n else:\n hidden = tf.keras.layers.Dense(hidden_size, activation=hidden_activation)(\n inputs\n )\n return tf.keras.layers.Dense(output_size, activation=output_activation)(hidden)\n\n\ndef apply_gating_layer(\n x, hidden_layer_size, dropout_rate=None, use_time_distributed=True, activation=None\n):\n \"\"\"Applies a Gated Linear Unit (GLU) to an input.\n\n Args:\n x: Input to gating layer\n hidden_layer_size: Dimension of GLU\n dropout_rate: Dropout rate to apply if any\n use_time_distributed: Whether to apply across time\n activation: Activation function to apply to the linear feature transform if\n necessary\n\n 
Returns:\n Tuple of tensors for: (GLU output, gate)\n \"\"\"\n\n if dropout_rate is not None:\n x = tf.keras.layers.Dropout(dropout_rate)(x)\n\n if use_time_distributed:\n activation_layer = tf.keras.layers.TimeDistributed(\n tf.keras.layers.Dense(hidden_layer_size, activation=activation)\n )(x)\n gated_layer = tf.keras.layers.TimeDistributed(\n tf.keras.layers.Dense(hidden_layer_size, activation=\"sigmoid\")\n )(x)\n else:\n activation_layer = tf.keras.layers.Dense(\n hidden_layer_size, activation=activation\n )(x)\n gated_layer = tf.keras.layers.Dense(hidden_layer_size, activation=\"sigmoid\")(x)\n\n return tf.keras.layers.Multiply()([activation_layer, gated_layer]), gated_layer\n\n\ndef add_and_norm(x_list):\n \"\"\"Applies skip connection followed by layer normalisation.\n\n Args:\n x_list: List of inputs to sum for skip connection\n\n Returns:\n Tensor output from layer.\n \"\"\"\n tmp = Add()(x_list)\n tmp = LayerNorm()(tmp)\n return tmp\n\n\ndef gated_residual_network(\n x,\n hidden_layer_size,\n output_size=None,\n dropout_rate=None,\n use_time_distributed=True,\n additional_context=None,\n return_gate=False,\n):\n \"\"\"Applies the gated residual network (GRN) as defined in paper.\n\n Args:\n x: Network inputs\n hidden_layer_size: Internal state size\n output_size: Size of output layer\n dropout_rate: Dropout rate if dropout is applied\n use_time_distributed: Whether to apply network across time dimension\n additional_context: Additional context vector to use if relevant\n return_gate: Whether to return GLU gate for diagnostic purposes\n\n Returns:\n Tuple of tensors for: (GRN output, GLU gate)\n \"\"\"\n\n # Setup skip connection\n if output_size is None:\n output_size = hidden_layer_size\n skip = x\n else:\n linear = Dense(output_size)\n if use_time_distributed:\n linear = tf.keras.layers.TimeDistributed(linear)\n skip = linear(x)\n\n # Apply feedforward network\n hidden = linear_layer(\n hidden_layer_size, activation=None, use_time_distributed=use_time_distributed\n )(x)\n if additional_context is not None:\n hidden = hidden + linear_layer(\n hidden_layer_size,\n activation=None,\n use_time_distributed=use_time_distributed,\n use_bias=False,\n )(additional_context)\n hidden = tf.keras.layers.Activation(\"elu\")(hidden)\n hidden = linear_layer(\n hidden_layer_size, activation=None, use_time_distributed=use_time_distributed\n )(hidden)\n\n gating_layer, gate = apply_gating_layer(\n hidden,\n output_size,\n dropout_rate=dropout_rate,\n use_time_distributed=use_time_distributed,\n activation=None,\n )\n\n if return_gate:\n return add_and_norm([skip, gating_layer]), gate\n else:\n return add_and_norm([skip, gating_layer])\n\n\n# Attention Components.\ndef get_decoder_mask(self_attn_inputs):\n \"\"\"Returns causal mask to apply for self-attention layer.\n\n Args:\n self_attn_inputs: Inputs to self attention layer to determine mask shape\n \"\"\"\n len_s = tf.shape(self_attn_inputs)[1]\n bs = tf.shape(self_attn_inputs)[:1]\n mask = K.cumsum(tf.eye(len_s, batch_shape=bs), 1)\n return mask\n\n\nclass ScaledDotProductAttention:\n \"\"\"Defines scaled dot product attention layer.\n\n Attributes:\n dropout: Dropout rate to use\n activation: Normalisation function for scaled dot product attention (e.g.\n softmax by default)\n \"\"\"\n\n def __init__(self, attn_dropout=0.0):\n self.dropout = Dropout(attn_dropout)\n self.activation = Activation(\"softmax\")\n\n def __call__(self, q, k, v, mask):\n \"\"\"Applies scaled dot product attention.\n\n Args:\n q: Queries\n k: Keys\n v: Values\n mask: 
Masking if required -- sets softmax to very large value\n\n Returns:\n Tuple of (layer outputs, attention weights)\n \"\"\"\n temper = tf.sqrt(tf.cast(tf.shape(k)[-1], dtype=\"float32\"))\n attn = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=[2, 2]) / temper)(\n [q, k]\n ) # shape=(batch, q, k)\n if mask is not None:\n mmask = Lambda(lambda x: (-1e9) * (1.0 - K.cast(x, \"float32\")))(\n mask\n ) # setting to infinity\n attn = Add()([attn, mmask])\n attn = self.activation(attn)\n attn = self.dropout(attn)\n output = Lambda(lambda x: K.batch_dot(x[0], x[1]))([attn, v])\n return output, attn\n\n\nclass InterpretableMultiHeadAttention:\n \"\"\"Defines interpretable multi-head attention layer.\n\n Attributes:\n n_head: Number of heads\n d_k: Key/query dimensionality per head\n d_v: Value dimensionality\n dropout: Dropout rate to apply\n qs_layers: List of queries across heads\n ks_layers: List of keys across heads\n vs_layers: List of values across heads\n attention: Scaled dot product attention layer\n w_o: Output weight matrix to project internal state to the original TFT\n state size\n \"\"\"\n\n def __init__(self, n_head, d_model, dropout):\n \"\"\"Initialises layer.\n\n Args:\n n_head: Number of heads\n d_model: TFT state dimensionality\n dropout: Dropout discard rate\n \"\"\"\n\n self.n_head = n_head\n self.d_k = self.d_v = d_k = d_v = d_model // n_head\n self.dropout = dropout\n\n self.qs_layers = []\n self.ks_layers = []\n self.vs_layers = []\n\n # Use same value layer to facilitate interp\n vs_layer = Dense(d_v, use_bias=False)\n\n for _ in range(n_head):\n self.qs_layers.append(Dense(d_k, use_bias=False))\n self.ks_layers.append(Dense(d_k, use_bias=False))\n self.vs_layers.append(vs_layer) # use same vs_layer\n\n self.attention = ScaledDotProductAttention()\n self.w_o = Dense(d_model, use_bias=False)\n\n def __call__(self, q, k, v, mask=None):\n \"\"\"Applies interpretable multihead attention.\n\n Using T to denote the number of time steps fed into the transformer.\n\n Args:\n q: Query tensor of shape=(?, T, d_model)\n k: Key of shape=(?, T, d_model)\n v: Values of shape=(?, T, d_model)\n mask: Masking if required with shape=(?, T, T)\n\n Returns:\n Tuple of (layer outputs, attention weights)\n \"\"\"\n n_head = self.n_head\n\n heads = []\n attns = []\n for i in range(n_head):\n qs = self.qs_layers[i](q)\n ks = self.ks_layers[i](k)\n vs = self.vs_layers[i](v)\n head, attn = self.attention(qs, ks, vs, mask)\n\n head_dropout = Dropout(self.dropout)(head)\n heads.append(head_dropout)\n attns.append(attn)\n head = K.stack(heads) if n_head > 1 else heads[0]\n attn = K.stack(attns)\n\n outputs = K.mean(head, axis=0) if n_head > 1 else head\n outputs = self.w_o(outputs)\n outputs = Dropout(self.dropout)(outputs) # output dropout\n\n return outputs, attn\n\n\nclass TFTDataCache(object):\n \"\"\"Caches data for the TFT.\"\"\"\n\n _data_cache = {}\n\n @classmethod\n def update(cls, data, key):\n \"\"\"Updates cached data.\n\n Args:\n data: Source to update\n key: Key to dictionary location\n \"\"\"\n cls._data_cache[key] = data\n\n @classmethod\n def get(cls, key):\n \"\"\"Returns data stored at key location.\"\"\"\n return cls._data_cache[key].copy()\n\n @classmethod\n def contains(cls, key):\n \"\"\"Retuns boolean indicating whether key is present in cache.\"\"\"\n\n return key in cls._data_cache\n\n\n# TFT model definitions.\nclass TemporalFusionTransformer(object):\n \"\"\"Defines Temporal Fusion Transformer.\n\n Attributes:\n name: Name of model\n time_steps: Total number of input time 
steps per forecast date (i.e. Width\n of Temporal fusion decoder N)\n input_size: Total number of inputs\n output_size: Total number of outputs\n category_counts: Number of categories per categorical variable\n n_multiprocessing_workers: Number of workers to use for parallel\n computations\n column_definition: List of tuples of (string, DataType, InputType) that\n define each column\n quantiles: Quantiles to forecast for TFT\n use_cudnn: Whether to use Keras CuDNNLSTM or standard LSTM layers\n hidden_layer_size: Internal state size of TFT\n dropout_rate: Dropout discard rate\n max_gradient_norm: Maximum norm for gradient clipping\n learning_rate: Initial learning rate of ADAM optimizer\n minibatch_size: Size of minibatches for training\n num_epochs: Maximum number of epochs for training\n early_stopping_patience: Maximum number of iterations of non-improvement\n before early stopping kicks in\n num_encoder_steps: Size of LSTM encoder -- i.e. number of past time steps\n before forecast date to use\n num_stacks: Number of self-attention layers to apply (default is 1 for basic\n TFT)\n num_heads: Number of heads for interpretable mulit-head attention\n model: Keras model for TFT\n \"\"\"\n\n def __init__(self, raw_params, use_cudnn=False):\n \"\"\"Builds TFT from parameters.\n\n Args:\n raw_params: Parameters to define TFT\n use_cudnn: Whether to use CUDNN GPU optimised LSTM\n \"\"\"\n\n self.name = self.__class__.__name__\n\n params = dict(raw_params) # copy locally\n\n # Data parameters\n self.time_steps = int(params[\"total_time_steps\"])\n self.input_size = int(params[\"input_size\"])\n self.output_size = int(params[\"output_size\"])\n self.category_counts = json.loads(str(params[\"category_counts\"]))\n self.n_multiprocessing_workers = int(params[\"multiprocessing_workers\"])\n\n # Relevant indices for TFT\n self._input_obs_loc = json.loads(str(params[\"input_obs_loc\"]))\n self._static_input_loc = json.loads(str(params[\"static_input_loc\"]))\n self._known_regular_input_idx = json.loads(str(params[\"known_regular_inputs\"]))\n self._known_categorical_input_idx = json.loads(\n str(params[\"known_categorical_inputs\"])\n )\n\n self.column_definition = params[\"column_definition\"]\n\n # Network params\n self.quantiles = [0.1, 0.5, 0.9]\n self.use_cudnn = use_cudnn # Whether to use GPU optimised LSTM\n self.hidden_layer_size = int(params[\"hidden_layer_size\"])\n self.dropout_rate = float(params[\"dropout_rate\"])\n self.max_gradient_norm = float(params[\"max_gradient_norm\"])\n self.learning_rate = float(params[\"learning_rate\"])\n self.minibatch_size = int(params[\"minibatch_size\"])\n self.num_epochs = int(params[\"num_epochs\"])\n self.early_stopping_patience = int(params[\"early_stopping_patience\"])\n\n self.num_encoder_steps = int(params[\"num_encoder_steps\"])\n self.num_stacks = int(params[\"stack_size\"])\n self.num_heads = int(params[\"num_heads\"])\n\n # Serialisation options\n self._temp_folder = os.path.join(params[\"model_folder\"], \"tmp\")\n self.reset_temp_folder()\n\n # Extra components to store Tensorflow nodes for attention computations\n self._input_placeholder = None\n self._attention_components = None\n self._prediction_parts = None\n\n print(\"*** {} params ***\".format(self.name))\n for k in params:\n print(\"# {} = {}\".format(k, params[k]))\n\n # Build model\n self.model = self.build_model()\n\n def get_tft_embeddings(self, all_inputs):\n \"\"\"Transforms raw inputs to embeddings.\n\n Applies linear transformation onto continuous variables and uses embeddings\n 
for categorical variables.\n\n Args:\n all_inputs: Inputs to transform\n\n Returns:\n Tensors for transformed inputs.\n \"\"\"\n\n time_steps = self.time_steps\n\n # Sanity checks\n for i in self._known_regular_input_idx:\n if i in self._input_obs_loc:\n raise ValueError(\"Observation cannot be known a priori!\")\n for i in self._input_obs_loc:\n if i in self._static_input_loc:\n raise ValueError(\"Observation cannot be static!\")\n\n if all_inputs.get_shape().as_list()[-1] != self.input_size:\n raise ValueError(\n \"Illegal number of inputs! Inputs observed={}, expected={}\".format(\n all_inputs.get_shape().as_list()[-1], self.input_size\n )\n )\n\n num_categorical_variables = len(self.category_counts)\n num_regular_variables = self.input_size - num_categorical_variables\n\n embedding_sizes = [\n self.hidden_layer_size for i, size in enumerate(self.category_counts)\n ]\n\n embeddings = []\n for i in range(num_categorical_variables):\n\n embedding = tf.keras.Sequential(\n [\n tf.keras.layers.InputLayer([time_steps]),\n tf.keras.layers.Embedding(\n self.category_counts[i],\n embedding_sizes[i],\n input_length=time_steps,\n dtype=tf.float32,\n ),\n ]\n )\n embeddings.append(embedding)\n\n regular_inputs, categorical_inputs = (\n all_inputs[:, :, :num_regular_variables],\n all_inputs[:, :, num_regular_variables:],\n )\n\n embedded_inputs = [\n embeddings[i](categorical_inputs[Ellipsis, i])\n for i in range(num_categorical_variables)\n ]\n\n # Static inputs\n if self._static_input_loc:\n static_inputs = [\n tf.keras.layers.Dense(self.hidden_layer_size)(\n regular_inputs[:, 0, i : i + 1]\n )\n for i in range(num_regular_variables)\n if i in self._static_input_loc\n ] + [\n embedded_inputs[i][:, 0, :]\n for i in range(num_categorical_variables)\n if i + num_regular_variables in self._static_input_loc\n ]\n static_inputs = tf.keras.backend.stack(static_inputs, axis=1)\n\n else:\n static_inputs = None\n\n def convert_real_to_embedding(x):\n \"\"\"Applies linear transformation for time-varying inputs.\"\"\"\n return tf.keras.layers.TimeDistributed(\n tf.keras.layers.Dense(self.hidden_layer_size)\n )(x)\n\n # Targets\n obs_inputs = tf.keras.backend.stack(\n [\n convert_real_to_embedding(regular_inputs[Ellipsis, i : i + 1])\n for i in self._input_obs_loc\n ],\n axis=-1,\n )\n\n # Observed (a prioir unknown) inputs\n wired_embeddings = []\n for i in range(num_categorical_variables):\n if (\n i not in self._known_categorical_input_idx\n and i not in self._input_obs_loc\n ):\n e = embeddings[i](categorical_inputs[:, :, i])\n wired_embeddings.append(e)\n\n unknown_inputs = []\n for i in range(regular_inputs.shape[-1]):\n if i not in self._known_regular_input_idx and i not in self._input_obs_loc:\n e = convert_real_to_embedding(regular_inputs[Ellipsis, i : i + 1])\n unknown_inputs.append(e)\n\n if unknown_inputs + wired_embeddings:\n unknown_inputs = tf.keras.backend.stack(\n unknown_inputs + wired_embeddings, axis=-1\n )\n else:\n unknown_inputs = None\n\n # A priori known inputs\n known_regular_inputs = [\n convert_real_to_embedding(regular_inputs[Ellipsis, i : i + 1])\n for i in self._known_regular_input_idx\n if i not in self._static_input_loc\n ]\n known_categorical_inputs = [\n embedded_inputs[i]\n for i in self._known_categorical_input_idx\n if i + num_regular_variables not in self._static_input_loc\n ]\n\n known_combined_layer = tf.keras.backend.stack(\n known_regular_inputs + known_categorical_inputs, axis=-1\n )\n\n return unknown_inputs, known_combined_layer, obs_inputs, static_inputs\n\n def 
_get_single_col_by_type(self, input_type):\n \"\"\"Returns name of single column for input type.\"\"\"\n\n return utils.get_single_col_by_input_type(input_type, self.column_definition)\n\n def training_data_cached(self):\n \"\"\"Returns boolean indicating if training data has been cached.\"\"\"\n\n return TFTDataCache.contains(\"train\") and TFTDataCache.contains(\"valid\")\n\n def cache_batched_data(self, data, cache_key, num_samples=-1):\n \"\"\"Batches and caches data once for using during training.\n\n Args:\n data: Data to batch and cache\n cache_key: Key used for cache\n num_samples: Maximum number of samples to extract (-1 to use all data)\n \"\"\"\n\n if num_samples > 0:\n TFTDataCache.update(\n self._batch_sampled_data(data, max_samples=num_samples), cache_key\n )\n else:\n TFTDataCache.update(self._batch_data(data), cache_key)\n\n print('Cached data \"{}\" updated'.format(cache_key))\n\n def _batch_sampled_data(self, data, max_samples):\n \"\"\"Samples segments into a compatible format.\n\n Args:\n data: Sources data to sample and batch\n max_samples: Maximum number of samples in batch\n\n Returns:\n Dictionary of batched data with the maximum samples specified.\n \"\"\"\n\n if max_samples < 1:\n raise ValueError(\n \"Illegal number of samples specified! samples={}\".format(max_samples)\n )\n\n id_col = self._get_single_col_by_type(InputTypes.ID)\n time_col = self._get_single_col_by_type(InputTypes.TIME)\n\n data.sort_values(by=[id_col, time_col], inplace=True)\n\n print(\"Getting valid sampling locations.\")\n valid_sampling_locations = []\n split_data_map = {}\n for identifier, df in data.groupby(id_col):\n print(\"Getting locations for {}\".format(identifier))\n num_entries = len(df)\n if num_entries >= self.time_steps:\n valid_sampling_locations += [\n (identifier, self.time_steps + i)\n for i in range(num_entries - self.time_steps + 1)\n ]\n split_data_map[identifier] = df\n\n inputs = np.zeros((max_samples, self.time_steps, self.input_size))\n outputs = np.zeros((max_samples, self.time_steps, self.output_size))\n time = np.empty((max_samples, self.time_steps, 1), dtype=object)\n identifiers = np.empty((max_samples, self.time_steps, 1), dtype=object)\n\n if max_samples > 0 and len(valid_sampling_locations) > max_samples:\n print(\"Extracting {} samples...\".format(max_samples))\n ranges = [\n valid_sampling_locations[i]\n for i in np.random.choice(\n len(valid_sampling_locations), max_samples, replace=False\n )\n ]\n else:\n print(\n \"Max samples={} exceeds # available segments={}\".format(\n max_samples, len(valid_sampling_locations)\n )\n )\n ranges = valid_sampling_locations\n\n id_col = self._get_single_col_by_type(InputTypes.ID)\n time_col = self._get_single_col_by_type(InputTypes.TIME)\n target_col = self._get_single_col_by_type(InputTypes.TARGET)\n input_cols = [\n tup[0]\n for tup in self.column_definition\n if tup[2] not in {InputTypes.ID, InputTypes.TIME}\n ]\n\n for i, tup in enumerate(ranges):\n if ((i + 1) % 1000) == 0:\n print(i + 1, \"of\", max_samples, \"samples done...\")\n identifier, start_idx = tup\n sliced = split_data_map[identifier].iloc[\n start_idx - self.time_steps : start_idx\n ]\n inputs[i, :, :] = sliced[input_cols]\n outputs[i, :, :] = sliced[[target_col]]\n time[i, :, 0] = sliced[time_col]\n identifiers[i, :, 0] = sliced[id_col]\n\n sampled_data = {\n \"inputs\": inputs,\n \"outputs\": outputs[:, self.num_encoder_steps :, :],\n \"active_entries\": np.ones_like(outputs[:, self.num_encoder_steps :, :]),\n \"time\": time,\n \"identifier\": 
identifiers,\n }\n\n return sampled_data\n\n def _batch_data(self, data):\n \"\"\"Batches data for training.\n\n Converts raw dataframe from a 2-D tabular format to a batched 3-D array\n to feed into Keras model.\n\n Args:\n data: DataFrame to batch\n\n Returns:\n Batched Numpy array with shape=(?, self.time_steps, self.input_size)\n \"\"\"\n\n # Functions.\n def _batch_single_entity(input_data):\n time_steps = len(input_data)\n lags = self.time_steps\n x = input_data.values\n if time_steps >= lags:\n return np.stack(\n [x[i : time_steps - (lags - 1) + i, :] for i in range(lags)], axis=1\n )\n\n else:\n return None\n\n id_col = self._get_single_col_by_type(InputTypes.ID)\n time_col = self._get_single_col_by_type(InputTypes.TIME)\n target_col = self._get_single_col_by_type(InputTypes.TARGET)\n input_cols = [\n tup[0]\n for tup in self.column_definition\n if tup[2] not in {InputTypes.ID, InputTypes.TIME}\n ]\n\n data_map = {}\n for _, sliced in data.groupby(id_col):\n\n col_mappings = {\n \"identifier\": [id_col],\n \"time\": [time_col],\n \"outputs\": [target_col],\n \"inputs\": input_cols,\n }\n\n for k in col_mappings:\n cols = col_mappings[k]\n arr = _batch_single_entity(sliced[cols].copy())\n\n if k not in data_map:\n data_map[k] = [arr]\n else:\n data_map[k].append(arr)\n\n # Combine all data\n for k in data_map:\n data_map[k] = np.concatenate(data_map[k], axis=0)\n\n # Shorten target so we only get decoder steps\n data_map[\"outputs\"] = data_map[\"outputs\"][:, self.num_encoder_steps :, :]\n\n active_entries = np.ones_like(data_map[\"outputs\"])\n if \"active_entries\" not in data_map:\n data_map[\"active_entries\"] = active_entries\n else:\n data_map[\"active_entries\"].append(active_entries)\n\n return data_map\n\n def _get_active_locations(self, x):\n \"\"\"Formats sample weights for Keras training.\"\"\"\n return (np.sum(x, axis=-1) > 0.0) * 1.0\n\n def _build_base_graph(self):\n \"\"\"Returns graph defining layers of the TFT.\"\"\"\n\n # Size definitions.\n time_steps = self.time_steps\n combined_input_size = self.input_size\n encoder_steps = self.num_encoder_steps\n\n # Inputs.\n all_inputs = tf.keras.layers.Input(shape=(time_steps, combined_input_size,))\n\n (\n unknown_inputs,\n known_combined_layer,\n obs_inputs,\n static_inputs,\n ) = self.get_tft_embeddings(all_inputs)\n\n # Isolate known and observed historical inputs.\n if unknown_inputs is not None:\n historical_inputs = concat(\n [\n unknown_inputs[:, :encoder_steps, :],\n known_combined_layer[:, :encoder_steps, :],\n obs_inputs[:, :encoder_steps, :],\n ],\n axis=-1,\n )\n else:\n historical_inputs = concat(\n [\n known_combined_layer[:, :encoder_steps, :],\n obs_inputs[:, :encoder_steps, :],\n ],\n axis=-1,\n )\n\n # Isolate only known future inputs.\n future_inputs = known_combined_layer[:, encoder_steps:, :]\n\n def static_combine_and_mask(embedding):\n \"\"\"Applies variable selection network to static inputs.\n\n Args:\n embedding: Transformed static inputs\n\n Returns:\n Tensor output for variable selection network\n \"\"\"\n\n # Add temporal features\n _, num_static, _ = embedding.get_shape().as_list()\n\n flatten = tf.keras.layers.Flatten()(embedding)\n\n # Nonlinear transformation with gated residual network.\n mlp_outputs = gated_residual_network(\n flatten,\n self.hidden_layer_size,\n output_size=num_static,\n dropout_rate=self.dropout_rate,\n use_time_distributed=False,\n additional_context=None,\n )\n\n sparse_weights = tf.keras.layers.Activation(\"softmax\")(mlp_outputs)\n sparse_weights = 
K.expand_dims(sparse_weights, axis=-1)\n\n trans_emb_list = []\n for i in range(num_static):\n e = gated_residual_network(\n embedding[:, i : i + 1, :],\n self.hidden_layer_size,\n dropout_rate=self.dropout_rate,\n use_time_distributed=False,\n )\n trans_emb_list.append(e)\n\n transformed_embedding = concat(trans_emb_list, axis=1)\n\n combined = tf.keras.layers.Multiply()(\n [sparse_weights, transformed_embedding]\n )\n\n static_vec = K.sum(combined, axis=1)\n\n return static_vec, sparse_weights\n\n static_encoder, static_weights = static_combine_and_mask(static_inputs)\n\n static_context_variable_selection = gated_residual_network(\n static_encoder,\n self.hidden_layer_size,\n dropout_rate=self.dropout_rate,\n use_time_distributed=False,\n )\n static_context_enrichment = gated_residual_network(\n static_encoder,\n self.hidden_layer_size,\n dropout_rate=self.dropout_rate,\n use_time_distributed=False,\n )\n static_context_state_h = gated_residual_network(\n static_encoder,\n self.hidden_layer_size,\n dropout_rate=self.dropout_rate,\n use_time_distributed=False,\n )\n static_context_state_c = gated_residual_network(\n static_encoder,\n self.hidden_layer_size,\n dropout_rate=self.dropout_rate,\n use_time_distributed=False,\n )\n\n def lstm_combine_and_mask(embedding):\n \"\"\"Apply temporal variable selection networks.\n\n Args:\n embedding: Transformed inputs.\n\n Returns:\n Processed tensor outputs.\n \"\"\"\n\n # Add temporal features\n _, time_steps, embedding_dim, num_inputs = embedding.get_shape().as_list()\n\n flatten = K.reshape(embedding, [-1, time_steps, embedding_dim * num_inputs])\n\n expanded_static_context = K.expand_dims(\n static_context_variable_selection, axis=1\n )\n\n # Variable selection weights\n mlp_outputs, static_gate = gated_residual_network(\n flatten,\n self.hidden_layer_size,\n output_size=num_inputs,\n dropout_rate=self.dropout_rate,\n use_time_distributed=True,\n additional_context=expanded_static_context,\n return_gate=True,\n )\n\n sparse_weights = tf.keras.layers.Activation(\"softmax\")(mlp_outputs)\n sparse_weights = tf.expand_dims(sparse_weights, axis=2)\n\n # Non-linear Processing & weight application\n trans_emb_list = []\n for i in range(num_inputs):\n grn_output = gated_residual_network(\n embedding[Ellipsis, i],\n self.hidden_layer_size,\n dropout_rate=self.dropout_rate,\n use_time_distributed=True,\n )\n trans_emb_list.append(grn_output)\n\n transformed_embedding = stack(trans_emb_list, axis=-1)\n\n combined = tf.keras.layers.Multiply()(\n [sparse_weights, transformed_embedding]\n )\n temporal_ctx = K.sum(combined, axis=-1)\n\n return temporal_ctx, sparse_weights, static_gate\n\n historical_features, historical_flags, _ = lstm_combine_and_mask(\n historical_inputs\n )\n future_features, future_flags, _ = lstm_combine_and_mask(future_inputs)\n\n # LSTM layer\n def get_lstm(return_state):\n \"\"\"Returns LSTM cell initialized with default parameters.\"\"\"\n if self.use_cudnn:\n lstm = tf.keras.layers.CuDNNLSTM(\n self.hidden_layer_size,\n return_sequences=True,\n return_state=return_state,\n stateful=False,\n )\n else:\n lstm = tf.keras.layers.LSTM(\n self.hidden_layer_size,\n return_sequences=True,\n return_state=return_state,\n stateful=False,\n # Additional params to ensure LSTM matches CuDNN, See TF 2.0 :\n # (https://www.tensorflow.org/api_docs/python/tf/keras/layers/LSTM)\n activation=\"tanh\",\n recurrent_activation=\"sigmoid\",\n recurrent_dropout=0,\n unroll=False,\n use_bias=True,\n )\n return lstm\n\n history_lstm, state_h, state_c = 
get_lstm(return_state=True)(\n historical_features,\n initial_state=[static_context_state_h, static_context_state_c],\n )\n\n future_lstm = get_lstm(return_state=False)(\n future_features, initial_state=[state_h, state_c]\n )\n\n lstm_layer = concat([history_lstm, future_lstm], axis=1)\n\n # Apply gated skip connection\n input_embeddings = concat([historical_features, future_features], axis=1)\n\n lstm_layer, _ = apply_gating_layer(\n lstm_layer, self.hidden_layer_size, self.dropout_rate, activation=None\n )\n temporal_feature_layer = add_and_norm([lstm_layer, input_embeddings])\n\n # Static enrichment layers\n expanded_static_context = K.expand_dims(static_context_enrichment, axis=1)\n enriched, _ = gated_residual_network(\n temporal_feature_layer,\n self.hidden_layer_size,\n dropout_rate=self.dropout_rate,\n use_time_distributed=True,\n additional_context=expanded_static_context,\n return_gate=True,\n )\n\n # Decoder self attention\n self_attn_layer = InterpretableMultiHeadAttention(\n self.num_heads, self.hidden_layer_size, dropout=self.dropout_rate\n )\n\n mask = get_decoder_mask(enriched)\n x, self_att = self_attn_layer(enriched, enriched, enriched, mask=mask)\n\n x, _ = apply_gating_layer(\n x, self.hidden_layer_size, dropout_rate=self.dropout_rate, activation=None\n )\n x = add_and_norm([x, enriched])\n\n # Nonlinear processing on outputs\n decoder = gated_residual_network(\n x,\n self.hidden_layer_size,\n dropout_rate=self.dropout_rate,\n use_time_distributed=True,\n )\n\n # Final skip connection\n decoder, _ = apply_gating_layer(\n decoder, self.hidden_layer_size, activation=None\n )\n transformer_layer = add_and_norm([decoder, temporal_feature_layer])\n\n # Attention components for explainability\n attention_components = {\n # Temporal attention weights\n \"decoder_self_attn\": self_att,\n # Static variable selection weights\n \"static_flags\": static_weights[Ellipsis, 0],\n # Variable selection weights of past inputs\n \"historical_flags\": historical_flags[Ellipsis, 0, :],\n # Variable selection weights of future inputs\n \"future_flags\": future_flags[Ellipsis, 0, :],\n }\n\n return transformer_layer, all_inputs, attention_components\n\n def build_model(self):\n \"\"\"Build model and defines training losses.\n\n Returns:\n Fully defined Keras model.\n \"\"\"\n\n with tf.variable_scope(self.name):\n\n (\n transformer_layer,\n all_inputs,\n attention_components,\n ) = self._build_base_graph()\n\n outputs = tf.keras.layers.TimeDistributed(\n tf.keras.layers.Dense(self.output_size * len(self.quantiles))\n )(transformer_layer[Ellipsis, self.num_encoder_steps :, :])\n\n self._attention_components = attention_components\n\n adam = tf.keras.optimizers.Adam(\n lr=self.learning_rate, clipnorm=self.max_gradient_norm\n )\n\n model = tf.keras.Model(inputs=all_inputs, outputs=outputs)\n\n print(model.summary())\n\n valid_quantiles = self.quantiles\n output_size = self.output_size\n\n class QuantileLossCalculator(object):\n \"\"\"Computes the combined quantile loss for prespecified quantiles.\n\n Attributes:\n quantiles: Quantiles to compute losses\n \"\"\"\n\n def __init__(self, quantiles):\n \"\"\"Initializes computer with quantiles for loss calculations.\n\n Args:\n quantiles: Quantiles to use for computations.\n \"\"\"\n self.quantiles = quantiles\n\n def quantile_loss(self, a, b):\n \"\"\"Returns quantile loss for specified quantiles.\n\n Args:\n a: Targets\n b: Predictions\n \"\"\"\n quantiles_used = set(self.quantiles)\n\n loss = 0.0\n for i, quantile in enumerate(valid_quantiles):\n 
if quantile in quantiles_used:\n loss += utils.tensorflow_quantile_loss(\n a[Ellipsis, output_size * i : output_size * (i + 1)],\n b[Ellipsis, output_size * i : output_size * (i + 1)],\n quantile,\n )\n return loss\n\n quantile_loss = QuantileLossCalculator(valid_quantiles).quantile_loss\n\n model.compile(\n loss=quantile_loss, optimizer=adam, sample_weight_mode=\"temporal\"\n )\n\n self._input_placeholder = all_inputs\n\n return model\n\n def fit(self, train_df=None, valid_df=None):\n \"\"\"Fits deep neural network for given training and validation data.\n\n Args:\n train_df: DataFrame for training data\n valid_df: DataFrame for validation data\n \"\"\"\n\n print(\"*** Fitting {} ***\".format(self.name))\n\n # Add relevant callbacks\n callbacks = [\n tf.keras.callbacks.EarlyStopping(\n monitor=\"val_loss\",\n patience=self.early_stopping_patience,\n min_delta=1e-4,\n ),\n tf.keras.callbacks.ModelCheckpoint(\n filepath=self.get_keras_saved_path(self._temp_folder),\n monitor=\"val_loss\",\n save_best_only=True,\n save_weights_only=True,\n ),\n tf.keras.callbacks.TerminateOnNaN(),\n ]\n\n print(\"Getting batched_data\")\n if train_df is None:\n print(\"Using cached training data\")\n train_data = TFTDataCache.get(\"train\")\n else:\n train_data = self._batch_data(valid_df)\n\n if valid_df is None:\n print(\"Using cached validation data\")\n valid_data = TFTDataCache.get(\"valid\")\n else:\n valid_data = self._batch_data(valid_df)\n\n print(\"Using keras standard fit\")\n\n def _unpack(data):\n return (\n data[\"inputs\"],\n data[\"outputs\"],\n self._get_active_locations(data[\"active_entries\"]),\n )\n\n # Unpack without sample weights\n data, labels, active_flags = _unpack(train_data)\n val_data, val_labels, val_flags = _unpack(valid_data)\n\n all_callbacks = callbacks\n\n self.model.fit(\n x=data,\n y=np.concatenate([labels, labels, labels], axis=-1),\n sample_weight=active_flags,\n epochs=self.num_epochs,\n batch_size=self.minibatch_size,\n validation_data=(\n val_data,\n np.concatenate([val_labels, val_labels, val_labels], axis=-1),\n val_flags,\n ),\n callbacks=all_callbacks,\n shuffle=True,\n use_multiprocessing=True,\n workers=self.n_multiprocessing_workers,\n )\n\n # Load best checkpoint again\n tmp_checkpont = self.get_keras_saved_path(self._temp_folder)\n if os.path.exists(tmp_checkpont):\n self.load(self._temp_folder, use_keras_loadings=True)\n\n else:\n print(\"Cannot load from {}, skipping ...\".format(self._temp_folder))\n\n def evaluate(self, data=None, eval_metric=\"loss\"):\n \"\"\"Applies evaluation metric to the training data.\n\n Args:\n data: Dataframe for evaluation\n eval_metric: Evaluation metic to return, based on model definition.\n\n Returns:\n Computed evaluation loss.\n \"\"\"\n\n if data is None:\n print(\"Using cached validation data\")\n raw_data = TFTDataCache.get(\"valid\")\n else:\n raw_data = self._batch_data(data)\n\n inputs = raw_data[\"inputs\"]\n outputs = raw_data[\"outputs\"]\n active_entries = self._get_active_locations(raw_data[\"active_entries\"])\n\n metric_values = self.model.evaluate(\n x=inputs,\n y=np.concatenate([outputs, outputs, outputs], axis=-1),\n sample_weight=active_entries,\n workers=16,\n use_multiprocessing=True,\n )\n\n metrics = pd.Series(metric_values, self.model.metrics_names)\n\n return metrics[eval_metric]\n\n def predict(self, df, return_targets=False):\n \"\"\"Computes predictions for a given input dataset.\n\n Args:\n df: Input dataframe\n return_targets: Whether to also return outputs aligned with predictions to\n 
faciliate evaluation\n\n Returns:\n Input dataframe or tuple of (input dataframe, algined output dataframe).\n \"\"\"\n\n data = self._batch_data(df)\n\n inputs = data[\"inputs\"]\n time = data[\"time\"]\n identifier = data[\"identifier\"]\n outputs = data[\"outputs\"]\n\n combined = self.model.predict(\n inputs, workers=16, use_multiprocessing=True, batch_size=self.minibatch_size\n )\n\n # Format output_csv\n if self.output_size != 1:\n raise NotImplementedError(\"Current version only supports 1D targets!\")\n\n def format_outputs(prediction):\n \"\"\"Returns formatted dataframes for prediction.\"\"\"\n\n flat_prediction = pd.DataFrame(\n prediction[:, :, 0],\n columns=[\n \"t+{}\".format(i)\n for i in range(self.time_steps - self.num_encoder_steps)\n ],\n )\n cols = list(flat_prediction.columns)\n flat_prediction[\"forecast_time\"] = time[:, self.num_encoder_steps - 1, 0]\n flat_prediction[\"identifier\"] = identifier[:, 0, 0]\n\n # Arrange in order\n return flat_prediction[[\"forecast_time\", \"identifier\"] + cols]\n\n # Extract predictions for each quantile into different entries\n process_map = {\n \"p{}\".format(int(q * 100)): combined[\n Ellipsis, i * self.output_size : (i + 1) * self.output_size\n ]\n for i, q in enumerate(self.quantiles)\n }\n\n if return_targets:\n # Add targets if relevant\n process_map[\"targets\"] = outputs\n\n return {k: format_outputs(process_map[k]) for k in process_map}\n\n def get_attention(self, df):\n \"\"\"Computes TFT attention weights for a given dataset.\n\n Args:\n df: Input dataframe\n\n Returns:\n Dictionary of numpy arrays for temporal attention weights and variable\n selection weights, along with their identifiers and time indices\n \"\"\"\n\n data = self._batch_data(df)\n inputs = data[\"inputs\"]\n identifiers = data[\"identifier\"]\n time = data[\"time\"]\n\n def get_batch_attention_weights(input_batch):\n \"\"\"Returns weights for a given minibatch of data.\"\"\"\n input_placeholder = self._input_placeholder\n attention_weights = {}\n for k in self._attention_components:\n attention_weight = tf.keras.backend.get_session().run(\n self._attention_components[k],\n {input_placeholder: input_batch.astype(np.float32)},\n )\n attention_weights[k] = attention_weight\n return attention_weights\n\n # Compute number of batches\n batch_size = self.minibatch_size\n n = inputs.shape[0]\n num_batches = n // batch_size\n if n - (num_batches * batch_size) > 0:\n num_batches += 1\n\n # Split up inputs into batches\n batched_inputs = [\n inputs[i * batch_size : (i + 1) * batch_size, Ellipsis]\n for i in range(num_batches)\n ]\n\n # Get attention weights, while avoiding large memory increases\n attention_by_batch = [\n get_batch_attention_weights(batch) for batch in batched_inputs\n ]\n attention_weights = {}\n for k in self._attention_components:\n attention_weights[k] = []\n for batch_weights in attention_by_batch:\n attention_weights[k].append(batch_weights[k])\n\n if len(attention_weights[k][0].shape) == 4:\n tmp = np.concatenate(attention_weights[k], axis=1)\n else:\n tmp = np.concatenate(attention_weights[k], axis=0)\n\n del attention_weights[k]\n gc.collect()\n attention_weights[k] = tmp\n\n attention_weights[\"identifiers\"] = identifiers[:, 0, 0]\n attention_weights[\"time\"] = time[:, :, 0]\n\n return attention_weights\n\n # Serialisation.\n def reset_temp_folder(self):\n \"\"\"Deletes and recreates folder with temporary Keras training outputs.\"\"\"\n print(\"Resetting temp folder...\")\n utils.create_folder_if_not_exist(self._temp_folder)\n 
shutil.rmtree(self._temp_folder)\n os.makedirs(self._temp_folder)\n\n def get_keras_saved_path(self, model_folder):\n \"\"\"Returns path to keras checkpoint.\"\"\"\n return os.path.join(model_folder, \"{}.check\".format(self.name))\n\n def save(self, model_folder):\n \"\"\"Saves optimal TFT weights.\n\n Args:\n model_folder: Location to serialze model.\n \"\"\"\n # Allows for direct serialisation of tensorflow variables to avoid spurious\n # issue with Keras that leads to different performance evaluation results\n # when model is reloaded (https://github.com/keras-team/keras/issues/4875).\n\n utils.save(\n tf.keras.backend.get_session(),\n model_folder,\n cp_name=self.name,\n scope=self.name,\n )\n\n def load(self, model_folder, use_keras_loadings=False):\n \"\"\"Loads TFT weights.\n\n Args:\n model_folder: Folder containing serialized models.\n use_keras_loadings: Whether to load from Keras checkpoint.\n\n Returns:\n\n \"\"\"\n if use_keras_loadings:\n # Loads temporary Keras model saved during training.\n serialisation_path = self.get_keras_saved_path(model_folder)\n print(\"Loading model from {}\".format(serialisation_path))\n self.model.load_weights(serialisation_path)\n else:\n # Loads tensorflow graph for optimal models.\n utils.load(\n tf.keras.backend.get_session(),\n model_folder,\n cp_name=self.name,\n scope=self.name,\n )\n\n @classmethod\n def get_hyperparm_choices(cls):\n \"\"\"Returns hyperparameter ranges for random search.\"\"\"\n return {\n \"dropout_rate\": [0.1, 0.2, 0.3, 0.4, 0.5, 0.7, 0.9],\n \"hidden_layer_size\": [10, 20, 40, 80, 160, 240, 320],\n \"minibatch_size\": [64, 128, 256],\n \"learning_rate\": [1e-4, 1e-3, 1e-2],\n \"max_gradient_norm\": [0.01, 1.0, 100.0],\n \"num_heads\": [1, 4],\n \"stack_size\": [1],\n }\n\n" ]
[ [ "pandas.Series", "tensorflow.keras.layers.InputLayer", "numpy.concatenate", "numpy.ones_like", "tensorflow.keras.layers.Embedding", "tensorflow.keras.layers.TimeDistributed", "tensorflow.keras.layers.Multiply", "tensorflow.keras.callbacks.EarlyStopping", "numpy.zeros", "tensorflow.keras.layers.Flatten", "tensorflow.keras.layers.CuDNNLSTM", "tensorflow.shape", "tensorflow.keras.layers.Dense", "tensorflow.keras.Model", "tensorflow.keras.callbacks.TerminateOnNaN", "numpy.sum", "tensorflow.keras.layers.Activation", "tensorflow.keras.backend.get_session", "tensorflow.eye", "tensorflow.expand_dims", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.backend.stack", "tensorflow.keras.layers.LSTM", "tensorflow.variable_scope", "tensorflow.keras.layers.Dropout", "numpy.empty", "tensorflow.keras.layers.Input" ] ]
mzgubic/fair-hmumu
[ "dd5caa3bc0bcc459d8500c837cd633333ac741d2" ]
[ "fair_hmumu/configuration.py" ]
[ "import os\nimport ast\nimport itertools\nimport configparser\nimport pandas as pd\nfrom fair_hmumu import utils\n\n\nclass Configuration:\n\n def __init__(self, path):\n\n # path\n self.path = os.path.abspath(path)\n self.name = os.path.basename(self.path)\n self.loc = os.path.dirname(self.path)\n\n # parser\n self.config = configparser.ConfigParser()\n\n # read\n self.read()\n\n @classmethod\n def from_dict(cls, d, path):\n\n # create the configuration\n conf = Configuration(path)\n\n # add sections and options\n for section in d.keys():\n\n # add section\n try:\n conf.config.add_section(section)\n except configparser.DuplicateSectionError:\n pass\n\n # add option\n for option in d[section]:\n conf.set(section, option, d[section][option])\n\n return conf\n\n def as_dict(self, which='all'):\n \"\"\"\n Get the section settings as a dict, with types already converted.\n \"\"\"\n\n # check input\n assert which in ['all', 'fixed', 'sweep'], \"which much be in ['all', 'fixed', 'sweep']\"\n\n # build dictionary\n d = {section:{} for section in self.config.sections()}\n\n for section in self.config.sections():\n for option in self.config[section]:\n\n value = self.get(section, option)\n fixed = not isinstance(value, list)\n\n if which == 'all':\n d[section][option] = value\n\n if which == 'fixed' and fixed:\n d[section][option] = value\n\n if which == 'sweep' and not fixed:\n d[section][option] = value\n\n return d\n\n def get(self, section, option=None):\n \"\"\"\n Handle types other than strings.\n \"\"\"\n\n # if only section is provided return a dict\n if option is None:\n d = {}\n for opt in self.config.options(section):\n d[opt] = self.get(section, opt)\n return d\n\n # if section and option are provided return the option value\n else:\n try:\n return ast.literal_eval(self.config.get(section, option))\n except (ValueError, SyntaxError) as e:\n return self.config.get(section, option)\n\n def set(self, section, option, value):\n \"\"\"\n Handle types other than strings.\n \"\"\"\n try:\n self.config.set(section, option, value)\n except TypeError:\n self.config.set(section, option, str(value))\n\n def read(self):\n\n self.config.read(self.path)\n\n def write(self):\n\n with open(self.path, 'w') as f:\n self.config.write(f)\n\n def __str__(self):\n\n ret = '\\n'\n for section in self.config.sections():\n\n # write a section\n ret += '[{}]\\n'.format(section)\n for option in self.config.options(section):\n ret += '{} = {}\\n'.format(option, self.config[section][option])\n ret += '\\n'\n\n return ret[:-1]\n\n def make_new_runconf_dir(self):\n\n for i in itertools.count():\n\n # does this run dir already exist?\n trial_path = os.path.join(self.loc, 'points', 'run{:04d}'.format(i))\n\n # try next one if it exists, otherwise return the path\n if os.path.exists(trial_path):\n continue\n else:\n return utils.makedir(trial_path)\n\n def __iter__(self):\n \"\"\"\n If it is a sweep conf, iterate over run confs.\n \"\"\"\n\n # get a dict of all fixed and variable parameters\n fixed = self.as_dict('fixed')\n sweep = self.as_dict('sweep')\n\n # make all the combinations\n sw_sections = [section for section in sweep if not sweep[section] == {}]\n desc = [(section, option) for section in sw_sections for option in sweep[section]]\n lists = [sweep[section][option] for section in sw_sections for option in sweep[section]]\n\n combinations = list(itertools.product(*lists))\n\n # loop over combinations and make run configs\n for comb in combinations:\n\n # construct the run conf dictionary\n par_dict = fixed\n 
for (section, option), value in zip(desc, comb):\n par_dict[section][option] = value\n\n # get location and write the run conf\n loc = self.make_new_runconf_dir()\n run_conf = Configuration.from_dict(par_dict, os.path.join(loc, 'run_conf.ini'))\n run_conf.write()\n\n yield run_conf\n\ndef read_results(sweepname):\n\n # get the location\n sweep_loc = os.path.join(os.getenv('RUN'), sweepname)\n points_loc = os.path.join(sweep_loc, 'points')\n\n # construct the dataframe\n sweep_conf = Configuration(os.path.join(sweep_loc, 'sweep_conf.ini'))\n sweep_dict = sweep_conf.as_dict()\n options = ['{}__{}'.format(section, option) for section in sweep_dict for option in sweep_dict[section]]\n scores = [fname.split('.')[0] for fname in os.listdir(os.path.join(points_loc, 'run0000')) if fname.startswith('metric__')]\n metrics = list(set([score.split('__')[-1] for score in scores]))\n results = pd.DataFrame(columns=options+scores)\n\n # and fill it up\n for run in os.listdir(points_loc):\n run_dict = Configuration(os.path.join(points_loc, run, 'run_conf.ini')).as_dict()\n point_dict = {'{}__{}'.format(section, option):run_dict[section][option] for section in run_dict for option in run_dict[section]}\n\n # see if the particular run has finished\n try:\n for score in scores:\n score_path = os.path.join(points_loc, run, '{}.txt'.format(score))\n with open(score_path, 'r') as f:\n point_dict[score.replace('metric__', '')] = float(f.read())\n\n # ignore if not\n except FileNotFoundError:\n print('{} not found.'.format(score_path))\n continue\n\n results = results.append(point_dict, ignore_index=True)\n\n return results, options, metrics\n\n\n\n\n" ]
[ [ "pandas.DataFrame" ] ]
lukoucky/swimming_pool_attendance_prediction
[ "016fd81df6fb56019c9a48ec6d904da119e5d3d4", "016fd81df6fb56019c9a48ec6d904da119e5d3d4" ]
[ "models/tree_models.py", "models/neural_network_base.py" ]
[ "from sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.ensemble import ExtraTreesRegressor\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.metrics import mean_squared_error, make_scorer\nfrom data_helper import DataHelper\nimport numpy as np\nimport pickle\nimport random\n\nclass TreeModelBase():\n \"\"\"\n Base class for all models based on Decision trees like Random Forest or Extra Trees\n \"\"\"\n def __init__(self, double_model=False):\n \"\"\"\n Constructor\n :param double_model: Bool if model is single or double. Single model have one tree model for prediction of all days.\n Double model have one model for weekdays prediction and another model for weekend days prediction.\n \"\"\"\n self.dh = DataHelper()\n self.double_model = double_model\n\n def load_model(self, pickle_path=None):\n \"\"\"\n Loads model from pickle in pickle_path. If pickle_path is None loads from default location defined by self.name\n :param pickle_path: path where to load model from\n \"\"\"\n if pickle_path is None:\n pickle_path = 'data/'+self.name+'.pickle'\n\n if self.double_model: \n with open(pickle_path, 'rb') as f:\n self.weekend_model, self.weekday_model = pickle.load(f)\n else:\n with open(pickle_path, 'rb') as f:\n self.model = pickle.load(f)\n\n def save_model(self, pickle_path=None):\n \"\"\"\n Saves model to pickle in pickle_path. If pickle_path is None saves to default location defined by self.name\n :param pickle_path: path where to save model with file name\n \"\"\"\n if pickle_path is None:\n pickle_path = 'data/'+self.name+'.pickle'\n\n if self.double_model: \n with open(pickle_path, 'wb') as f:\n pickle.dump([self.weekend_model, self.weekday_model], f)\n else:\n with open(pickle_path, 'wb') as f:\n pickle.dump(self.model, f)\n\n def fit_on_training_set(self):\n \"\"\"\n Fit model (or models for double predictor) on training data.\n \"\"\"\n if self.double_model:\n self.fit_two_models()\n else:\n x_train, y_train, x_test, y_test = self.dh.generate_feature_vectors(self.columns, self.time_steps_back)\n self.model.fit(x_train, y_train.ravel())\n\n def get_mse(self):\n \"\"\"\n Computes mean square error on model\n :return: mean square error\n \"\"\"\n if self.double_model:\n columns_to_drop = self.dh.columns_to_drop_from_columns_to_keep(self.columns)\n weekdays, weekends = self.split_data_to_weekend_and_weekday(self.dh.get_testing_days())\n\n mse_weekday = list()\n mse_weekend = list()\n for day in weekdays:\n x, y = self.dh.get_feature_vectors_from_days([day], columns_to_drop, self.time_steps_back,1,True)\n mse_weekday.append(self.dh.mse_on_day(x, y, self.weekday_model, self.time_steps_back))\n for day in weekends:\n x, y = self.dh.get_feature_vectors_from_days([day], columns_to_drop, self.time_steps_back,1,True)\n mse_weekend.append(self.dh.mse_on_day(x, y, self.weekend_model, self.time_steps_back))\n\n print('Weekday MSE = %.1f, Weekend MSE = %.1f'%( (sum(mse_weekday)/len(weekdays)), (sum(mse_weekend)/len(weekends))))\n return (sum(mse_weekday)+sum(mse_weekend))/(len(weekdays) + len(weekends))\n else:\n return self.dh.mse_on_testing_days(self.model, self.columns, self.time_steps_back)\n\n def show_n_predictions(self, n):\n \"\"\"\n Plots `n` predictions from training data using this model.\n :param n: If integer then represents number of random testing days. If list of integers\n then represents day ids from testing days. 
Last possible option is \n string `all` that will plot all testing days.\n \"\"\"\n if self.double_model:\n weekdays = list()\n weekends = list()\n for i, day in enumerate(self.dh.get_testing_days()):\n if day.data['day_of_week'].iloc[0] < 5:\n weekdays.append(i)\n else:\n weekends.append(i)\n random.shuffle(weekdays)\n random.shuffle(weekends)\n print('Weekdays')\n self.dh.show_n_days_prediction(self.weekday_model, self.columns, weekdays[:n], self.time_steps_back, False, True)\n print('\\n\\nWeekends')\n self.dh.show_n_days_prediction(self.weekend_model, self.columns, weekends[:n], self.time_steps_back, False, True)\n else:\n self.dh.show_n_days_prediction(self.model, self.columns, n, self.time_steps_back, False, True)\n\n def print_mse(self):\n \"\"\"\n Prints mean square error\n \"\"\"\n print('MSE = %.2f'%(self.get_mse()))\n\n def fit_two_models(self):\n all_days = self.dh.get_training_days(True)\n weekdays, weekends = self.split_data_to_weekend_and_weekday(all_days)\n\n # columns_to_drop = self.dh.columns_to_drop_from_columns_to_keep(self.columns)\n columns_to_drop = ['time']\n print('columns to drop: ',columns_to_drop)\n x_weekdays, y_weekdays = self.dh.get_feature_vectors_from_days(weekdays, columns_to_drop, self.time_steps_back)\n x_weekends, y_weekends = self.dh.get_feature_vectors_from_days(weekends, columns_to_drop, self.time_steps_back)\n\n self.weekday_model.fit(x_weekdays, y_weekdays.ravel())\n self.weekend_model.fit(x_weekends, y_weekends.ravel())\n\n def split_data_to_weekend_and_weekday(self, days_list):\n weekdays = list()\n weekends = list()\n for day in days_list:\n if day.data['day_of_week'].iloc[0] < 5:\n weekdays.append(day)\n else:\n weekends.append(day)\n return weekdays, weekends\n\n\n\nclass MyExtraTreesRegressor(TreeModelBase):\n\n def __init__(self):\n super(MyExtraTreesRegressor, self).__init__()\n self.model = ExtraTreesRegressor(random_state=17, n_estimators=50, max_depth=35, min_samples_split=2, min_samples_leaf=1, max_features=40, max_leaf_nodes=None)\n self.time_steps_back = 9\n self.columns = ['pool', 'lines_reserved', 'day_of_week', 'month', 'day', 'hour', 'minute', 'holiday', 'reserved_Lavoda', 'reserved_Club Junior', 'reserved_Elab', 'reserved_Vodnik', 'reserved_Spirala', 'reserved_Amalka', 'reserved_Dukla', 'reserved_Lodicka', 'reserved_Elab team', 'reserved_Sports Team', 'reserved_Modra Hvezda', 'reserved_VSC MSMT', 'reserved_Orka', 'reserved_Activity', 'reserved_Aquamen', 'reserved_Zralok', 'reserved_SK Impuls', 'reserved_Motylek', 'reserved_3fit', 'reserved_Jitka Vachtova', 'reserved_Hodbod', 'reserved_DUFA', 'reserved_The Swim', 'reserved_Neptun', 'reserved_Strahov Cup', 'reserved_Apneaman', 'reserved_Michovsky', 'reserved_Betri', 'reserved_Pospisil', 'reserved_Vachtova', 'reserved_Riverside', 'reserved_Vodni polo Sparta', 'reserved_Road 2 Kona', 'reserved_Water Polo Sparta Praha', 'reserved_Sucha', 'reserved_Totkovicova', 'reserved_DDM Spirala', 'reserved_PS Perla', 'reserved_Dufkova - pulka drahy', 'reserved_Pavlovec', 'reserved_Sidorovich', 'reserved_OS DUFA', 'temperature_binned', 'wind_binned', 'humidity_binned', 'precipitation_binned', 'pressure_binned', 'reserved_other', 'minute_of_day', 'year']\n self.name = 'MyExtraTreesRegressor'\n\n\nclass MyExtraTreesClassifier(TreeModelBase):\n\n def __init__(self):\n super(MyExtraTreesClassifier, self).__init__()\n self.model = ExtraTreesClassifier(random_state=17, n_estimators=10, max_depth=50, min_samples_split=5, min_samples_leaf=2)\n self.time_steps_back = 10\n self.columns = 
['pool','day_of_week','month','minute_of_day', 'year', 'reserved_Vodnik','lines_reserved']\n self.name = 'MyExtraTreesClassifier'\n\nclass MyRandomForestRegressor(TreeModelBase):\n\n def __init__(self):\n super(MyRandomForestRegressor, self).__init__()\n self.model = RandomForestRegressor(random_state=17, n_estimators=30, max_depth=20, min_samples_split=5, min_samples_leaf=1, max_features=20, max_leaf_nodes=None)\n self.time_steps_back = 10\n self.columns = ['pool', 'lines_reserved', 'day_of_week', 'month', 'day', 'hour', 'minute', 'holiday', 'reserved_Lavoda', 'reserved_Club Junior', 'reserved_Elab', 'reserved_Vodnik', 'reserved_Spirala', 'reserved_Amalka', 'reserved_Dukla', 'reserved_Lodicka', 'reserved_Elab team', 'reserved_Sports Team', 'reserved_Modra Hvezda', 'reserved_VSC MSMT', 'reserved_Orka', 'reserved_Activity', 'reserved_Aquamen', 'reserved_Zralok', 'reserved_SK Impuls', 'reserved_Motylek', 'reserved_3fit', 'reserved_Jitka Vachtova', 'reserved_Hodbod', 'reserved_DUFA', 'reserved_The Swim', 'reserved_Neptun', 'reserved_Strahov Cup', 'reserved_Apneaman', 'reserved_Michovsky', 'reserved_Betri', 'reserved_Pospisil', 'reserved_Vachtova', 'reserved_Riverside', 'reserved_Vodni polo Sparta', 'reserved_Road 2 Kona', 'reserved_Water Polo Sparta Praha', 'reserved_Sucha', 'reserved_Totkovicova', 'reserved_DDM Spirala', 'reserved_PS Perla', 'reserved_Dufkova - pulka drahy', 'reserved_Pavlovec', 'reserved_Sidorovich', 'reserved_OS DUFA', 'temperature_binned', 'wind_binned', 'humidity_binned', 'precipitation_binned', 'pressure_binned', 'reserved_other', 'minute_of_day', 'year']\n self.name = 'MyRandomForestRegressor'\n\n\nclass MyRandomForestClassifier(TreeModelBase):\n\n def __init__(self):\n super(MyRandomForestClassifier, self).__init__()\n self.model = RandomForestClassifier(random_state=17, n_estimators=10, max_depth=30, min_samples_split=2, min_samples_leaf=2)\n self.time_steps_back = 5\n self.columns = ['pool','day_of_week','month','minute_of_day', 'year', 'reserved_Vodnik','lines_reserved']\n self.name = 'MyRandomForestClassifier'\n\nclass DoubleExtraTreesRegressor(TreeModelBase):\n\n def __init__(self):\n super(DoubleExtraTreesRegressor, self).__init__(True)\n self.weekend_model = ExtraTreesRegressor(random_state=17, n_estimators=50, max_depth=35, min_samples_split=2, min_samples_leaf=1, max_features=40, max_leaf_nodes=None)\n self.weekday_model = ExtraTreesRegressor(random_state=17, n_estimators=50, max_depth=35, min_samples_split=2, min_samples_leaf=1, max_features=40, max_leaf_nodes=None)\n self.time_steps_back = 9\n self.columns = ['pool', 'lines_reserved', 'day_of_week', 'year', 'month', 'day', 'minute_of_day', 'holiday', 'reserved_Lavoda', 'reserved_Club Junior', 'reserved_Elab', 'reserved_Vodnik', 'reserved_Spirala', 'reserved_Amalka', 'reserved_Dukla', 'reserved_Lodicka', 'reserved_Elab team', 'reserved_Sports Team', 'reserved_Modra Hvezda', 'reserved_VSC MSMT', 'reserved_Orka', 'reserved_Activity', 'reserved_Aquamen', 'reserved_Zralok', 'reserved_SK Impuls', 'reserved_Motylek', 'reserved_3fit', 'reserved_Jitka Vachtova', 'reserved_Hodbod', 'reserved_DUFA', 'reserved_The Swim', 'reserved_Neptun', 'reserved_Apneaman', 'reserved_Michovsky', 'reserved_Betri', 'reserved_Pospisil', 'reserved_Vachtova', 'reserved_Riverside', 'reserved_Vodni polo Sparta', 'reserved_Road 2 Kona', 'reserved_Water Polo Sparta Praha', 'reserved_Sucha', 'reserved_Totkovicova', 'reserved_DDM Spirala', 'reserved_PS Perla', 'reserved_Dufkova - pulka drahy', 'reserved_Pavlovec', 'reserved_Sidorovich', 
'reserved_OS DUFA', 'reserved_SK Neptun', 'temperature_binned', 'wind_binned', 'humidity_binned', 'precipitation_binned', 'pressure_binned', 'reserved_other']\n self.name = 'DoubleExtraTreesRegressor'", "from abc import ABC, abstractmethod\nfrom data_helper import DataHelper\nimport numpy as np\nimport pickle\nfrom keras.models import model_from_json\nimport tensorflow as tf\ntf.random.set_seed(17)\n\n\nclass NeuralNetworkBase(ABC):\n\t\"\"\"\n\tBase class for all neural network models.\n\t\"\"\"\n\tdef __init__(self, model_name=None):\n\t\t\"\"\"\n\t\tConstructor settign up necessary member variables.\n\t\t:param model_name: Name of the model used for saving the model\n\t\t\"\"\"\n\t\tself.dh = DataHelper()\n\t\tself.columns = ['pool','day_of_week','month','minute_of_day','year'] \n\t\tself.time_steps_back = 5\n\t\tself.model = None\n\t\tself.model_name = model_name\n\t\tself.fit_history = {'val_loss':list(), 'loss':list(), 'mse':list()}\n\t\tself.time_steps_back = 5\n\t\tself.columns = ['pool','lines_reserved','day_of_week','month','minute_of_day','year','reserved_Vodnik'] \n\n\t@abstractmethod\n\tdef build_model(self):\n\t\t\"\"\"\n\t\tAbstrac function that builds model. Must be implemented by child class.\n\t\tIn here should be implemented neural network keras model in self.model\n\t\t\"\"\"\n\t\tpass\n\n\tdef fit_with_training_data(self, epochs=10, validation_split=0.3, verbose=1, use_cpu=True, test_mse=True):\n\t\t\"\"\"\n\t\tTrains model for defined number of epochs on training data.\n\t\t:param epochs: Number of epochs for fitting.\n\t\t:param validation_split: Percentage of training data that will be used for validation\n\t\t:param verbose: Verbose settign of fit function\n\t\t:param use_cpu: If True it will force to use CPU even if GPU is available\n\t\t:param test_mse: If True mean square error on testing set will be tested after fitting is done and result save in history\n\t\t\"\"\"\n\t\tx_train, y_train, x_test, y_test = self.dh.generate_feature_vectors_for_cnn(self.columns, self.time_steps_back)\n\t\tself.fit(x_train, y_train, epochs, validation_split, verbose, use_cpu, test_mse)\n\n\tdef fit(self, x_train, y_train, epochs=10, validation_split=0.3, verbose=1, use_cpu=True, test_mse=True):\n\t\t\"\"\"\n\t\tTrains model for defined number of epochs on provided data.\n\t\t:param x_train: Numpy array with training data\n\t\t:param y_train: Numpy array with ground truth results for training data\n\t\t:param epochs: Number of epochs for fitting.\n\t\t:param validation_split: Percentage of training data that will be used for validation\n\t\t:param verbose: Verbose settign of fit function\n\t\t:param use_cpu: If True it will force to use CPU even if GPU is available\n\t\t:param test_mse: If True mean square error on testing set will be tested after fitting is done and result save in history\n\t\t\"\"\"\n\t\tif self.model is not None:\n\t\t\tif use_cpu:\n\t\t\t\twith tf.device('/cpu:0'):\n\t\t\t\t\thistory = self.model.fit(x_train, y_train, epochs=epochs, validation_split=validation_split, verbose=verbose)\n\t\t\telse:\n\t\t\t\thistory = self.model.fit(x_train, y_train, epochs=epochs, validation_split=validation_split, verbose=verbose)\n\t\t\tself.update_fit_history(history, test_mse)\n\t\t\tself.save_model()\n\t\telse:\n\t\t\tprint('Cannot fit. 
Build model first.')\n\n\tdef predict(self, x):\n\t\t\"\"\"\n\t\tPredicts output for given data.\n\t\t:param x: Numpy array with data for prediction\n\t\t:return: Prediction output\n\t\t\"\"\"\n\t\treturn self.model.predict(x)\n\n\tdef save_model(self, name_addition=None):\n\t\t\"\"\"\n\t\tSaves model and weights with name defined in model_name\n\t\t\"\"\"\n\t\tname = self.model_name\n\t\tif name_addition is not None:\n\t\t\tname += name_addition\n\n\t\tmodel_json = self.model.to_json()\n\t\twith open(name+'.json', 'w') as json_file:\n\t\t json_file.write(model_json)\n\n\t\tself.model.save_weights(name+'_weights.h5')\n\t\tprint('Model saved to disk with name: ' + name)\n\n\tdef load_model(self, name_addition=None):\n\t\t\"\"\"\n\t\tLoads model and weights with name defined in model_name\n\t\t:param name_addition: \n\t\t\"\"\"\n\t\tname = self.model_name\n\t\tif name_addition is not None:\n\t\t\tname += name_addition\n\n\t\tjson_file = open(name+'.json', 'r')\n\t\tmodel_json = json_file.read()\n\t\tjson_file.close()\n\t\tself.model = model_from_json(model_json)\n\t\tself.model.load_weights(name+'_weights.h5')\n\t\tprint('Model %s loaded from disk'%(name))\n\n\tdef setup(self, columns, time_steps_back):\n\t\t\"\"\"\n\t\tTrains model for defined number of epochs on provided data.\n\t\t:param columns: Colmuns to keep in data\n\t\t:param time_steps_back: Number of time steps used for prediction\n\t\t\"\"\"\n\t\tself.columns = columns\n\t\tself.time_steps_back = time_steps_back\n\t\tself.build_model()\n\n\tdef get_mse(self):\n\t\t\"\"\"\n\t\tComputes mean square error on model\n\t\t:return: mean square error\n\t\t\"\"\"\n\t\treturn self.dh.mse_on_testing_days(self.model, self.columns, self.time_steps_back, True)\n\n\tdef show_n_predictions(self, n):\n\t\t\"\"\"\n\t\tPlots `n` predictions from training data using this model.\n\t\t:param n: If integer then represents number of random testing days. If list of integers\n\t\t\t\t\tthen represents day ids from testing days. Last possible option is \n\t\t\t\t\tstring `all` that will plot all testing days.\n\t\t\"\"\"\n\t\tself.dh.show_n_days_prediction(self.model, self.columns, n, self.time_steps_back, True)\n\n\tdef print_mse(self):\n\t\t\"\"\"\n\t\tPrints mean square error\n\t\t\"\"\"\n\t\tprint('\\nMSE = %.2f\\n'%(self.get_mse()))\n\n\tdef update_fit_history(self, history, test_mse):\n\t\t\"\"\"\n\t\tAdds progress of validation loss and training loss to `fit_history`\n\t\tIf test_mse is True than also tests MSE and adds result to `fit_history`\n\t\t:param history: Keras fit history object\n\t\t:param test_mse: Flag if MSE should be tested\n\t\t\"\"\"\n\t\tif 'val_loss' in history.history.keys():\n\t\t\tfor value in history.history['val_loss']:\n\t\t\t\tself.fit_history['val_loss'].append(value)\n\t\tif 'loss' in history.history.keys():\n\t\t\tfor value in history.history['loss']:\n\t\t\t\tself.fit_history['loss'].append(value)\n\t\tif test_mse:\n\t\t\tmse = self.get_mse()\n\t\t\tif len(self.fit_history['mse']) > 0 and mse < min(self.fit_history['mse']):\n\t\t\t\tname_addition = '_MSE_%.0f'%(mse)\n\t\t\t\tself.save_model(name_addition)\n\t\t\t\tprint('\\nNew best MSE = %.2f\\n'%(mse))\n\t\t\telse:\n\t\t\t\tprint('\\nMSE = %.2f\\n'%(mse))\n\t\t\tself.fit_history['mse'].append(mse)\t\t\t\n" ]
[ [ "sklearn.ensemble.RandomForestRegressor", "sklearn.ensemble.ExtraTreesClassifier", "sklearn.ensemble.RandomForestClassifier", "sklearn.ensemble.ExtraTreesRegressor" ], [ "tensorflow.device", "tensorflow.random.set_seed" ] ]
prkhrv/High-Performance-Python-with-CUDA
[ "5ac07e0ace3cd07f3583af8a3954bb52ac74b6e8" ]
[ "vector_add.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\nimport numpy as np\nimport time\n\nfrom numba import vectorize, cuda\n\n@vectorize(['float32(float32, float32)'], target='cuda')\ndef VectorAdd(a, b):\n return a + b\n\ndef main():\n N = 32000000\n\n A = np.ones(N, dtype=np.float32)\n B = np.ones(N, dtype=np.float32)\n\n start = time.time()\n C = VectorAdd(A, B)\n vector_add_time = time.time() - start\n\n print (\"C[:5] = \" + str(C[:5]))\n print (\"C[-5:] = \" + str(C[-5:]))\n\n print (\"VectorAdd took for % seconds\" % vector_add_time)\n\nif __name__=='__main__':\n main()\n" ]
[ [ "numpy.ones" ] ]
mycal-tucker/IC3Net
[ "bd71cc92d3ec5a5bfc12860babdb5a570421021d" ]
[ "nns/probe.py" ]
[ "import torch.nn as nn\n\n\nclass Probe(nn.Module):\n def __init__(self, input_dim, output_dim, hidden_size=64, num_layers=1, dropout_rate=0.8):\n super(Probe, self).__init__()\n self.layers = nn.ModuleList()\n self.out_dim = output_dim\n prev_size = input_dim\n self.dropout1 = nn.Dropout(p=dropout_rate)\n for layer_id in range(num_layers):\n next_dim = hidden_size if layer_id < num_layers - 1 else output_dim\n self.layers.append(nn.Linear(prev_size, next_dim))\n prev_size = hidden_size\n\n def forward(self, x):\n x = self.dropout1(x)\n for layer_id, layer in enumerate(self.layers):\n x = layer(x)\n if layer_id < len(self.layers) - 1:\n x = x.clamp(min=0)\n return x\n" ]
[ [ "torch.nn.Dropout", "torch.nn.Linear", "torch.nn.ModuleList" ] ]
kumar10725/windpowerlib
[ "865a6b697edbccf815d1e98cad994239b5ccd395" ]
[ "windpowerlib/wind_turbine.py" ]
[ "\"\"\"\nThe ``wind_turbine`` module contains the class WindTurbine that implements\na wind turbine in the windpowerlib and functions needed for the modelling of a\nwind turbine.\n\nSPDX-FileCopyrightText: 2019 oemof developer group <[email protected]>\nSPDX-License-Identifier: MIT\n\"\"\"\nimport pandas as pd\nimport logging\nimport warnings\nimport requests\nimport os\nfrom windpowerlib.tools import WindpowerlibUserWarning\nfrom typing import NamedTuple\n\n\nclass WindTurbine(object):\n r\"\"\"\n Defines a standard set of wind turbine attributes.\n\n Parameters\n ----------\n hub_height : float\n Hub height of the wind turbine in m.\n power_curve : :pandas:`pandas.DataFrame<frame>` or dict (optional)\n If provided directly sets the power curve. DataFrame/dictionary must\n have 'wind_speed' and 'value' columns/keys with wind speeds in m/s and\n the corresponding power curve value in W. If not set the value is\n retrieved from 'power_curve.csv' file in `path`. In that case a\n `turbine_type` is needed. Default: None.\n power_coefficient_curve : :pandas:`pandas.DataFrame<frame>` or dict (optional)\n If provided directly sets the power coefficient curve.\n DataFrame/dictionary must have 'wind_speed' and 'value' columns/keys\n with wind speeds in m/s and the corresponding power coefficient curve\n value. If not set the value is retrieved from\n 'power_coefficient_curve.csv' file in `path`. In that case a\n `turbine_type` is needed. Default: None.\n turbine_type : str (optional)\n Name of the wind turbine type. Must be provided if power (coefficient)\n curve, nominal power or rotor diameter is retrieved from self-provided\n or oedb turbine library csv files. If turbine_type is None it is not\n possible to retrieve turbine data from file.\n Use :py:func:`~.get_turbine_types` to see a table of all wind turbines\n for which power (coefficient) curve data and other turbine data is\n provided in the oedb turbine library.\n Default: None.\n rotor_diameter : float (optional)\n Diameter of the rotor in m. If not set the value is\n retrieved from 'turbine_data.csv' file in `path`. In that case a\n `turbine_type` is needed.\n The rotor diameter only needs to be set if power output\n is calculated using the power coefficient curve. Default: None.\n nominal_power : float (optional)\n The nominal power of the wind turbine in W. If not set the value is\n retrieved from 'turbine_data.csv' file in `path`. In that case a\n `turbine_type` is needed. Default: None.\n path : str (optional)\n Directory where the turbine database files are located. The files need\n to be named 'power_coefficient_curve.csv', 'power_curve.csv', and\n 'turbine_data.csv'. By default the oedb turbine library files are used.\n Set path to `None` to ignore turbine data from files. Default: 'oedb'.\n\n Attributes\n ----------\n turbine_type : str\n Name of the wind turbine.\n hub_height : float\n Hub height of the wind turbine in m.\n rotor_diameter : None or float\n Diameter of the rotor in m. Default: None.\n power_coefficient_curve : None, pandas.DataFrame or dictionary\n Power coefficient curve of the wind turbine. DataFrame/dictionary\n containing 'wind_speed' and 'value' columns/keys with wind speeds\n in m/s and the corresponding power coefficients. Default: None.\n power_curve : None, pandas.DataFrame or dictionary\n Power curve of the wind turbine. DataFrame/dictionary containing\n 'wind_speed' and 'value' columns/keys with wind speeds in m/s and the\n corresponding power curve value in W. 
Default: None.\n nominal_power : None or float\n The nominal output of the wind turbine in W. Default: None.\n \n Notes\n ------\n Your wind turbine object needs to have a power coefficient or power curve.\n By default they are fetched from the oedb turbine library that is provided\n along with the windpowerlib. In that case `turbine_type` must be specified.\n You can also set the curves directly or provide your own csv files with\n power coefficient and power curves. See `example_power_curves.csv',\n `example_power_coefficient_curves.csv` and `example_turbine_data.csv`\n in example/data for the required format of such csv files.\n\n Examples\n --------\n >>> import os\n >>> from windpowerlib import WindTurbine\n >>> enerconE126={\n ... 'hub_height': 135,\n ... 'turbine_type': 'E-126/4200'}\n >>> e126=WindTurbine(**enerconE126)\n >>> print(e126.nominal_power)\n 4200000.0\n >>> # Example with own path\n >>> path=os.path.join(os.path.dirname(__file__), '../tests/data')\n >>> example_turbine={\n ... 'hub_height': 100,\n ... 'rotor_diameter': 70,\n ... 'turbine_type': 'DUMMY 3',\n ... 'path' : path}\n >>> e_t_1=WindTurbine(**example_turbine)\n >>> print(e_t_1.power_curve['value'][7])\n 18000.0\n >>> print(e_t_1.nominal_power)\n 1500000.0\n \"\"\"\n\n def __init__(\n self,\n hub_height,\n nominal_power=None,\n path=\"oedb\",\n power_curve=None,\n power_coefficient_curve=None,\n rotor_diameter=None,\n turbine_type=None,\n **kwargs,\n ):\n\n self.hub_height = hub_height\n self.turbine_type = turbine_type\n self.rotor_diameter = rotor_diameter\n self.nominal_power = nominal_power\n self.power_curve = power_curve\n self.power_coefficient_curve = power_coefficient_curve\n\n if path == \"oedb\":\n path = os.path.join(os.path.dirname(__file__), \"oedb\")\n\n if turbine_type is not None and path is not None:\n if power_curve is None:\n try:\n fn = os.path.join(path, \"power_curves.csv\")\n self.power_curve = get_turbine_data_from_file(\n self.turbine_type, fn\n )\n except KeyError:\n msg = \"No power curve found for {0}\"\n logging.debug(msg.format(self.turbine_type))\n if power_coefficient_curve is None:\n try:\n fn = os.path.join(path, \"power_coefficient_curves.csv\")\n self.power_coefficient_curve = get_turbine_data_from_file(\n self.turbine_type, fn\n )\n except KeyError:\n msg = \"No power coefficient curve found for {0}\"\n logging.debug(msg.format(self.turbine_type))\n\n if nominal_power is None or (\n rotor_diameter is None\n and self.power_coefficient_curve is not None\n ):\n turbine_data = None\n try:\n fn = os.path.join(path, \"turbine_data.csv\")\n turbine_data = get_turbine_data_from_file(\n self.turbine_type, fn\n )\n except KeyError:\n msg = \"No turbine data found for {0}\"\n logging.debug(msg.format(self.turbine_type))\n\n if self.nominal_power is None and turbine_data is not None:\n self.nominal_power = float(turbine_data[\"nominal_power\"])\n if self.rotor_diameter is None and turbine_data is not None:\n self.rotor_diameter = float(turbine_data[\"rotor_diameter\"])\n\n if self.rotor_diameter:\n if self.hub_height <= 0.5 * self.rotor_diameter:\n msg = \"1/2rotor_diameter cannot be greater than hub_height\"\n raise ValueError(msg)\n\n if self.power_curve is None and self.power_coefficient_curve is None:\n msg = (\n \"The WindTurbine has been initialised without a power curve\"\n \" and without a power coefficient curve.\\nYou will not be\"\n \" able to calculate the power output.\\n\"\n \" Check if the turbine type {0} is in your database file\"\n \" or if you passed a valid 
curve.\"\n )\n warnings.warn(msg.format(turbine_type), WindpowerlibUserWarning)\n else:\n # power (coefficient) curve to pd.DataFrame in case of being dict\n if isinstance(self.power_curve, dict):\n self.power_curve = pd.DataFrame(self.power_curve)\n if isinstance(self.power_coefficient_curve, dict):\n self.power_coefficient_curve = pd.DataFrame(\n self.power_coefficient_curve\n )\n # sort power (coefficient) curve by wind speed\n if isinstance(self.power_curve, pd.DataFrame):\n self.power_curve.sort_values(by=\"wind_speed\")\n elif self.power_curve is not None:\n msg = (\n \"Type of power curve of {} is {} but should be \"\n \"pd.DataFrame or dict.\"\n )\n raise TypeError(\n msg.format(self.__repr__(), type(self.power_curve))\n )\n if isinstance(self.power_coefficient_curve, pd.DataFrame):\n self.power_coefficient_curve.sort_values(by=\"wind_speed\")\n elif self.power_coefficient_curve is not None:\n msg = (\n \"Type of power coefficient curve of {} is {} but \"\n \"should be pd.DataFrame or dict.\"\n )\n raise TypeError(\n msg.format(\n self.__repr__(), type(self.power_coefficient_curve)\n )\n )\n\n def __repr__(self):\n info = []\n if self.nominal_power is not None:\n info.append(\"nominal power={} W\".format(self.nominal_power))\n if self.hub_height is not None:\n info.append(\"hub height={} m\".format(self.hub_height))\n if self.rotor_diameter is not None:\n info.append(\"rotor diameter={} m\".format(self.rotor_diameter))\n if self.power_coefficient_curve is not None:\n info.append(\"power_coefficient_curve={}\".format(\"True\"))\n else:\n info.append(\"power_coefficient_curve={}\".format(\"False\"))\n if self.power_curve is not None:\n info.append(\"power_curve={}\".format(\"True\"))\n else:\n info.append(\"power_curve={}\".format(\"False\"))\n\n if self.turbine_type is not None:\n turbine_repr = \"Wind turbine: {name} {info}\".format(\n name=self.turbine_type, info=info\n )\n else:\n turbine_repr = \"Wind turbine: {info}\".format(info=info)\n\n return turbine_repr\n\n def to_group(self, number_turbines=None, total_capacity=None):\n r\"\"\"\n Creates a :class:`~windpowerlib.wind_turbine.WindTurbineGroup`, a\n NamedTuple data container with the fields 'number_of_turbines' and\n 'wind_turbine'. If no parameter is passed the number of turbines is\n set to one.\n\n It can be used to calculate the number of turbines for a given total\n capacity or to create a namedtuple that can be used to define a\n :class:`~windpowerlib.wind_farm.WindFarm` object.\n\n Parameters\n ----------\n number_turbines : float\n Number of turbines of the defined type. Default: 1\n total_capacity : float\n Total capacity of the group of wind turbines of the same type.\n\n Returns\n -------\n :class:`~windpowerlib.wind_turbine.WindTurbineGroup`\n A namedtuple with two fields: 'number_of_turbines' and\n 'wind_turbine'.\n\n Examples\n --------\n >>> from windpowerlib import WindTurbine\n >>> enerconE126={\n ... 'hub_height': 135,\n ... 
'turbine_type': 'E-126/4200'}\n >>> e126=WindTurbine(**enerconE126)\n >>> e126.to_group(5).number_of_turbines\n 5\n >>> e126.to_group().number_of_turbines\n 1\n >>> e126.to_group(number_turbines=7).number_of_turbines\n 7\n >>> e126.to_group(total_capacity=12600000).number_of_turbines\n 3.0\n >>> e126.to_group(total_capacity=14700000).number_of_turbines\n 3.5\n >>> e126.to_group(total_capacity=12600000).wind_turbine.nominal_power\n 4200000.0\n >>> type(e126.to_group(5))\n <class 'windpowerlib.wind_turbine.WindTurbineGroup'>\n >>> e126.to_group(5) # doctest: +NORMALIZE_WHITESPACE\n WindTurbineGroup(wind_turbine=Wind turbine: E-126/4200 ['nominal\n power=4200000.0 W', 'hub height=135 m', 'rotor diameter=127.0 m',\n 'power_coefficient_curve=True', 'power_curve=True'],\n number_of_turbines=5)\n \"\"\"\n\n if number_turbines is not None and total_capacity is not None:\n raise ValueError(\n \"The 'number' and the 'total_capacity' parameter \"\n \"are mutually exclusive. Use just one of them.\"\n )\n elif total_capacity is not None:\n number_turbines = total_capacity / self.nominal_power\n elif number_turbines is None:\n number_turbines = 1\n\n return WindTurbineGroup(\n wind_turbine=self, number_of_turbines=number_turbines\n )\n\n\n# This is working for Python >= 3.5.\n# There a cleaner solutions for Python >= 3.6, once the support of 3.5 is\n# dropped: https://stackoverflow.com/a/50038614\nclass WindTurbineGroup(\n NamedTuple(\n \"WindTurbineGroup\",\n [(\"wind_turbine\", WindTurbine), (\"number_of_turbines\", float)],\n )\n):\n \"\"\"\n A simple data container to define more than one turbine of the same type.\n Use the :func:`~windpowerlib.wind_turbine.WindTurbine.to_group` method to\n easily create a WindTurbineGroup from a\n :class:`~windpowerlib.wind_turbine.WindTurbine` object.\n\n Parameters\n ----------\n 'wind_turbine' : WindTurbine\n A WindTurbine object with all necessary attributes.\n 'number_of_turbines' : float\n The number of turbines. The number is not restricted to integer values.\n \"\"\"\n\n __slots__ = ()\n\n\nWindTurbineGroup.wind_turbine.__doc__ = (\n \"A :class:`~windpowerlib.wind_farm.WindTurbine` object.\"\n)\nWindTurbineGroup.number_of_turbines.__doc__ = (\n \"Number of turbines of type WindTurbine\"\n)\n\n\ndef get_turbine_data_from_file(turbine_type, path):\n r\"\"\"\n Fetches turbine data from a csv file.\n\n See `example_power_curves.csv', `example_power_coefficient_curves.csv` and\n `example_turbine_data.csv` in example/data for the required format of\n a csv file. Make sure to provide wind speeds in m/s and power in W or\n convert units after loading the data.\n\n Parameters\n ----------\n turbine_type : str\n Specifies the turbine type data is fetched for.\n path : str\n Specifies the source of the turbine data.\n See the example below for how to use the example data.\n\n Returns\n -------\n :pandas:`pandas.DataFrame<frame>` or float\n Power curve or power coefficient curve (pandas.DataFrame) or nominal\n power (float) of one wind turbine type. Power (coefficient) curve\n DataFrame contains power coefficient curve values (dimensionless) or\n power curve values (in dimension given in file) with the corresponding\n wind speeds (in dimension given in file).\n\n Examples\n --------\n >>> from windpowerlib import wind_turbine\n >>> import os\n >>> path=os.path.join(os.path.dirname(__file__), '../tests/data',\n ... 
'power_curves.csv')\n >>> d3=get_turbine_data_from_file('DUMMY 3', path)\n >>> print(d3['value'][7])\n 18000.0\n >>> print(d3['value'].max())\n 1500000.0\n \"\"\"\n\n try:\n df = pd.read_csv(path, index_col=0)\n except FileNotFoundError:\n raise FileNotFoundError(\"The file '{}' was not found.\".format(path))\n wpp_df = df[df.index == turbine_type].copy()\n # if turbine not in data file\n if wpp_df.shape[0] == 0:\n msg = \"Wind converter type {0} not provided. Possible types: {1}\"\n raise KeyError(msg.format(turbine_type, list(df.index)))\n # if turbine in data file\n # get nominal power or power (coefficient) curve\n if \"turbine_data\" in path:\n return wpp_df\n else:\n wpp_df.dropna(axis=1, inplace=True)\n wpp_df = wpp_df.transpose().reset_index()\n wpp_df.columns = [\"wind_speed\", \"value\"]\n # transform wind speeds to floats\n wpp_df[\"wind_speed\"] = wpp_df[\"wind_speed\"].apply(lambda x: float(x))\n return wpp_df\n\n\ndef create_power_curve(wind_speed, power):\n \"\"\"\n A list, numpy.array, pandas.Series or other iterables can be passed to\n define the wind speed and the power output. Make sure that the order is\n not mutable because, values from both parameters will be used as value\n pairs.\n\n Parameters\n ----------\n wind_speed : iterable\n A series of wind speed values in meter per second [m/s].\n power : iterable\n A series of power values in Watt [W].\n\n Returns\n -------\n pandas.DataFrame\n \"\"\"\n return pd.DataFrame(data={\"value\": power, \"wind_speed\": wind_speed})\n\n\ndef load_turbine_data_from_oedb(schema=\"supply\", table=\"wind_turbine_library\"):\n r\"\"\"\n Loads turbine library from the OpenEnergy database (oedb).\n\n Turbine data is saved to csv files ('oedb_power_curves.csv',\n 'oedb_power_coefficient_curves.csv' and 'oedb_nominal_power') for offline\n usage of the windpowerlib. If the files already exist they are overwritten.\n\n Parameters\n ----------\n schema : str\n Database schema of the turbine library.\n table : str\n Table name of the turbine library.\n\n Returns\n -------\n :pandas:`pandas.DataFrame<frame>`\n Turbine data of different turbines such as 'manufacturer',\n 'turbine_type', 'nominal_power'.\n\n \"\"\"\n # url of OpenEnergy Platform that contains the oedb\n oep_url = \"http://oep.iks.cs.ovgu.de/\"\n\n # load data\n result = requests.get(\n oep_url + \"/api/v0/schema/{}/tables/{}/rows/?\".format(schema, table),\n )\n if not result.status_code == 200:\n raise ConnectionError(\n \"Database connection not successful. 
\"\n \"Response: [{}]\".format(result.status_code)\n )\n # extract data to dataframe\n turbine_data = pd.DataFrame(result.json())\n # standard file name for saving data\n filename = os.path.join(os.path.dirname(__file__), \"oedb\", \"{}.csv\")\n # get all power (coefficient) curves and save to file\n # for curve_type in ['power_curve', 'power_coefficient_curve']:\n for curve_type in [\"power_curve\", \"power_coefficient_curve\"]:\n curves_df = pd.DataFrame(columns=[\"wind_speed\"])\n for index in turbine_data.index:\n if (\n turbine_data[\"{}_wind_speeds\".format(curve_type)][index]\n and turbine_data[\"{}_values\".format(curve_type)][index]\n ):\n df = (\n pd.DataFrame(\n data=[\n eval(\n turbine_data[\n \"{}_wind_speeds\".format(curve_type)\n ][index]\n ),\n eval(\n turbine_data[\"{}_values\".format(curve_type)][\n index\n ]\n ),\n ]\n )\n .transpose()\n .rename(\n columns={\n 0: \"wind_speed\",\n 1: turbine_data[\"turbine_type\"][index],\n }\n )\n )\n curves_df = pd.merge(\n left=curves_df, right=df, how=\"outer\", on=\"wind_speed\"\n )\n curves_df = curves_df.set_index(\"wind_speed\").sort_index().transpose()\n # power curve values in W\n if curve_type == \"power_curve\":\n curves_df *= 1000\n curves_df.index.name = \"turbine_type\"\n curves_df.to_csv(filename.format(\"{}s\".format(curve_type)))\n\n # get turbine data and save to file (excl. curves)\n turbine_data_df = turbine_data.drop(\n [\n \"power_curve_wind_speeds\",\n \"power_curve_values\",\n \"power_coefficient_curve_wind_speeds\",\n \"power_coefficient_curve_values\",\n \"thrust_coefficient_curve_wind_speeds\",\n \"thrust_coefficient_curve_values\",\n ],\n axis=1,\n ).set_index(\"turbine_type\")\n # nominal power in W\n turbine_data_df[\"nominal_power\"] *= 1000\n turbine_data_df.to_csv(filename.format(\"turbine_data\"))\n return turbine_data\n\n\ndef get_turbine_types(turbine_library=\"local\", print_out=True, filter_=True):\n r\"\"\"\n Get all provided wind turbine types provided.\n\n Choose by `turbine_library` whether to get wind turbine types provided by\n the OpenEnergy Database ('oedb') or wind turbine types provided in your\n local file(s) ('local').\n By default only turbine types for which a power coefficient curve or power\n curve is provided are returned. Set `filter_=False` to see all turbine\n types for which any data (e.g. hub height, rotor diameter, ...) is\n provided.\n\n Parameters\n ----------\n turbine_library : str\n Specifies if the oedb turbine library ('oedb') or your local turbine\n data file ('local') is evaluated. Default: 'local'.\n print_out : bool\n Directly prints a tabular containing the turbine types in column\n 'turbine_type', the manufacturer in column 'manufacturer' and\n information about whether a power (coefficient) curve exists (True) or\n not (False) in columns 'has_power_curve' and 'has_cp_curve'.\n Default: True.\n filter_ : bool\n If True only turbine types for which a power coefficient curve or\n power curve is provided in the oedb turbine library are\n returned. Default: True.\n\n Returns\n -------\n :pandas:`pandas.DataFrame<frame>`\n Contains turbine types in column 'turbine_type', the manufacturer in\n column 'manufacturer' and information about whether a power\n (coefficient) curve exists (True) or not (False) in columns\n 'has_power_curve' and 'has_cp_curve'.\n\n Notes\n -----\n If the power (coefficient) curve of the desired turbine type (or the\n turbine type itself) is missing you can contact us via github or\n [email protected]. 
You can help us by providing data in the\n format as shown in\n `the data base <https://openenergy-platform.org/dataedit/view/supply/wind_turbine_library>`_.\n\n Examples\n --------\n >>> from windpowerlib import wind_turbine\n >>> df=wind_turbine.get_turbine_types(print_out=False)\n >>> print(df[df[\"turbine_type\"].str.contains(\"E-126\")].iloc[0])\n manufacturer Enercon\n turbine_type E-126/4200\n has_power_curve True\n has_cp_curve True\n Name: 5, dtype: object\n >>> print(df[df[\"manufacturer\"].str.contains(\"Enercon\")].iloc[0])\n manufacturer Enercon\n turbine_type E-101/3050\n has_power_curve True\n has_cp_curve True\n Name: 1, dtype: object\n\n \"\"\"\n if turbine_library == \"local\":\n filename = os.path.join(\n os.path.dirname(__file__), \"oedb\", \"turbine_data.csv\"\n )\n df = pd.read_csv(filename, index_col=0).reset_index()\n elif turbine_library == \"oedb\":\n df = load_turbine_data_from_oedb()\n else:\n raise ValueError(\n \"`turbine_library` is '{}' \".format(turbine_library)\n + \"but must be 'local' or 'oedb'.\"\n )\n if filter_:\n cp_curves_df = df.loc[df[\"has_cp_curve\"]][\n [\"manufacturer\", \"turbine_type\", \"has_cp_curve\"]\n ]\n p_curves_df = df.loc[df[\"has_power_curve\"]][\n [\"manufacturer\", \"turbine_type\", \"has_power_curve\"]\n ]\n curves_df = pd.merge(\n p_curves_df, cp_curves_df, how=\"outer\", sort=True\n ).fillna(False)\n else:\n curves_df = df[\n [\"manufacturer\", \"turbine_type\", \"has_power_curve\", \"has_cp_curve\"]\n ]\n if print_out:\n pd.set_option(\"display.max_rows\", len(curves_df))\n print(curves_df)\n pd.reset_option(\"display.max_rows\")\n return curves_df\n" ]
[ [ "pandas.reset_option", "pandas.merge", "pandas.read_csv", "pandas.DataFrame" ] ]
jakevdp/Mmani
[ "681b6cdbd358b207e8b6c4a482262c84bea15bd7" ]
[ "megaman/geometry/tests/test_adjacency.py" ]
[ "# LICENSE: Simplified BSD https://github.com/mmp2/megaman/blob/master/LICENSE\n\nfrom nose import SkipTest\n\nimport numpy as np\nfrom numpy.testing import assert_allclose, assert_raises, assert_equal\nfrom scipy.sparse import isspmatrix\nfrom scipy.spatial.distance import cdist, pdist, squareform\n\nfrom megaman.geometry import (Geometry, compute_adjacency_matrix, Adjacency,\n adjacency_methods)\n\n\ntry:\n import pyflann as pyf\n NO_PYFLANN = False\nexcept ImportError:\n NO_PYFLANN = True\n\n\ndef test_adjacency_methods():\n assert_equal(set(adjacency_methods()),\n {'auto', 'pyflann', 'ball_tree',\n 'cyflann', 'brute', 'kd_tree'})\n\n\ndef test_adjacency_input_validation():\n X = np.random.rand(20, 3)\n # need to specify radius or n_neighbors\n assert_raises(ValueError, compute_adjacency_matrix, X)\n # cannot specify both radius and n_neighbors\n assert_raises(ValueError, compute_adjacency_matrix, X,\n radius=1, n_neighbors=10)\n\n\ndef test_adjacency():\n X = np.random.rand(100, 3)\n Gtrue = {}\n\n exact_methods = [m for m in Adjacency.methods()\n if not m.endswith('flann')]\n\n def check_kneighbors(n_neighbors, method):\n if method == 'pyflann' and NO_PYFLANN:\n raise SkipTest(\"pyflann not installed\")\n\n G = compute_adjacency_matrix(X, method=method,\n n_neighbors=n_neighbors)\n assert isspmatrix(G)\n assert G.shape == (X.shape[0], X.shape[0])\n if method in exact_methods:\n assert_allclose(G.toarray(), Gtrue[n_neighbors].toarray())\n\n def check_radius(radius, method):\n if method == 'pyflann' and NO_PYFLANN:\n raise SkipTest(\"pyflann not installed\")\n\n G = compute_adjacency_matrix(X, method=method,\n radius=radius)\n assert isspmatrix(G)\n assert G.shape == (X.shape[0], X.shape[0])\n if method in exact_methods:\n assert_allclose(G.toarray(), Gtrue[radius].toarray())\n\n for n_neighbors in [5, 10, 15]:\n Gtrue[n_neighbors] = compute_adjacency_matrix(X, method='brute',\n n_neighbors=n_neighbors)\n for method in Adjacency.methods():\n yield check_kneighbors, n_neighbors, method\n\n for radius in [0.1, 0.5, 1.0]:\n Gtrue[radius] = compute_adjacency_matrix(X, method='brute',\n radius=radius)\n for method in Adjacency.methods():\n yield check_radius, radius, method\n\n\ndef test_unknown_method():\n X = np.arange(20).reshape((10, 2))\n assert_raises(ValueError, compute_adjacency_matrix, X, 'foo')\n\n\ndef test_all_methods_close():\n rand = np.random.RandomState(36)\n X = rand.randn(10, 2)\n D_true = squareform(pdist(X))\n D_true[D_true > 0.5] = 0\n\n def check_method(method):\n kwargs = {}\n if method == 'pyflann':\n try:\n import pyflann as pyf\n except ImportError:\n raise SkipTest(\"pyflann not installed.\")\n flindex = pyf.FLANN()\n flindex.build_index(X, algorithm='kmeans',\n target_precision=0.9)\n kwargs['flann_index'] = flindex\n this_D = compute_adjacency_matrix(X, method=method, radius=0.5,\n **kwargs)\n assert_allclose(this_D.toarray(), D_true, rtol=1E-5)\n\n for method in ['auto', 'cyflann', 'pyflann', 'brute']:\n yield check_method, method\n\n\ndef test_custom_adjacency():\n class CustomAdjacency(Adjacency):\n name = \"custom\"\n def adjacency_graph(self, X):\n return squareform(pdist(X))\n\n rand = np.random.RandomState(42)\n X = rand.rand(10, 2)\n D = compute_adjacency_matrix(X, method='custom', radius=1)\n assert_allclose(D, cdist(X, X))\n\n Adjacency._remove_from_registry(\"custom\")\n" ]
[ [ "scipy.sparse.isspmatrix", "numpy.arange", "scipy.spatial.distance.cdist", "scipy.spatial.distance.pdist", "numpy.testing.assert_raises", "numpy.random.rand", "numpy.random.RandomState" ] ]
aahmadian-liu/ood-likefree-invertible
[ "977e70eccaa7f2eb09724b5bf6f28156f4940461" ]
[ "Codes/Resflow_Procs/preprocessing/convert_to_pth.py" ]
[ "import sys\nimport re\nimport numpy as np\nimport torch\n\ninfile='celeba_full_64x64_5bit.npy'\nimg = torch.tensor(np.load(infile))\nimg = img.permute(0, 3, 1, 2)\ntorch.save(img, re.sub('.npy$', '.pth', infile))\n" ]
[ [ "numpy.load" ] ]